xref: /freebsd/sys/amd64/amd64/pmap.c (revision c03c5b1c)
1 /*-
2  * SPDX-License-Identifier: BSD-4-Clause
3  *
4  * Copyright (c) 1991 Regents of the University of California.
5  * All rights reserved.
6  * Copyright (c) 1994 John S. Dyson
7  * All rights reserved.
8  * Copyright (c) 1994 David Greenman
9  * All rights reserved.
10  * Copyright (c) 2003 Peter Wemm
11  * All rights reserved.
12  * Copyright (c) 2005-2010 Alan L. Cox <alc@cs.rice.edu>
13  * All rights reserved.
14  *
15  * This code is derived from software contributed to Berkeley by
16  * the Systems Programming Group of the University of Utah Computer
17  * Science Department and William Jolitz of UUNET Technologies Inc.
18  *
19  * Redistribution and use in source and binary forms, with or without
20  * modification, are permitted provided that the following conditions
21  * are met:
22  * 1. Redistributions of source code must retain the above copyright
23  *    notice, this list of conditions and the following disclaimer.
24  * 2. Redistributions in binary form must reproduce the above copyright
25  *    notice, this list of conditions and the following disclaimer in the
26  *    documentation and/or other materials provided with the distribution.
27  * 3. All advertising materials mentioning features or use of this software
28  *    must display the following acknowledgement:
29  *	This product includes software developed by the University of
30  *	California, Berkeley and its contributors.
31  * 4. Neither the name of the University nor the names of its contributors
32  *    may be used to endorse or promote products derived from this software
33  *    without specific prior written permission.
34  *
35  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
36  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
37  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
38  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
39  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
40  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
41  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
42  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
43  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
44  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
45  * SUCH DAMAGE.
46  *
47  *	from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
48  */
49 /*-
50  * Copyright (c) 2003 Networks Associates Technology, Inc.
51  * Copyright (c) 2014-2020 The FreeBSD Foundation
52  * All rights reserved.
53  *
54  * This software was developed for the FreeBSD Project by Jake Burkholder,
55  * Safeport Network Services, and Network Associates Laboratories, the
56  * Security Research Division of Network Associates, Inc. under
57  * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
58  * CHATS research program.
59  *
60  * Portions of this software were developed by
61  * Konstantin Belousov <kib@FreeBSD.org> under sponsorship from
62  * the FreeBSD Foundation.
63  *
64  * Redistribution and use in source and binary forms, with or without
65  * modification, are permitted provided that the following conditions
66  * are met:
67  * 1. Redistributions of source code must retain the above copyright
68  *    notice, this list of conditions and the following disclaimer.
69  * 2. Redistributions in binary form must reproduce the above copyright
70  *    notice, this list of conditions and the following disclaimer in the
71  *    documentation and/or other materials provided with the distribution.
72  *
73  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
74  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
75  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
76  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
77  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
78  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
79  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
80  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
81  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
82  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
83  * SUCH DAMAGE.
84  */
85 
86 #define	AMD64_NPT_AWARE
87 
88 #include <sys/cdefs.h>
89 __FBSDID("$FreeBSD$");
90 
91 /*
92  *	Manages physical address maps.
93  *
94  *	Since the information managed by this module is
95  *	also stored by the logical address mapping module,
96  *	this module may throw away valid virtual-to-physical
97  *	mappings at almost any time.  However, invalidations
98  *	of virtual-to-physical mappings must be done as
99  *	requested.
100  *
101  *	In order to cope with hardware architectures which
102  *	make virtual-to-physical map invalidations expensive,
103  *	this module may delay invalidation or reduced-protection
104  *	operations until such time as they are actually
105  *	necessary.  This module is given full information as
106  *	to which processors are currently using which maps,
107  *	and to when physical maps must be made correct.
108  */
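
/*
 * On amd64, immediate invalidations are issued through the
 * pmap_invalidate_*() functions below, while the delayed invalidation
 * (DI) machinery lets the mapping-removal paths drop PV list locks
 * before the corresponding TLB shootdowns have completed.
 */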
109 
110 #include "opt_ddb.h"
111 #include "opt_pmap.h"
112 #include "opt_vm.h"
113 
114 #include <sys/param.h>
115 #include <sys/asan.h>
116 #include <sys/bitstring.h>
117 #include <sys/bus.h>
118 #include <sys/systm.h>
119 #include <sys/counter.h>
120 #include <sys/kernel.h>
121 #include <sys/ktr.h>
122 #include <sys/lock.h>
123 #include <sys/malloc.h>
124 #include <sys/mman.h>
125 #include <sys/msan.h>
126 #include <sys/mutex.h>
127 #include <sys/proc.h>
128 #include <sys/rangeset.h>
129 #include <sys/rwlock.h>
130 #include <sys/sbuf.h>
131 #include <sys/smr.h>
132 #include <sys/sx.h>
133 #include <sys/turnstile.h>
134 #include <sys/vmem.h>
135 #include <sys/vmmeter.h>
136 #include <sys/sched.h>
137 #include <sys/sysctl.h>
138 #include <sys/smp.h>
139 #ifdef DDB
140 #include <sys/kdb.h>
141 #include <ddb/ddb.h>
142 #endif
143 
144 #include <vm/vm.h>
145 #include <vm/vm_param.h>
146 #include <vm/vm_kern.h>
147 #include <vm/vm_page.h>
148 #include <vm/vm_map.h>
149 #include <vm/vm_object.h>
150 #include <vm/vm_extern.h>
151 #include <vm/vm_pageout.h>
152 #include <vm/vm_pager.h>
153 #include <vm/vm_phys.h>
154 #include <vm/vm_radix.h>
155 #include <vm/vm_reserv.h>
156 #include <vm/vm_dumpset.h>
157 #include <vm/uma.h>
158 
159 #include <machine/asan.h>
160 #include <machine/intr_machdep.h>
161 #include <x86/apicvar.h>
162 #include <x86/ifunc.h>
163 #include <machine/cpu.h>
164 #include <machine/cputypes.h>
165 #include <machine/md_var.h>
166 #include <machine/msan.h>
167 #include <machine/pcb.h>
168 #include <machine/specialreg.h>
169 #ifdef SMP
170 #include <machine/smp.h>
171 #endif
172 #include <machine/sysarch.h>
173 #include <machine/tss.h>
174 
175 #ifdef NUMA
176 #define	PMAP_MEMDOM	MAXMEMDOM
177 #else
178 #define	PMAP_MEMDOM	1
179 #endif
180 
181 static __inline boolean_t
182 pmap_type_guest(pmap_t pmap)
183 {
184 
185 	return ((pmap->pm_type == PT_EPT) || (pmap->pm_type == PT_RVI));
186 }
187 
188 static __inline boolean_t
189 pmap_emulate_ad_bits(pmap_t pmap)
190 {
191 
192 	return ((pmap->pm_flags & PMAP_EMULATE_AD_BITS) != 0);
193 }
194 
195 static __inline pt_entry_t
196 pmap_valid_bit(pmap_t pmap)
197 {
198 	pt_entry_t mask;
199 
200 	switch (pmap->pm_type) {
201 	case PT_X86:
202 	case PT_RVI:
203 		mask = X86_PG_V;
204 		break;
205 	case PT_EPT:
206 		if (pmap_emulate_ad_bits(pmap))
207 			mask = EPT_PG_EMUL_V;
208 		else
209 			mask = EPT_PG_READ;
210 		break;
211 	default:
212 		panic("pmap_valid_bit: invalid pm_type %d", pmap->pm_type);
213 	}
214 
215 	return (mask);
216 }
217 
218 static __inline pt_entry_t
219 pmap_rw_bit(pmap_t pmap)
220 {
221 	pt_entry_t mask;
222 
223 	switch (pmap->pm_type) {
224 	case PT_X86:
225 	case PT_RVI:
226 		mask = X86_PG_RW;
227 		break;
228 	case PT_EPT:
229 		if (pmap_emulate_ad_bits(pmap))
230 			mask = EPT_PG_EMUL_RW;
231 		else
232 			mask = EPT_PG_WRITE;
233 		break;
234 	default:
235 		panic("pmap_rw_bit: invalid pm_type %d", pmap->pm_type);
236 	}
237 
238 	return (mask);
239 }
240 
241 static pt_entry_t pg_g;
242 
243 static __inline pt_entry_t
244 pmap_global_bit(pmap_t pmap)
245 {
246 	pt_entry_t mask;
247 
248 	switch (pmap->pm_type) {
249 	case PT_X86:
250 		mask = pg_g;
251 		break;
252 	case PT_RVI:
253 	case PT_EPT:
254 		mask = 0;
255 		break;
256 	default:
257 		panic("pmap_global_bit: invalid pm_type %d", pmap->pm_type);
258 	}
259 
260 	return (mask);
261 }
262 
263 static __inline pt_entry_t
264 pmap_accessed_bit(pmap_t pmap)
265 {
266 	pt_entry_t mask;
267 
268 	switch (pmap->pm_type) {
269 	case PT_X86:
270 	case PT_RVI:
271 		mask = X86_PG_A;
272 		break;
273 	case PT_EPT:
274 		if (pmap_emulate_ad_bits(pmap))
275 			mask = EPT_PG_READ;
276 		else
277 			mask = EPT_PG_A;
278 		break;
279 	default:
280 		panic("pmap_accessed_bit: invalid pm_type %d", pmap->pm_type);
281 	}
282 
283 	return (mask);
284 }
285 
286 static __inline pt_entry_t
287 pmap_modified_bit(pmap_t pmap)
288 {
289 	pt_entry_t mask;
290 
291 	switch (pmap->pm_type) {
292 	case PT_X86:
293 	case PT_RVI:
294 		mask = X86_PG_M;
295 		break;
296 	case PT_EPT:
297 		if (pmap_emulate_ad_bits(pmap))
298 			mask = EPT_PG_WRITE;
299 		else
300 			mask = EPT_PG_M;
301 		break;
302 	default:
303 		panic("pmap_modified_bit: invalid pm_type %d", pmap->pm_type);
304 	}
305 
306 	return (mask);
307 }
308 
309 static __inline pt_entry_t
310 pmap_pku_mask_bit(pmap_t pmap)
311 {
312 
313 	return (pmap->pm_type == PT_X86 ? X86_PG_PKU_MASK : 0);
314 }
315 
316 #if !defined(DIAGNOSTIC)
317 #ifdef __GNUC_GNU_INLINE__
318 #define PMAP_INLINE	__attribute__((__gnu_inline__)) inline
319 #else
320 #define PMAP_INLINE	extern inline
321 #endif
322 #else
323 #define PMAP_INLINE
324 #endif
325 
326 #ifdef PV_STATS
327 #define PV_STAT(x)	do { x ; } while (0)
328 #else
329 #define PV_STAT(x)	do { } while (0)
330 #endif
331 
332 #undef pa_index
333 #ifdef NUMA
334 #define	pa_index(pa)	({					\
335 	KASSERT((pa) <= vm_phys_segs[vm_phys_nsegs - 1].end,	\
336 	    ("address %lx beyond the last segment", (pa)));	\
337 	(pa) >> PDRSHIFT;					\
338 })
339 #define	pa_to_pmdp(pa)	(&pv_table[pa_index(pa)])
340 #define	pa_to_pvh(pa)	(&(pa_to_pmdp(pa)->pv_page))
341 #define	PHYS_TO_PV_LIST_LOCK(pa)	({			\
342 	struct rwlock *_lock;					\
343 	if (__predict_false((pa) > pmap_last_pa))		\
344 		_lock = &pv_dummy_large.pv_lock;		\
345 	else							\
346 		_lock = &(pa_to_pmdp(pa)->pv_lock);		\
347 	_lock;							\
348 })
349 #else
350 #define	pa_index(pa)	((pa) >> PDRSHIFT)
351 #define	pa_to_pvh(pa)	(&pv_table[pa_index(pa)])
352 
353 #define	NPV_LIST_LOCKS	MAXCPU
354 
355 #define	PHYS_TO_PV_LIST_LOCK(pa)	\
356 			(&pv_list_locks[pa_index(pa) % NPV_LIST_LOCKS])
357 #endif
358 
359 #define	CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa)	do {	\
360 	struct rwlock **_lockp = (lockp);		\
361 	struct rwlock *_new_lock;			\
362 							\
363 	_new_lock = PHYS_TO_PV_LIST_LOCK(pa);		\
364 	if (_new_lock != *_lockp) {			\
365 		if (*_lockp != NULL)			\
366 			rw_wunlock(*_lockp);		\
367 		*_lockp = _new_lock;			\
368 		rw_wlock(*_lockp);			\
369 	}						\
370 } while (0)
371 
372 #define	CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m)	\
373 			CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, VM_PAGE_TO_PHYS(m))
374 
375 #define	RELEASE_PV_LIST_LOCK(lockp)		do {	\
376 	struct rwlock **_lockp = (lockp);		\
377 							\
378 	if (*_lockp != NULL) {				\
379 		rw_wunlock(*_lockp);			\
380 		*_lockp = NULL;				\
381 	}						\
382 } while (0)
383 
384 #define	VM_PAGE_TO_PV_LIST_LOCK(m)	\
385 			PHYS_TO_PV_LIST_LOCK(VM_PAGE_TO_PHYS(m))
386 
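
/*
 * A page's PV list is protected by a rwlock selected by the page's
 * physical address: with NUMA, one lock per pv_table entry (one entry
 * per NBPDR bytes of physical memory); otherwise one of NPV_LIST_LOCKS
 * locks chosen by pa_index() modulo the lock count.  An illustrative
 * direct use (most code instead uses the CHANGE_ and RELEASE_ macros
 * above to switch locks while iterating):
 *
 *	struct rwlock *lock;
 *
 *	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
 *	rw_wlock(lock);
 *	... modify m's PV list ...
 *	rw_wunlock(lock);
 */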
387 struct pmap kernel_pmap_store;
388 
389 vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
390 vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
391 
392 int nkpt;
393 SYSCTL_INT(_machdep, OID_AUTO, nkpt, CTLFLAG_RD, &nkpt, 0,
394     "Number of kernel page table pages allocated on bootup");
395 
396 static int ndmpdp;
397 vm_paddr_t dmaplimit;
398 vm_offset_t kernel_vm_end = VM_MIN_KERNEL_ADDRESS;
399 pt_entry_t pg_nx;
400 
401 static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
402     "VM/pmap parameters");
403 
404 static int pg_ps_enabled = 1;
405 SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
406     &pg_ps_enabled, 0, "Are large page mappings enabled?");
407 
408 int __read_frequently la57 = 0;
409 SYSCTL_INT(_vm_pmap, OID_AUTO, la57, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
410     &la57, 0,
411     "5-level paging for host is enabled");
412 
413 static bool
414 pmap_is_la57(pmap_t pmap)
415 {
416 	if (pmap->pm_type == PT_X86)
417 		return (la57);
418 	return (false);		/* XXXKIB handle EPT */
419 }
420 
421 #define	PAT_INDEX_SIZE	8
422 static int pat_index[PAT_INDEX_SIZE];	/* cache mode to PAT index conversion */
423 
424 static u_int64_t	KPTphys;	/* phys addr of kernel level 1 */
425 static u_int64_t	KPDphys;	/* phys addr of kernel level 2 */
426 static u_int64_t	KPDPphys;	/* phys addr of kernel level 3 */
427 u_int64_t		KPML4phys;	/* phys addr of kernel level 4 */
428 u_int64_t		KPML5phys;	/* phys addr of kernel level 5,
429 					   if supported */
430 
431 #ifdef KASAN
432 static uint64_t		KASANPDPphys;
433 #endif
434 #ifdef KMSAN
435 static uint64_t		KMSANSHADPDPphys;
436 static uint64_t		KMSANORIGPDPphys;
437 
438 /*
439  * To support systems with large amounts of memory, it is necessary to extend
440  * the maximum size of the direct map.  This could eat into the space reserved
441  * for the shadow map.
442  */
443 _Static_assert(DMPML4I + NDMPML4E <= KMSANSHADPML4I, "direct map overflow");
444 #endif
445 
446 static pml4_entry_t	*kernel_pml4;
447 static u_int64_t	DMPDphys;	/* phys addr of direct mapped level 2 */
448 static u_int64_t	DMPDPphys;	/* phys addr of direct mapped level 3 */
449 static int		ndmpdpphys;	/* number of DMPDPphys pages */
450 
451 vm_paddr_t		kernphys;	/* phys addr of start of bootstrap data */
452 vm_paddr_t		KERNend;	/* and the end */
453 
454 /*
455  * pmap_mapdev() support before pmap initialization (e.g., the console)
456  */
457 #define	PMAP_PREINIT_MAPPING_COUNT	8
458 static struct pmap_preinit_mapping {
459 	vm_paddr_t	pa;
460 	vm_offset_t	va;
461 	vm_size_t	sz;
462 	int		mode;
463 } pmap_preinit_mapping[PMAP_PREINIT_MAPPING_COUNT];
464 static int pmap_initialized;
465 
466 /*
467  * Data for the pv entry allocation mechanism.
468  * Updates to pv_invl_gen are protected by the pv list lock but reads are not.
469  */
470 #ifdef NUMA
471 static __inline int
472 pc_to_domain(struct pv_chunk *pc)
473 {
474 
475 	return (vm_phys_domain(DMAP_TO_PHYS((vm_offset_t)pc)));
476 }
477 #else
478 static __inline int
479 pc_to_domain(struct pv_chunk *pc __unused)
480 {
481 
482 	return (0);
483 }
484 #endif
485 
486 struct pv_chunks_list {
487 	struct mtx pvc_lock;
488 	TAILQ_HEAD(pch, pv_chunk) pvc_list;
489 	int active_reclaims;
490 } __aligned(CACHE_LINE_SIZE);
491 
492 struct pv_chunks_list __exclusive_cache_line pv_chunks[PMAP_MEMDOM];
493 
494 #ifdef	NUMA
495 struct pmap_large_md_page {
496 	struct rwlock   pv_lock;
497 	struct md_page  pv_page;
498 	u_long pv_invl_gen;
499 };
500 __exclusive_cache_line static struct pmap_large_md_page pv_dummy_large;
501 #define pv_dummy pv_dummy_large.pv_page
502 __read_mostly static struct pmap_large_md_page *pv_table;
503 __read_mostly vm_paddr_t pmap_last_pa;
504 #else
505 static struct rwlock __exclusive_cache_line pv_list_locks[NPV_LIST_LOCKS];
506 static u_long pv_invl_gen[NPV_LIST_LOCKS];
507 static struct md_page *pv_table;
508 static struct md_page pv_dummy;
509 #endif
510 
511 /*
512  * All those kernel PT submaps that BSD is so fond of
513  */
514 pt_entry_t *CMAP1 = NULL;
515 caddr_t CADDR1 = 0;
516 static vm_offset_t qframe = 0;
517 static struct mtx qframe_mtx;
518 
519 static int pmap_flags = PMAP_PDE_SUPERPAGE;	/* flags for x86 pmaps */
520 
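/*
 * large_vmem is the KVA arena backing pmap_large_map(); it spans lm_ents
 * PML4 slots (NBPML4 bytes each) starting at LARGEMAP_MIN_ADDRESS, as
 * tested by PMAP_ADDRESS_IN_LARGEMAP() below.
 */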
521 static vmem_t *large_vmem;
522 static u_int lm_ents;
523 #define	PMAP_ADDRESS_IN_LARGEMAP(va)	((va) >= LARGEMAP_MIN_ADDRESS && \
524 	(va) < LARGEMAP_MIN_ADDRESS + NBPML4 * (u_long)lm_ents)
525 
526 int pmap_pcid_enabled = 1;
527 SYSCTL_INT(_vm_pmap, OID_AUTO, pcid_enabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
528     &pmap_pcid_enabled, 0, "Is TLB Context ID enabled ?");
529 int invpcid_works = 0;
530 SYSCTL_INT(_vm_pmap, OID_AUTO, invpcid_works, CTLFLAG_RD, &invpcid_works, 0,
531     "Is the invpcid instruction available ?");
532 
533 int __read_frequently pti = 0;
534 SYSCTL_INT(_vm_pmap, OID_AUTO, pti, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
535     &pti, 0,
536     "Page Table Isolation enabled");
537 static vm_object_t pti_obj;
538 static pml4_entry_t *pti_pml4;
539 static vm_pindex_t pti_pg_idx;
540 static bool pti_finalized;
541 
542 struct pmap_pkru_range {
543 	struct rs_el	pkru_rs_el;
544 	u_int		pkru_keyidx;
545 	int		pkru_flags;
546 };
547 
548 static uma_zone_t pmap_pkru_ranges_zone;
549 static bool pmap_pkru_same(pmap_t pmap, vm_offset_t sva, vm_offset_t eva);
550 static pt_entry_t pmap_pkru_get(pmap_t pmap, vm_offset_t va);
551 static void pmap_pkru_on_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva);
552 static void *pkru_dup_range(void *ctx, void *data);
553 static void pkru_free_range(void *ctx, void *node);
554 static int pmap_pkru_copy(pmap_t dst_pmap, pmap_t src_pmap);
555 static int pmap_pkru_deassign(pmap_t pmap, vm_offset_t sva, vm_offset_t eva);
556 static void pmap_pkru_deassign_all(pmap_t pmap);
557 
558 static COUNTER_U64_DEFINE_EARLY(pcid_save_cnt);
559 SYSCTL_COUNTER_U64(_vm_pmap, OID_AUTO, pcid_save_cnt, CTLFLAG_RD,
560     &pcid_save_cnt, "Count of saved TLB context on switch");
561 
562 static LIST_HEAD(, pmap_invl_gen) pmap_invl_gen_tracker =
563     LIST_HEAD_INITIALIZER(&pmap_invl_gen_tracker);
564 static struct mtx invl_gen_mtx;
565 /* Fake lock object to satisfy turnstiles interface. */
566 static struct lock_object invl_gen_ts = {
567 	.lo_name = "invlts",
568 };
569 static struct pmap_invl_gen pmap_invl_gen_head = {
570 	.gen = 1,
571 	.next = NULL,
572 };
573 static u_long pmap_invl_gen = 1;
574 static int pmap_invl_waiters;
575 static struct callout pmap_invl_callout;
576 static bool pmap_invl_callout_inited;
577 
578 #define	PMAP_ASSERT_NOT_IN_DI() \
579     KASSERT(pmap_not_in_di(), ("DI already started"))
580 
581 static bool
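/*
 * Two implementations of delayed invalidation (DI) are provided: a
 * simple one protected by invl_gen_mtx and a lockless one built on
 * cmpxchg16b.  pmap_di_locked() selects the locked variant when the CPU
 * lacks CMPXCHG16B (CPUID2_CX16) or when the vm.pmap.di_locked tunable
 * is set; the choice is wired up at boot via the ifuncs below.
 */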
582 pmap_di_locked(void)
583 {
584 	int tun;
585 
586 	if ((cpu_feature2 & CPUID2_CX16) == 0)
587 		return (true);
588 	tun = 0;
589 	TUNABLE_INT_FETCH("vm.pmap.di_locked", &tun);
590 	return (tun != 0);
591 }
592 
593 static int
594 sysctl_pmap_di_locked(SYSCTL_HANDLER_ARGS)
595 {
596 	int locked;
597 
598 	locked = pmap_di_locked();
599 	return (sysctl_handle_int(oidp, &locked, 0, req));
600 }
601 SYSCTL_PROC(_vm_pmap, OID_AUTO, di_locked, CTLTYPE_INT | CTLFLAG_RDTUN |
602     CTLFLAG_MPSAFE, 0, 0, sysctl_pmap_di_locked, "",
603     "Locked delayed invalidation");
604 
605 static bool pmap_not_in_di_l(void);
606 static bool pmap_not_in_di_u(void);
607 DEFINE_IFUNC(, bool, pmap_not_in_di, (void))
608 {
609 
610 	return (pmap_di_locked() ? pmap_not_in_di_l : pmap_not_in_di_u);
611 }
612 
613 static bool
614 pmap_not_in_di_l(void)
615 {
616 	struct pmap_invl_gen *invl_gen;
617 
618 	invl_gen = &curthread->td_md.md_invl_gen;
619 	return (invl_gen->gen == 0);
620 }
621 
622 static void
623 pmap_thread_init_invl_gen_l(struct thread *td)
624 {
625 	struct pmap_invl_gen *invl_gen;
626 
627 	invl_gen = &td->td_md.md_invl_gen;
628 	invl_gen->gen = 0;
629 }
630 
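/*
 * pmap_delayed_invl_wait_block() parks the caller on a turnstile keyed
 * by invl_gen_ts while the page's generation is still ahead of the
 * global finished generation; pmap_delayed_invl_finish_unblock()
 * optionally advances pmap_invl_gen and wakes all blocked waiters.
 */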
631 static void
632 pmap_delayed_invl_wait_block(u_long *m_gen, u_long *invl_gen)
633 {
634 	struct turnstile *ts;
635 
636 	ts = turnstile_trywait(&invl_gen_ts);
637 	if (*m_gen > atomic_load_long(invl_gen))
638 		turnstile_wait(ts, NULL, TS_SHARED_QUEUE);
639 	else
640 		turnstile_cancel(ts);
641 }
642 
643 static void
644 pmap_delayed_invl_finish_unblock(u_long new_gen)
645 {
646 	struct turnstile *ts;
647 
648 	turnstile_chain_lock(&invl_gen_ts);
649 	ts = turnstile_lookup(&invl_gen_ts);
650 	if (new_gen != 0)
651 		pmap_invl_gen = new_gen;
652 	if (ts != NULL) {
653 		turnstile_broadcast(ts, TS_SHARED_QUEUE);
654 		turnstile_unpend(ts);
655 	}
656 	turnstile_chain_unlock(&invl_gen_ts);
657 }
658 
659 /*
660  * Start a new Delayed Invalidation (DI) block of code, executed by
661  * the current thread.  Within a DI block, the current thread may
662  * destroy both the page table and PV list entries for a mapping and
663  * then release the corresponding PV list lock before ensuring that
664  * the mapping is flushed from the TLBs of any processors with the
665  * pmap active.
666  */
667 static void
668 pmap_delayed_invl_start_l(void)
669 {
670 	struct pmap_invl_gen *invl_gen;
671 	u_long currgen;
672 
673 	invl_gen = &curthread->td_md.md_invl_gen;
674 	PMAP_ASSERT_NOT_IN_DI();
675 	mtx_lock(&invl_gen_mtx);
676 	if (LIST_EMPTY(&pmap_invl_gen_tracker))
677 		currgen = pmap_invl_gen;
678 	else
679 		currgen = LIST_FIRST(&pmap_invl_gen_tracker)->gen;
680 	invl_gen->gen = currgen + 1;
681 	LIST_INSERT_HEAD(&pmap_invl_gen_tracker, invl_gen, link);
682 	mtx_unlock(&invl_gen_mtx);
683 }
684 
685 /*
686  * Finish the DI block, previously started by the current thread.  All
687  * required TLB flushes for the pages marked by
688  * pmap_delayed_invl_page() must be finished before this function is
689  * called.
690  *
691  * This function works by bumping the global DI generation number to
692  * the generation number of the current thread's DI, unless there is a
693  * pending DI that started earlier.  In the latter case, bumping the
694  * global DI generation number would incorrectly signal that the
695  * earlier DI had finished.  Instead, this function bumps the earlier
696  * DI's generation number to match the generation number of the
697  * current thread's DI.
698  */
699 static void
700 pmap_delayed_invl_finish_l(void)
701 {
702 	struct pmap_invl_gen *invl_gen, *next;
703 
704 	invl_gen = &curthread->td_md.md_invl_gen;
705 	KASSERT(invl_gen->gen != 0, ("missed invl_start"));
706 	mtx_lock(&invl_gen_mtx);
707 	next = LIST_NEXT(invl_gen, link);
708 	if (next == NULL)
709 		pmap_delayed_invl_finish_unblock(invl_gen->gen);
710 	else
711 		next->gen = invl_gen->gen;
712 	LIST_REMOVE(invl_gen, link);
713 	mtx_unlock(&invl_gen_mtx);
714 	invl_gen->gen = 0;
715 }
716 
717 static bool
718 pmap_not_in_di_u(void)
719 {
720 	struct pmap_invl_gen *invl_gen;
721 
722 	invl_gen = &curthread->td_md.md_invl_gen;
723 	return (((uintptr_t)invl_gen->next & PMAP_INVL_GEN_NEXT_INVALID) != 0);
724 }
725 
726 static void
727 pmap_thread_init_invl_gen_u(struct thread *td)
728 {
729 	struct pmap_invl_gen *invl_gen;
730 
731 	invl_gen = &td->td_md.md_invl_gen;
732 	invl_gen->gen = 0;
733 	invl_gen->next = (void *)PMAP_INVL_GEN_NEXT_INVALID;
734 }
735 
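/*
 * Atomically load the 16-byte (gen, next) pair.  cmpxchg16b with a zero
 * expected value either leaves a zero pair unchanged or fails and
 * returns the current contents in rdx:rax; either way "out" receives the
 * current value.  The load is rejected (returns false) if the element is
 * marked PMAP_INVL_GEN_NEXT_INVALID.  pmap_di_store_invl() below is the
 * matching 16-byte compare-and-swap of the pair.
 */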
736 static bool
737 pmap_di_load_invl(struct pmap_invl_gen *ptr, struct pmap_invl_gen *out)
738 {
739 	uint64_t new_high, new_low, old_high, old_low;
740 	char res;
741 
742 	old_low = new_low = 0;
743 	old_high = new_high = (uintptr_t)0;
744 
745 	__asm volatile("lock;cmpxchg16b\t%1"
746 	    : "=@cce" (res), "+m" (*ptr), "+a" (old_low), "+d" (old_high)
747 	    : "b"(new_low), "c" (new_high)
748 	    : "memory", "cc");
749 	if (res == 0) {
750 		if ((old_high & PMAP_INVL_GEN_NEXT_INVALID) != 0)
751 			return (false);
752 		out->gen = old_low;
753 		out->next = (void *)old_high;
754 	} else {
755 		out->gen = new_low;
756 		out->next = (void *)new_high;
757 	}
758 	return (true);
759 }
760 
761 static bool
762 pmap_di_store_invl(struct pmap_invl_gen *ptr, struct pmap_invl_gen *old_val,
763     struct pmap_invl_gen *new_val)
764 {
765 	uint64_t new_high, new_low, old_high, old_low;
766 	char res;
767 
768 	new_low = new_val->gen;
769 	new_high = (uintptr_t)new_val->next;
770 	old_low = old_val->gen;
771 	old_high = (uintptr_t)old_val->next;
772 
773 	__asm volatile("lock;cmpxchg16b\t%1"
774 	    : "=@cce" (res), "+m" (*ptr), "+a" (old_low), "+d" (old_high)
775 	    : "b"(new_low), "c" (new_high)
776 	    : "memory", "cc");
777 	return (res);
778 }
779 
780 static COUNTER_U64_DEFINE_EARLY(pv_page_count);
781 SYSCTL_COUNTER_U64(_vm_pmap, OID_AUTO, pv_page_count, CTLFLAG_RD,
782     &pv_page_count, "Current number of allocated pv pages");
783 
784 static COUNTER_U64_DEFINE_EARLY(user_pt_page_count);
785 SYSCTL_COUNTER_U64(_vm_pmap, OID_AUTO, user_pt_page_count, CTLFLAG_RD,
786     &user_pt_page_count,
787     "Current number of allocated page table pages for userspace");
788 
789 static COUNTER_U64_DEFINE_EARLY(kernel_pt_page_count);
790 SYSCTL_COUNTER_U64(_vm_pmap, OID_AUTO, kernel_pt_page_count, CTLFLAG_RD,
791     &kernel_pt_page_count,
792     "Current number of allocated page table pages for the kernel");
793 
794 #ifdef PV_STATS
795 
796 static COUNTER_U64_DEFINE_EARLY(invl_start_restart);
797 SYSCTL_COUNTER_U64(_vm_pmap, OID_AUTO, invl_start_restart,
798     CTLFLAG_RD, &invl_start_restart,
799     "Number of delayed TLB invalidation request restarts");
800 
801 static COUNTER_U64_DEFINE_EARLY(invl_finish_restart);
802 SYSCTL_COUNTER_U64(_vm_pmap, OID_AUTO, invl_finish_restart, CTLFLAG_RD,
803     &invl_finish_restart,
804     "Number of delayed TLB invalidation completion restarts");
805 
806 static int invl_max_qlen;
807 SYSCTL_INT(_vm_pmap, OID_AUTO, invl_max_qlen, CTLFLAG_RD,
808     &invl_max_qlen, 0,
809     "Maximum delayed TLB invalidation request queue length");
810 #endif
811 
812 #define di_delay	locks_delay
813 
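/*
 * Lockless version of pmap_delayed_invl_start().  The thread's
 * pmap_invl_gen element is appended to the tail of the list anchored at
 * pmap_invl_gen_head with a 16-byte CAS on the predecessor's (gen, next)
 * pair; the new element's generation is the predecessor's plus one.
 * While the new element still has PMAP_INVL_GEN_NEXT_INVALID set in its
 * next pointer, other threads treat the list as busy and retry.  The
 * thread's scheduling priority is boosted to PVM, if lower, so that a
 * preempted starter does not stall other threads for long.
 */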
814 static void
815 pmap_delayed_invl_start_u(void)
816 {
817 	struct pmap_invl_gen *invl_gen, *p, prev, new_prev;
818 	struct thread *td;
819 	struct lock_delay_arg lda;
820 	uintptr_t prevl;
821 	u_char pri;
822 #ifdef PV_STATS
823 	int i, ii;
824 #endif
825 
826 	td = curthread;
827 	invl_gen = &td->td_md.md_invl_gen;
828 	PMAP_ASSERT_NOT_IN_DI();
829 	lock_delay_arg_init(&lda, &di_delay);
830 	invl_gen->saved_pri = 0;
831 	pri = td->td_base_pri;
832 	if (pri > PVM) {
833 		thread_lock(td);
834 		pri = td->td_base_pri;
835 		if (pri > PVM) {
836 			invl_gen->saved_pri = pri;
837 			sched_prio(td, PVM);
838 		}
839 		thread_unlock(td);
840 	}
841 again:
842 	PV_STAT(i = 0);
843 	for (p = &pmap_invl_gen_head;; p = prev.next) {
844 		PV_STAT(i++);
845 		prevl = (uintptr_t)atomic_load_ptr(&p->next);
846 		if ((prevl & PMAP_INVL_GEN_NEXT_INVALID) != 0) {
847 			PV_STAT(counter_u64_add(invl_start_restart, 1));
848 			lock_delay(&lda);
849 			goto again;
850 		}
851 		if (prevl == 0)
852 			break;
853 		prev.next = (void *)prevl;
854 	}
855 #ifdef PV_STATS
856 	if ((ii = invl_max_qlen) < i)
857 		atomic_cmpset_int(&invl_max_qlen, ii, i);
858 #endif
859 
860 	if (!pmap_di_load_invl(p, &prev) || prev.next != NULL) {
861 		PV_STAT(counter_u64_add(invl_start_restart, 1));
862 		lock_delay(&lda);
863 		goto again;
864 	}
865 
866 	new_prev.gen = prev.gen;
867 	new_prev.next = invl_gen;
868 	invl_gen->gen = prev.gen + 1;
869 
870 	/* Formal fence between store to invl->gen and updating *p. */
871 	/* Formal fence between the store to invl_gen->gen and updating *p. */
872 
873 	/*
874 	 * After inserting an invl_gen element with invalid bit set,
875 	 * this thread blocks any other thread trying to enter the
876 	 * delayed invalidation block.  Do not allow ourselves to be removed
877 	 * from the CPU, because that causes starvation for other threads.
878 	 */
879 	critical_enter();
880 
881 	/*
882 	 * ABA for *p is not possible here, since p->gen can only
883 	 * increase.  So if the *p thread finished its DI, then
884 	 * started a new one and got inserted into the list at the
885 	 * same place, its gen will appear greater than the previously
886 	 * read gen.
887 	 */
888 	if (!pmap_di_store_invl(p, &prev, &new_prev)) {
889 		critical_exit();
890 		PV_STAT(counter_u64_add(invl_start_restart, 1));
891 		lock_delay(&lda);
892 		goto again;
893 	}
894 
895 	/*
896 	 * Here we clear PMAP_INVL_GEN_NEXT_INVALID in
897 	 * invl_gen->next, allowing other threads to iterate past us.
898 	 * pmap_di_store_invl() provides fence between the generation
899 	 * write and the update of next.
900 	 */
901 	invl_gen->next = NULL;
902 	critical_exit();
903 }
904 
905 static bool
906 pmap_delayed_invl_finish_u_crit(struct pmap_invl_gen *invl_gen,
907     struct pmap_invl_gen *p)
908 {
909 	struct pmap_invl_gen prev, new_prev;
910 	u_long mygen;
911 
912 	/*
913 	 * Load invl_gen->gen after setting PMAP_INVL_GEN_NEXT_INVALID in
914 	 * invl_gen->next.  This prevents larger generations from
915 	 * propagating to our invl_gen->gen.  The lock prefix in
916 	 * atomic_set_ptr() works as a seq_cst fence.
917 	 */
918 	mygen = atomic_load_long(&invl_gen->gen);
919 
920 	if (!pmap_di_load_invl(p, &prev) || prev.next != invl_gen)
921 		return (false);
922 
923 	KASSERT(prev.gen < mygen,
924 	    ("invalid di gen sequence %lu %lu", prev.gen, mygen));
925 	new_prev.gen = mygen;
926 	new_prev.next = (void *)((uintptr_t)invl_gen->next &
927 	    ~PMAP_INVL_GEN_NEXT_INVALID);
928 
929 	/* Formal fence between load of prev and storing update to it. */
930 	atomic_thread_fence_rel();
931 
932 	return (pmap_di_store_invl(p, &prev, &new_prev));
933 }
934 
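/*
 * Lockless version of pmap_delayed_invl_finish().  Find our predecessor
 * p in the list, mark our own next pointer PMAP_INVL_GEN_NEXT_INVALID so
 * that concurrent walkers retry, and then splice ourselves out with a
 * 16-byte CAS on p (pmap_delayed_invl_finish_u_crit()), propagating our
 * generation to p.  Finally wake any waiters and restore the priority
 * saved by pmap_delayed_invl_start_u().
 */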
935 static void
936 pmap_delayed_invl_finish_u(void)
937 {
938 	struct pmap_invl_gen *invl_gen, *p;
939 	struct thread *td;
940 	struct lock_delay_arg lda;
941 	uintptr_t prevl;
942 
943 	td = curthread;
944 	invl_gen = &td->td_md.md_invl_gen;
945 	KASSERT(invl_gen->gen != 0, ("missed invl_start: gen 0"));
946 	KASSERT(((uintptr_t)invl_gen->next & PMAP_INVL_GEN_NEXT_INVALID) == 0,
947 	    ("missed invl_start: INVALID"));
948 	lock_delay_arg_init(&lda, &di_delay);
949 
950 again:
951 	for (p = &pmap_invl_gen_head; p != NULL; p = (void *)prevl) {
952 		prevl = (uintptr_t)atomic_load_ptr(&p->next);
953 		if ((prevl & PMAP_INVL_GEN_NEXT_INVALID) != 0) {
954 			PV_STAT(counter_u64_add(invl_finish_restart, 1));
955 			lock_delay(&lda);
956 			goto again;
957 		}
958 		if ((void *)prevl == invl_gen)
959 			break;
960 	}
961 
962 	/*
963 	 * It is legitimate not to find ourselves on the list if a
964 	 * thread before us finished its DI and started a new one.
965 	 */
966 	if (__predict_false(p == NULL)) {
967 		PV_STAT(counter_u64_add(invl_finish_restart, 1));
968 		lock_delay(&lda);
969 		goto again;
970 	}
971 
972 	critical_enter();
973 	atomic_set_ptr((uintptr_t *)&invl_gen->next,
974 	    PMAP_INVL_GEN_NEXT_INVALID);
975 	if (!pmap_delayed_invl_finish_u_crit(invl_gen, p)) {
976 		atomic_clear_ptr((uintptr_t *)&invl_gen->next,
977 		    PMAP_INVL_GEN_NEXT_INVALID);
978 		critical_exit();
979 		PV_STAT(counter_u64_add(invl_finish_restart, 1));
980 		lock_delay(&lda);
981 		goto again;
982 	}
983 	critical_exit();
984 	if (atomic_load_int(&pmap_invl_waiters) > 0)
985 		pmap_delayed_invl_finish_unblock(0);
986 	if (invl_gen->saved_pri != 0) {
987 		thread_lock(td);
988 		sched_prio(td, invl_gen->saved_pri);
989 		thread_unlock(td);
990 	}
991 }
992 
993 #ifdef DDB
994 DB_SHOW_COMMAND(di_queue, pmap_di_queue)
995 {
996 	struct pmap_invl_gen *p, *pn;
997 	struct thread *td;
998 	uintptr_t nextl;
999 	bool first;
1000 
1001 	for (p = &pmap_invl_gen_head, first = true; p != NULL; p = pn,
1002 	    first = false) {
1003 		nextl = (uintptr_t)atomic_load_ptr(&p->next);
1004 		pn = (void *)(nextl & ~PMAP_INVL_GEN_NEXT_INVALID);
1005 		td = first ? NULL : __containerof(p, struct thread,
1006 		    td_md.md_invl_gen);
1007 		db_printf("gen %lu inv %d td %p tid %d\n", p->gen,
1008 		    (nextl & PMAP_INVL_GEN_NEXT_INVALID) != 0, td,
1009 		    td != NULL ? td->td_tid : -1);
1010 	}
1011 }
1012 #endif
1013 
1014 #ifdef PV_STATS
1015 static COUNTER_U64_DEFINE_EARLY(invl_wait);
1016 SYSCTL_COUNTER_U64(_vm_pmap, OID_AUTO, invl_wait,
1017     CTLFLAG_RD, &invl_wait,
1018     "Number of times DI invalidation blocked pmap_remove_all/write");
1019 
1020 static COUNTER_U64_DEFINE_EARLY(invl_wait_slow);
1021 SYSCTL_COUNTER_U64(_vm_pmap, OID_AUTO, invl_wait_slow, CTLFLAG_RD,
1022      &invl_wait_slow, "Number of slow invalidation waits for lockless DI");
1023 
1024 #endif
1025 
1026 #ifdef NUMA
1027 static u_long *
1028 pmap_delayed_invl_genp(vm_page_t m)
1029 {
1030 	vm_paddr_t pa;
1031 	u_long *gen;
1032 
1033 	pa = VM_PAGE_TO_PHYS(m);
1034 	if (__predict_false((pa) > pmap_last_pa))
1035 		gen = &pv_dummy_large.pv_invl_gen;
1036 	else
1037 		gen = &(pa_to_pmdp(pa)->pv_invl_gen);
1038 
1039 	return (gen);
1040 }
1041 #else
1042 static u_long *
1043 pmap_delayed_invl_genp(vm_page_t m)
1044 {
1045 
1046 	return (&pv_invl_gen[pa_index(VM_PAGE_TO_PHYS(m)) % NPV_LIST_LOCKS]);
1047 }
1048 #endif
1049 
1050 static void
1051 pmap_delayed_invl_callout_func(void *arg __unused)
1052 {
1053 
1054 	if (atomic_load_int(&pmap_invl_waiters) == 0)
1055 		return;
1056 	pmap_delayed_invl_finish_unblock(0);
1057 }
1058 
1059 static void
1060 pmap_delayed_invl_callout_init(void *arg __unused)
1061 {
1062 
1063 	if (pmap_di_locked())
1064 		return;
1065 	callout_init(&pmap_invl_callout, 1);
1066 	pmap_invl_callout_inited = true;
1067 }
1068 SYSINIT(pmap_di_callout, SI_SUB_CPU + 1, SI_ORDER_ANY,
1069     pmap_delayed_invl_callout_init, NULL);
1070 
1071 /*
1072  * Ensure that all currently executing DI blocks that need to flush the
1073  * TLB for the given page m have actually flushed the TLB by the time
1074  * this function returns.  If the page m has an empty PV list and we call
1075  * pmap_delayed_invl_wait(), upon its return we know that no CPU has a
1076  * valid mapping for the page m in either its page table or TLB.
1077  *
1078  * This function works by blocking until the global DI generation
1079  * number catches up with the generation number associated with the
1080  * given page m and its PV list.  Since this function's callers
1081  * typically own an object lock and sometimes own a page lock, it
1082  * cannot sleep.  Instead, it blocks on a turnstile to relinquish the
1083  * processor.
1084  */
1085 static void
1086 pmap_delayed_invl_wait_l(vm_page_t m)
1087 {
1088 	u_long *m_gen;
1089 #ifdef PV_STATS
1090 	bool accounted = false;
1091 #endif
1092 
1093 	m_gen = pmap_delayed_invl_genp(m);
1094 	while (*m_gen > pmap_invl_gen) {
1095 #ifdef PV_STATS
1096 		if (!accounted) {
1097 			counter_u64_add(invl_wait, 1);
1098 			accounted = true;
1099 		}
1100 #endif
1101 		pmap_delayed_invl_wait_block(m_gen, &pmap_invl_gen);
1102 	}
1103 }
1104 
1105 static void
1106 pmap_delayed_invl_wait_u(vm_page_t m)
1107 {
1108 	u_long *m_gen;
1109 	struct lock_delay_arg lda;
1110 	bool fast;
1111 
1112 	fast = true;
1113 	m_gen = pmap_delayed_invl_genp(m);
1114 	lock_delay_arg_init(&lda, &di_delay);
1115 	while (*m_gen > atomic_load_long(&pmap_invl_gen_head.gen)) {
1116 		if (fast || !pmap_invl_callout_inited) {
1117 			PV_STAT(counter_u64_add(invl_wait, 1));
1118 			lock_delay(&lda);
1119 			fast = false;
1120 		} else {
1121 			/*
1122 			 * The page's invalidation generation number
1123 			 * is still above the last completed DI generation.
1124 			 * Prepare to block so that we do not waste
1125 			 * CPU cycles or worse, suffer livelock.
1126 			 *
1127 			 * Since it is impossible to block without
1128 			 * racing with pmap_delayed_invl_finish_u(),
1129 			 * prepare for the race by incrementing
1130 			 * pmap_invl_waiters and arming a 1-tick
1131 			 * callout which will unblock us if we lose
1132 			 * the race.
1133 			 */
1134 			atomic_add_int(&pmap_invl_waiters, 1);
1135 
1136 			/*
1137 			 * Re-check the page's invalidation
1138 			 * generation after incrementing
1139 			 * pmap_invl_waiters, so that there is no race
1140 			 * with pmap_delayed_invl_finish_u() setting
1141 			 * the page generation and checking
1142 			 * pmap_invl_waiters.  The only race allowed
1143 			 * is for a missed unblock, which is handled
1144 			 * by the callout.
1145 			 */
1146 			if (*m_gen >
1147 			    atomic_load_long(&pmap_invl_gen_head.gen)) {
1148 				callout_reset(&pmap_invl_callout, 1,
1149 				    pmap_delayed_invl_callout_func, NULL);
1150 				PV_STAT(counter_u64_add(invl_wait_slow, 1));
1151 				pmap_delayed_invl_wait_block(m_gen,
1152 				    &pmap_invl_gen_head.gen);
1153 			}
1154 			atomic_add_int(&pmap_invl_waiters, -1);
1155 		}
1156 	}
1157 }
1158 
1159 DEFINE_IFUNC(, void, pmap_thread_init_invl_gen, (struct thread *))
1160 {
1161 
1162 	return (pmap_di_locked() ? pmap_thread_init_invl_gen_l :
1163 	    pmap_thread_init_invl_gen_u);
1164 }
1165 
1166 DEFINE_IFUNC(static, void, pmap_delayed_invl_start, (void))
1167 {
1168 
1169 	return (pmap_di_locked() ? pmap_delayed_invl_start_l :
1170 	    pmap_delayed_invl_start_u);
1171 }
1172 
1173 DEFINE_IFUNC(static, void, pmap_delayed_invl_finish, (void))
1174 {
1175 
1176 	return (pmap_di_locked() ? pmap_delayed_invl_finish_l :
1177 	    pmap_delayed_invl_finish_u);
1178 }
1179 
1180 DEFINE_IFUNC(static, void, pmap_delayed_invl_wait, (vm_page_t))
1181 {
1182 
1183 	return (pmap_di_locked() ? pmap_delayed_invl_wait_l :
1184 	    pmap_delayed_invl_wait_u);
1185 }
1186 
1187 /*
1188  * Mark the page m's PV list as participating in the current thread's
1189  * DI block.  Any threads concurrently using m's PV list to remove or
1190  * restrict all mappings to m will wait for the current thread's DI
1191  * block to complete before proceeding.
1192  *
1193  * The function works by setting the DI generation number for m's PV
1194  * list to at least the DI generation number of the current thread.
1195  * This forces a caller of pmap_delayed_invl_wait() to block until
1196  * the current thread calls pmap_delayed_invl_finish().
1197  */
1198 static void
1199 pmap_delayed_invl_page(vm_page_t m)
1200 {
1201 	u_long gen, *m_gen;
1202 
1203 	rw_assert(VM_PAGE_TO_PV_LIST_LOCK(m), RA_WLOCKED);
1204 	gen = curthread->td_md.md_invl_gen.gen;
1205 	if (gen == 0)
1206 		return;
1207 	m_gen = pmap_delayed_invl_genp(m);
1208 	if (*m_gen < gen)
1209 		*m_gen = gen;
1210 }
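
/*
 * A rough sketch (not taken verbatim from any one function) of how the
 * mapping-removal paths in this file use the DI interface; the real code
 * interleaves these steps with PV list locking and chooses among the
 * pmap_invalidate_page/range/all() variants:
 *
 *	pmap_delayed_invl_start();
 *	PMAP_LOCK(pmap);
 *	... clear PTEs, pmap_delayed_invl_page(m) for each unmapped page ...
 *	pmap_invalidate_range(pmap, sva, eva);
 *	PMAP_UNLOCK(pmap);
 *	pmap_delayed_invl_finish();
 *
 * Consumers such as pmap_remove_all() and pmap_remove_write() call
 * pmap_delayed_invl_wait(m) before assuming that no CPU still holds a
 * stale mapping of m.
 */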
1211 
1212 /*
1213  * Crashdump maps.
1214  */
1215 static caddr_t crashdumpmap;
1216 
1217 /*
1218  * Internal flags for pmap_enter()'s helper functions.
1219  */
1220 #define	PMAP_ENTER_NORECLAIM	0x1000000	/* Don't reclaim PV entries. */
1221 #define	PMAP_ENTER_NOREPLACE	0x2000000	/* Don't replace mappings. */
1222 
1223 /*
1224  * Internal flags for pmap_mapdev_internal() and
1225  * pmap_change_props_locked().
1226  */
1227 #define	MAPDEV_FLUSHCACHE	0x00000001	/* Flush cache after mapping. */
1228 #define	MAPDEV_SETATTR		0x00000002	/* Modify existing attrs. */
1229 #define	MAPDEV_ASSERTVALID	0x00000004	/* Assert mapping validity. */
1230 
1231 TAILQ_HEAD(pv_chunklist, pv_chunk);
1232 
1233 static void	free_pv_chunk(struct pv_chunk *pc);
1234 static void	free_pv_chunk_batch(struct pv_chunklist *batch);
1235 static void	free_pv_entry(pmap_t pmap, pv_entry_t pv);
1236 static pv_entry_t get_pv_entry(pmap_t pmap, struct rwlock **lockp);
1237 static int	popcnt_pc_map_pq(uint64_t *map);
1238 static vm_page_t reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp);
1239 static void	reserve_pv_entries(pmap_t pmap, int needed,
1240 		    struct rwlock **lockp);
1241 static void	pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
1242 		    struct rwlock **lockp);
1243 static bool	pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, pd_entry_t pde,
1244 		    u_int flags, struct rwlock **lockp);
1245 #if VM_NRESERVLEVEL > 0
1246 static void	pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
1247 		    struct rwlock **lockp);
1248 #endif
1249 static void	pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
1250 static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
1251 		    vm_offset_t va);
1252 
1253 static void	pmap_abort_ptp(pmap_t pmap, vm_offset_t va, vm_page_t mpte);
1254 static int pmap_change_props_locked(vm_offset_t va, vm_size_t size,
1255     vm_prot_t prot, int mode, int flags);
1256 static boolean_t pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va);
1257 static boolean_t pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde,
1258     vm_offset_t va, struct rwlock **lockp);
1259 static boolean_t pmap_demote_pdpe(pmap_t pmap, pdp_entry_t *pdpe,
1260     vm_offset_t va);
1261 static bool	pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m,
1262 		    vm_prot_t prot, struct rwlock **lockp);
1263 static int	pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde,
1264 		    u_int flags, vm_page_t m, struct rwlock **lockp);
1265 static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
1266     vm_page_t m, vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp);
1267 static void pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte);
1268 static int pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte, bool promoted);
1269 static void pmap_invalidate_cache_range_selfsnoop(vm_offset_t sva,
1270     vm_offset_t eva);
1271 static void pmap_invalidate_cache_range_all(vm_offset_t sva,
1272     vm_offset_t eva);
1273 static void pmap_invalidate_pde_page(pmap_t pmap, vm_offset_t va,
1274 		    pd_entry_t pde);
1275 static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);
1276 static vm_page_t pmap_large_map_getptp_unlocked(void);
1277 static vm_paddr_t pmap_large_map_kextract(vm_offset_t va);
1278 #if VM_NRESERVLEVEL > 0
1279 static void pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
1280     struct rwlock **lockp);
1281 #endif
1282 static boolean_t pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva,
1283     vm_prot_t prot);
1284 static void pmap_pte_props(pt_entry_t *pte, u_long bits, u_long mask);
1285 static void pmap_pti_add_kva_locked(vm_offset_t sva, vm_offset_t eva,
1286     bool exec);
1287 static pdp_entry_t *pmap_pti_pdpe(vm_offset_t va);
1288 static pd_entry_t *pmap_pti_pde(vm_offset_t va);
1289 static void pmap_pti_wire_pte(void *pte);
1290 static int pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
1291     struct spglist *free, struct rwlock **lockp);
1292 static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva,
1293     pd_entry_t ptepde, struct spglist *free, struct rwlock **lockp);
1294 static vm_page_t pmap_remove_pt_page(pmap_t pmap, vm_offset_t va);
1295 static void pmap_remove_page(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
1296     struct spglist *free);
1297 static bool	pmap_remove_ptes(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
1298 		    pd_entry_t *pde, struct spglist *free,
1299 		    struct rwlock **lockp);
1300 static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
1301     vm_page_t m, struct rwlock **lockp);
1302 static void pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
1303     pd_entry_t newpde);
1304 static void pmap_update_pde_invalidate(pmap_t, vm_offset_t va, pd_entry_t pde);
1305 
1306 static pd_entry_t *pmap_alloc_pde(pmap_t pmap, vm_offset_t va, vm_page_t *pdpgp,
1307 		struct rwlock **lockp);
1308 static vm_page_t pmap_allocpte_alloc(pmap_t pmap, vm_pindex_t ptepindex,
1309 		struct rwlock **lockp, vm_offset_t va);
1310 static vm_page_t pmap_allocpte_nosleep(pmap_t pmap, vm_pindex_t ptepindex,
1311 		struct rwlock **lockp, vm_offset_t va);
1312 static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va,
1313 		struct rwlock **lockp);
1314 
1315 static void _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m,
1316     struct spglist *free);
1317 static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t, struct spglist *);
1318 
1319 static vm_page_t pmap_alloc_pt_page(pmap_t, vm_pindex_t, int);
1320 static void pmap_free_pt_page(pmap_t, vm_page_t, bool);
1321 
1322 /********************/
1323 /* Inline functions */
1324 /********************/
1325 
1326 /*
1327  * Return non-clipped indexes for a given VA; these are the page table
1328  * page indexes at the corresponding levels.
1329  */
1330 static __inline vm_pindex_t
1331 pmap_pde_pindex(vm_offset_t va)
1332 {
1333 	return (va >> PDRSHIFT);
1334 }
1335 
1336 static __inline vm_pindex_t
1337 pmap_pdpe_pindex(vm_offset_t va)
1338 {
1339 	return (NUPDE + (va >> PDPSHIFT));
1340 }
1341 
1342 static __inline vm_pindex_t
1343 pmap_pml4e_pindex(vm_offset_t va)
1344 {
1345 	return (NUPDE + NUPDPE + (va >> PML4SHIFT));
1346 }
1347 
1348 static __inline vm_pindex_t
1349 pmap_pml5e_pindex(vm_offset_t va)
1350 {
1351 	return (NUPDE + NUPDPE + NUPML4E + (va >> PML5SHIFT));
1352 }
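
/*
 * The offsets NUPDE, NUPDPE and NUPML4E keep the per-level ranges
 * disjoint, so every page table page gets a unique pindex: [0, NUPDE)
 * names the PT pages referenced by PDEs (one per 2M of VA),
 * [NUPDE, NUPDE + NUPDPE) the PD pages referenced by PDPEs, and the
 * ranges above those the pages referenced from the PML4 and PML5 levels.
 * For example, the PT page backing va has pindex va >> PDRSHIFT while
 * the PD page above it has pindex NUPDE + (va >> PDPSHIFT).
 */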
1353 
1354 static __inline pml4_entry_t *
1355 pmap_pml5e(pmap_t pmap, vm_offset_t va)
1356 {
1357 
1358 	MPASS(pmap_is_la57(pmap));
1359 	return (&pmap->pm_pmltop[pmap_pml5e_index(va)]);
1360 }
1361 
1362 static __inline pml4_entry_t *
1363 pmap_pml5e_u(pmap_t pmap, vm_offset_t va)
1364 {
1365 
1366 	MPASS(pmap_is_la57(pmap));
1367 	return (&pmap->pm_pmltopu[pmap_pml5e_index(va)]);
1368 }
1369 
1370 static __inline pml4_entry_t *
1371 pmap_pml5e_to_pml4e(pml5_entry_t *pml5e, vm_offset_t va)
1372 {
1373 	pml4_entry_t *pml4e;
1374 
1375 	/* XXX MPASS(pmap_is_la57(pmap)); */
1376 	pml4e = (pml4_entry_t *)PHYS_TO_DMAP(*pml5e & PG_FRAME);
1377 	return (&pml4e[pmap_pml4e_index(va)]);
1378 }
1379 
1380 /* Return a pointer to the PML4 slot that corresponds to a VA */
1381 static __inline pml4_entry_t *
1382 pmap_pml4e(pmap_t pmap, vm_offset_t va)
1383 {
1384 	pml5_entry_t *pml5e;
1385 	pml4_entry_t *pml4e;
1386 	pt_entry_t PG_V;
1387 
1388 	if (pmap_is_la57(pmap)) {
1389 		pml5e = pmap_pml5e(pmap, va);
1390 		PG_V = pmap_valid_bit(pmap);
1391 		if ((*pml5e & PG_V) == 0)
1392 			return (NULL);
1393 		pml4e = (pml4_entry_t *)PHYS_TO_DMAP(*pml5e & PG_FRAME);
1394 	} else {
1395 		pml4e = pmap->pm_pmltop;
1396 	}
1397 	return (&pml4e[pmap_pml4e_index(va)]);
1398 }
1399 
1400 static __inline pml4_entry_t *
1401 pmap_pml4e_u(pmap_t pmap, vm_offset_t va)
1402 {
1403 	MPASS(!pmap_is_la57(pmap));
1404 	return (&pmap->pm_pmltopu[pmap_pml4e_index(va)]);
1405 }
1406 
1407 /* Return a pointer to the PDP slot that corresponds to a VA */
1408 static __inline pdp_entry_t *
1409 pmap_pml4e_to_pdpe(pml4_entry_t *pml4e, vm_offset_t va)
1410 {
1411 	pdp_entry_t *pdpe;
1412 
1413 	pdpe = (pdp_entry_t *)PHYS_TO_DMAP(*pml4e & PG_FRAME);
1414 	return (&pdpe[pmap_pdpe_index(va)]);
1415 }
1416 
1417 /* Return a pointer to the PDP slot that corresponds to a VA */
1418 static __inline pdp_entry_t *
1419 pmap_pdpe(pmap_t pmap, vm_offset_t va)
1420 {
1421 	pml4_entry_t *pml4e;
1422 	pt_entry_t PG_V;
1423 
1424 	PG_V = pmap_valid_bit(pmap);
1425 	pml4e = pmap_pml4e(pmap, va);
1426 	if (pml4e == NULL || (*pml4e & PG_V) == 0)
1427 		return (NULL);
1428 	return (pmap_pml4e_to_pdpe(pml4e, va));
1429 }
1430 
1431 /* Return a pointer to the PD slot that corresponds to a VA */
1432 static __inline pd_entry_t *
1433 pmap_pdpe_to_pde(pdp_entry_t *pdpe, vm_offset_t va)
1434 {
1435 	pd_entry_t *pde;
1436 
1437 	KASSERT((*pdpe & PG_PS) == 0,
1438 	    ("%s: pdpe %#lx is a leaf", __func__, *pdpe));
1439 	pde = (pd_entry_t *)PHYS_TO_DMAP(*pdpe & PG_FRAME);
1440 	return (&pde[pmap_pde_index(va)]);
1441 }
1442 
1443 /* Return a pointer to the PD slot that corresponds to a VA */
1444 static __inline pd_entry_t *
1445 pmap_pde(pmap_t pmap, vm_offset_t va)
1446 {
1447 	pdp_entry_t *pdpe;
1448 	pt_entry_t PG_V;
1449 
1450 	PG_V = pmap_valid_bit(pmap);
1451 	pdpe = pmap_pdpe(pmap, va);
1452 	if (pdpe == NULL || (*pdpe & PG_V) == 0)
1453 		return (NULL);
1454 	KASSERT((*pdpe & PG_PS) == 0,
1455 	    ("pmap_pde for 1G page, pmap %p va %#lx", pmap, va));
1456 	return (pmap_pdpe_to_pde(pdpe, va));
1457 }
1458 
1459 /* Return a pointer to the PT slot that corresponds to a VA */
1460 static __inline pt_entry_t *
1461 pmap_pde_to_pte(pd_entry_t *pde, vm_offset_t va)
1462 {
1463 	pt_entry_t *pte;
1464 
1465 	KASSERT((*pde & PG_PS) == 0,
1466 	    ("%s: pde %#lx is a leaf", __func__, *pde));
1467 	pte = (pt_entry_t *)PHYS_TO_DMAP(*pde & PG_FRAME);
1468 	return (&pte[pmap_pte_index(va)]);
1469 }
1470 
1471 /* Return a pointer to the PT slot that corresponds to a VA */
1472 static __inline pt_entry_t *
1473 pmap_pte(pmap_t pmap, vm_offset_t va)
1474 {
1475 	pd_entry_t *pde;
1476 	pt_entry_t PG_V;
1477 
1478 	PG_V = pmap_valid_bit(pmap);
1479 	pde = pmap_pde(pmap, va);
1480 	if (pde == NULL || (*pde & PG_V) == 0)
1481 		return (NULL);
1482 	if ((*pde & PG_PS) != 0)	/* compat with i386 pmap_pte() */
1483 		return ((pt_entry_t *)pde);
1484 	return (pmap_pde_to_pte(pde, va));
1485 }
1486 
1487 static __inline void
1488 pmap_resident_count_adj(pmap_t pmap, int count)
1489 {
1490 
1491 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1492 	KASSERT(pmap->pm_stats.resident_count + count >= 0,
1493 	    ("pmap %p resident count underflow %ld %d", pmap,
1494 	    pmap->pm_stats.resident_count, count));
1495 	pmap->pm_stats.resident_count += count;
1496 }
1497 
1498 static __inline void
1499 pmap_pt_page_count_pinit(pmap_t pmap, int count)
1500 {
1501 	KASSERT(pmap->pm_stats.resident_count + count >= 0,
1502 	    ("pmap %p resident count underflow %ld %d", pmap,
1503 	    pmap->pm_stats.resident_count, count));
1504 	pmap->pm_stats.resident_count += count;
1505 }
1506 
1507 static __inline void
1508 pmap_pt_page_count_adj(pmap_t pmap, int count)
1509 {
1510 	if (pmap == kernel_pmap)
1511 		counter_u64_add(kernel_pt_page_count, count);
1512 	else {
1513 		if (pmap != NULL)
1514 			pmap_resident_count_adj(pmap, count);
1515 		counter_u64_add(user_pt_page_count, count);
1516 	}
1517 }
1518 
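/*
 * vtopte() and vtopde() rely on the recursive page-table mapping: one
 * top-level slot points back at the top-level page table page, so all
 * PTEs appear as one linear array at PTmap and all PDEs at PDmap.  The
 * entry for a kernel va is found by shifting va down to an index and
 * masking with vtoptem/vtopdem; the LA57 bootstrap re-points these
 * variables for the five-level layout.
 */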
1519 pt_entry_t vtoptem __read_mostly = ((1ul << (NPTEPGSHIFT + NPDEPGSHIFT +
1520     NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1) << 3;
1521 vm_offset_t PTmap __read_mostly = (vm_offset_t)P4Tmap;
1522 
1523 PMAP_INLINE pt_entry_t *
1524 vtopte(vm_offset_t va)
1525 {
1526 	KASSERT(va >= VM_MAXUSER_ADDRESS, ("vtopte on a uva/gpa 0x%0lx", va));
1527 
1528 	return ((pt_entry_t *)(PTmap + ((va >> (PAGE_SHIFT - 3)) & vtoptem)));
1529 }
1530 
1531 pd_entry_t vtopdem __read_mostly = ((1ul << (NPDEPGSHIFT + NPDPEPGSHIFT +
1532     NPML4EPGSHIFT)) - 1) << 3;
1533 vm_offset_t PDmap __read_mostly = (vm_offset_t)P4Dmap;
1534 
1535 static __inline pd_entry_t *
1536 vtopde(vm_offset_t va)
1537 {
1538 	KASSERT(va >= VM_MAXUSER_ADDRESS, ("vtopde on a uva/gpa 0x%0lx", va));
1539 
1540 	return ((pt_entry_t *)(PDmap + ((va >> (PDRSHIFT - 3)) & vtopdem)));
1541 }
1542 
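/*
 * Boot-time bump allocator: carve n zeroed pages out of *firstaddr and
 * advance it.  Using the physical address as a pointer works here only
 * because the early boot page tables still map this memory 1:1.
 */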
1543 static u_int64_t
1544 allocpages(vm_paddr_t *firstaddr, int n)
1545 {
1546 	u_int64_t ret;
1547 
1548 	ret = *firstaddr;
1549 	bzero((void *)ret, n * PAGE_SIZE);
1550 	*firstaddr += n * PAGE_SIZE;
1551 	return (ret);
1552 }
1553 
1554 CTASSERT(powerof2(NDMPML4E));
1555 
1556 /* number of kernel PDP slots */
1557 #define	NKPDPE(ptpgs)		howmany(ptpgs, NPDEPG)
1558 
1559 static void
1560 nkpt_init(vm_paddr_t addr)
1561 {
1562 	int pt_pages;
1563 
1564 #ifdef NKPT
1565 	pt_pages = NKPT;
1566 #else
1567 	pt_pages = howmany(addr - kernphys, NBPDR) + 1; /* +1 for 2M hole @0 */
1568 	pt_pages += NKPDPE(pt_pages);
1569 
1570 	/*
1571 	 * Add some slop beyond the bare minimum required for bootstrapping
1572 	 * the kernel.
1573 	 *
1574 	 * This is quite important when allocating KVA for kernel modules.
1575 	 * The modules are required to be linked in the negative 2GB of
1576 	 * the address space.  If we run out of KVA in this region then
1577 	 * pmap_growkernel() will need to allocate page table pages to map
1578 	 * the entire 512GB of KVA space which is an unnecessary tax on
1579 	 * physical memory.
1580 	 *
1581 	 * Secondly, device memory mapped as part of setting up the low-
1582 	 * level console(s) is taken from KVA, starting at virtual_avail.
1583 	 * This is because cninit() is called after pmap_bootstrap() but
1584 	 * before vm_init() and pmap_init(). 20MB for a frame buffer is
1585 	 * not uncommon.
1586 	 */
1587 	pt_pages += 32;		/* 64MB additional slop. */
1588 #endif
1589 	nkpt = pt_pages;
1590 }
1591 
1592 /*
1593  * Returns the proper write/execute permission for a physical page that is
1594  * part of the initial boot allocations.
1595  *
1596  * If the page has kernel text, it is marked as read-only. If the page has
1597  * kernel read-only data, it is marked as read-only/not-executable. If the
1598  * page has only read-write data, it is marked as read-write/not-executable.
1599  * If the page is below/above the kernel range, it is marked as read-write.
1600  *
1601  * This function operates on 2M pages, since we map the kernel space that
1602  * way.
1603  */
1604 static inline pt_entry_t
1605 bootaddr_rwx(vm_paddr_t pa)
1606 {
1607 	/*
1608 	 * The kernel is loaded at a 2MB-aligned address, and memory below that
1609 	 * need not be executable.  The .bss section is padded to a 2MB
1610 	 * boundary, so memory following the kernel need not be executable
1611 	 * either.  Preloaded kernel modules have their mapping permissions
1612 	 * fixed up by the linker.
1613 	 */
1614 	if (pa < trunc_2mpage(kernphys + btext - KERNSTART) ||
1615 	    pa >= trunc_2mpage(kernphys + _end - KERNSTART))
1616 		return (X86_PG_RW | pg_nx);
1617 
1618 	/*
1619 	 * The linker should ensure that the read-only and read-write
1620 	 * portions don't share the same 2M page, so this shouldn't
1621 	 * impact read-only data. However, in any case, any page with
1622 	 * read-write data needs to be read-write.
1623 	 */
1624 	if (pa >= trunc_2mpage(kernphys + brwsection - KERNSTART))
1625 		return (X86_PG_RW | pg_nx);
1626 
1627 	/*
1628 	 * Mark any 2M page containing kernel text as read-only. Mark
1629 	 * other pages with read-only data as read-only and not executable.
1630 	 * (It is likely a small portion of the read-only data section will
1631 	 * be marked as read-only, but executable. This should be acceptable
1632 	 * since the read-only protection will keep the data from changing.)
1633 	 * Note that fixups to the .text section will still work until we
1634 	 * set CR0.WP.
1635 	 */
1636 	if (pa < round_2mpage(kernphys + etext - KERNSTART))
1637 		return (0);
1638 	return (pg_nx);
1639 }
1640 
1641 static void
1642 create_pagetables(vm_paddr_t *firstaddr)
1643 {
1644 	pd_entry_t *pd_p;
1645 	pdp_entry_t *pdp_p;
1646 	pml4_entry_t *p4_p;
1647 	uint64_t DMPDkernphys;
1648 	vm_paddr_t pax;
1649 #ifdef KASAN
1650 	pt_entry_t *pt_p;
1651 	uint64_t KASANPDphys, KASANPTphys, KASANphys;
1652 	vm_offset_t kasankernbase;
1653 	int kasankpdpi, kasankpdi, nkasanpte;
1654 #endif
1655 	int i, j, ndm1g, nkpdpe, nkdmpde;
1656 
1657 	/* Allocate page table pages for the direct map */
1658 	ndmpdp = howmany(ptoa(Maxmem), NBPDP);
1659 	if (ndmpdp < 4)		/* Minimum 4GB of dirmap */
1660 		ndmpdp = 4;
1661 	ndmpdpphys = howmany(ndmpdp, NPDPEPG);
1662 	if (ndmpdpphys > NDMPML4E) {
1663 		/*
1664 		 * Each NDMPML4E allows 512 GB, so limit to that,
1665 		 * and then readjust ndmpdp and ndmpdpphys.
1666 		 */
1667 		printf("NDMPML4E limits system to %d GB\n", NDMPML4E * 512);
1668 		Maxmem = atop(NDMPML4E * NBPML4);
1669 		ndmpdpphys = NDMPML4E;
1670 		ndmpdp = NDMPML4E * NPDEPG;
1671 	}
1672 	DMPDPphys = allocpages(firstaddr, ndmpdpphys);
1673 	ndm1g = 0;
1674 	if ((amd_feature & AMDID_PAGE1GB) != 0) {
1675 		/*
1676 		 * Calculate the number of 1G pages that will fully fit in
1677 		 * Maxmem.
1678 		 */
1679 		ndm1g = ptoa(Maxmem) >> PDPSHIFT;
1680 
1681 		/*
1682 		 * Allocate 2M pages for the kernel. These will be used in
1683 		 * place of the one or more 1G pages from ndm1g that map
1684 		 * kernel memory into DMAP.
1685 		 */
1686 		nkdmpde = howmany((vm_offset_t)brwsection - KERNSTART +
1687 		    kernphys - rounddown2(kernphys, NBPDP), NBPDP);
1688 		DMPDkernphys = allocpages(firstaddr, nkdmpde);
1689 	}
1690 	if (ndm1g < ndmpdp)
1691 		DMPDphys = allocpages(firstaddr, ndmpdp - ndm1g);
1692 	dmaplimit = (vm_paddr_t)ndmpdp << PDPSHIFT;
1693 
1694 	/* Allocate pages. */
1695 	KPML4phys = allocpages(firstaddr, 1);
1696 	KPDPphys = allocpages(firstaddr, NKPML4E);
1697 #ifdef KASAN
1698 	KASANPDPphys = allocpages(firstaddr, NKASANPML4E);
1699 	KASANPDphys = allocpages(firstaddr, 1);
1700 #endif
1701 #ifdef KMSAN
1702 	/*
1703 	 * The KMSAN shadow maps are initially left unpopulated, since there is
1704 	 * no need to shadow memory above KERNBASE.
1705 	 */
1706 	KMSANSHADPDPphys = allocpages(firstaddr, NKMSANSHADPML4E);
1707 	KMSANORIGPDPphys = allocpages(firstaddr, NKMSANORIGPML4E);
1708 #endif
1709 
1710 	/*
1711 	 * Allocate the initial number of kernel page table pages required to
1712 	 * bootstrap.  We defer this until after all memory-size dependent
1713 	 * allocations are done (e.g. direct map), so that we don't have to
1714 	 * build in too much slop in our estimate.
1715 	 *
1716 	 * Note that when NKPML4E > 1, we have an empty page underneath
1717 	 * all but the KPML4I'th one, so we need NKPML4E-1 extra (zeroed)
1718 	 * pages.  (pmap_enter requires a PD page to exist for each KPML4E.)
1719 	 */
1720 	nkpt_init(*firstaddr);
1721 	nkpdpe = NKPDPE(nkpt);
1722 
1723 	KPTphys = allocpages(firstaddr, nkpt);
1724 	KPDphys = allocpages(firstaddr, nkpdpe);
1725 
1726 #ifdef KASAN
1727 	nkasanpte = howmany(nkpt, KASAN_SHADOW_SCALE);
1728 	KASANPTphys = allocpages(firstaddr, nkasanpte);
1729 	KASANphys = allocpages(firstaddr, nkasanpte * NPTEPG);
1730 #endif
1731 
1732 	/*
1733 	 * Connect the zero-filled PT pages to their PD entries.  This
1734 	 * implicitly maps the PT pages at their correct locations within
1735 	 * the PTmap.
1736 	 */
1737 	pd_p = (pd_entry_t *)KPDphys;
1738 	for (i = 0; i < nkpt; i++)
1739 		pd_p[i] = (KPTphys + ptoa(i)) | X86_PG_RW | X86_PG_V;
1740 
1741 	/*
1742 	 * Map from start of the kernel in physical memory (staging
1743 	 * area) to the end of loader preallocated memory using 2MB
1744 	 * pages.  This replaces some of the PD entries created above.
1745 	 * For compatibility, identity map 2M at the start.
1746 	 */
1747 	pd_p[0] = X86_PG_V | PG_PS | pg_g | X86_PG_M | X86_PG_A |
1748 	    X86_PG_RW | pg_nx;
1749 	for (i = 1, pax = kernphys; pax < KERNend; i++, pax += NBPDR) {
1750 		/* Preset PG_M and PG_A because demotion expects it. */
1751 		pd_p[i] = pax | X86_PG_V | PG_PS | pg_g | X86_PG_M |
1752 		    X86_PG_A | bootaddr_rwx(pax);
1753 	}
1754 
1755 	/*
1756 	 * Because we map the physical blocks in 2M pages, adjust firstaddr
1757 	 * to record the physical blocks we've actually mapped into kernel
1758 	 * virtual address space.
1759 	 */
1760 	if (*firstaddr < round_2mpage(KERNend))
1761 		*firstaddr = round_2mpage(KERNend);
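	/*
	 * For example (illustrative numbers), a KERNend of 23.5 MB is
	 * rounded up to 24 MB here, so firstaddr reflects the full 2 MB
	 * granularity of the mappings created above.
	 */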
1762 
1763 	/* And connect up the PD to the PDP (leaving room for L4 pages) */
1764 	pdp_p = (pdp_entry_t *)(KPDPphys + ptoa(KPML4I - KPML4BASE));
1765 	for (i = 0; i < nkpdpe; i++)
1766 		pdp_p[i + KPDPI] = (KPDphys + ptoa(i)) | X86_PG_RW | X86_PG_V;
1767 
1768 #ifdef KASAN
1769 	kasankernbase = kasan_md_addr_to_shad(KERNBASE);
1770 	kasankpdpi = pmap_pdpe_index(kasankernbase);
1771 	kasankpdi = pmap_pde_index(kasankernbase);
1772 
1773 	pdp_p = (pdp_entry_t *)KASANPDPphys;
1774 	pdp_p[kasankpdpi] = (KASANPDphys | X86_PG_RW | X86_PG_V | pg_nx);
1775 
1776 	pd_p = (pd_entry_t *)KASANPDphys;
1777 	for (i = 0; i < nkasanpte; i++)
1778 		pd_p[i + kasankpdi] = (KASANPTphys + ptoa(i)) | X86_PG_RW |
1779 		    X86_PG_V | pg_nx;
1780 
1781 	pt_p = (pt_entry_t *)KASANPTphys;
1782 	for (i = 0; i < nkasanpte * NPTEPG; i++)
1783 		pt_p[i] = (KASANphys + ptoa(i)) | X86_PG_RW | X86_PG_V |
1784 		    X86_PG_M | X86_PG_A | pg_nx;
1785 #endif
1786 
1787 	/*
1788 	 * Now, set up the direct map region using 2MB and/or 1GB pages.  If
1789 	 * the end of physical memory is not aligned to a 1GB page boundary,
1790 	 * then the residual physical memory is mapped with 2MB pages.  Later,
1791 	 * if pmap_mapdev{_attr}() uses the direct map for non-write-back
1792 	 * memory, pmap_change_attr() will demote any 2MB or 1GB page mappings
1793 	 * that are partially used.
1794 	 */
1795 	pd_p = (pd_entry_t *)DMPDphys;
1796 	for (i = NPDEPG * ndm1g, j = 0; i < NPDEPG * ndmpdp; i++, j++) {
1797 		pd_p[j] = (vm_paddr_t)i << PDRSHIFT;
1798 		/* Preset PG_M and PG_A because demotion expects it. */
1799 		pd_p[j] |= X86_PG_RW | X86_PG_V | PG_PS | pg_g |
1800 		    X86_PG_M | X86_PG_A | pg_nx;
1801 	}
1802 	pdp_p = (pdp_entry_t *)DMPDPphys;
1803 	for (i = 0; i < ndm1g; i++) {
1804 		pdp_p[i] = (vm_paddr_t)i << PDPSHIFT;
1805 		/* Preset PG_M and PG_A because demotion expects it. */
1806 		pdp_p[i] |= X86_PG_RW | X86_PG_V | PG_PS | pg_g |
1807 		    X86_PG_M | X86_PG_A | pg_nx;
1808 	}
1809 	for (j = 0; i < ndmpdp; i++, j++) {
1810 		pdp_p[i] = DMPDphys + ptoa(j);
1811 		pdp_p[i] |= X86_PG_RW | X86_PG_V | pg_nx;
1812 	}
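	/*
	 * Worked example (illustrative): with 6.5 GB of usable physical
	 * memory and 1 GB page support, ndm1g = 6 and ndmpdp = 7, so the
	 * first six PDP entries above are 1 GB mappings and the seventh
	 * points at a page directory of 512 2 MB mappings from DMPDphys.
	 */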
1813 
1814 	/*
1815 	 * Instead of using a 1G page for the memory containing the kernel,
1816 	 * use 2M pages with read-only and no-execute permissions.  (If using 1G
1817 	 * pages, this will partially overwrite the PDPEs above.)
1818 	 */
1819 	if (ndm1g > 0) {
1820 		pd_p = (pd_entry_t *)DMPDkernphys;
1821 		for (i = 0, pax = rounddown2(kernphys, NBPDP);
1822 		    i < NPDEPG * nkdmpde; i++, pax += NBPDR) {
1823 			pd_p[i] = pax | X86_PG_V | PG_PS | pg_g | X86_PG_M |
1824 			    X86_PG_A | pg_nx | bootaddr_rwx(pax);
1825 		}
1826 		j = rounddown2(kernphys, NBPDP) >> PDPSHIFT;
1827 		for (i = 0; i < nkdmpde; i++) {
1828 			pdp_p[i + j] = (DMPDkernphys + ptoa(i)) |
1829 			    X86_PG_RW | X86_PG_V | pg_nx;
1830 		}
1831 	}
1832 
1833 	/* And recursively map PML4 to itself in order to get PTmap */
1834 	p4_p = (pml4_entry_t *)KPML4phys;
1835 	p4_p[PML4PML4I] = KPML4phys;
1836 	p4_p[PML4PML4I] |= X86_PG_RW | X86_PG_V | pg_nx;
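	/*
	 * Illustrative sketch (not compiled; "va" is a hypothetical
	 * local): the self-referencing slot makes every page-table page
	 * visible through a linear window, so the PTE for a kernel
	 * virtual address can be read roughly as below, which mirrors
	 * what vtopte() does elsewhere in this file.
	 */
#if 0
	pt_entry_t example_pte;

	example_pte = *(pt_entry_t *)(PTmap +
	    ((va >> (PAGE_SHIFT - 3)) & vtoptem));
#endif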
1837 
1838 #ifdef KASAN
1839 	/* Connect the KASAN shadow map slots up to the PML4. */
1840 	for (i = 0; i < NKASANPML4E; i++) {
1841 		p4_p[KASANPML4I + i] = KASANPDPphys + ptoa(i);
1842 		p4_p[KASANPML4I + i] |= X86_PG_RW | X86_PG_V | pg_nx;
1843 	}
1844 #endif
1845 
1846 #ifdef KMSAN
1847 	/* Connect the KMSAN shadow map slots up to the PML4. */
1848 	for (i = 0; i < NKMSANSHADPML4E; i++) {
1849 		p4_p[KMSANSHADPML4I + i] = KMSANSHADPDPphys + ptoa(i);
1850 		p4_p[KMSANSHADPML4I + i] |= X86_PG_RW | X86_PG_V | pg_nx;
1851 	}
1852 
1853 	/* Connect the KMSAN origin map slots up to the PML4. */
1854 	for (i = 0; i < NKMSANORIGPML4E; i++) {
1855 		p4_p[KMSANORIGPML4I + i] = KMSANORIGPDPphys + ptoa(i);
1856 		p4_p[KMSANORIGPML4I + i] |= X86_PG_RW | X86_PG_V | pg_nx;
1857 	}
1858 #endif
1859 
1860 	/* Connect the Direct Map slots up to the PML4. */
1861 	for (i = 0; i < ndmpdpphys; i++) {
1862 		p4_p[DMPML4I + i] = DMPDPphys + ptoa(i);
1863 		p4_p[DMPML4I + i] |= X86_PG_RW | X86_PG_V | pg_nx;
1864 	}
1865 
1866 	/* Connect the KVA slots up to the PML4 */
1867 	for (i = 0; i < NKPML4E; i++) {
1868 		p4_p[KPML4BASE + i] = KPDPphys + ptoa(i);
1869 		p4_p[KPML4BASE + i] |= X86_PG_RW | X86_PG_V;
1870 	}
1871 
1872 	kernel_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(KPML4phys);
1873 }
1874 
1875 /*
1876  *	Bootstrap the system enough to run with virtual memory.
1877  *
1878  *	On amd64 this is called after mapping has already been enabled
1879  *	and just syncs the pmap module with what has already been done.
1880  *	[We can't call it easily with mapping off since the kernel is not
1881  *	mapped with PA == VA, hence we would have to relocate every address
1882  *	from the linked base (virtual) address "KERNBASE" to the actual
1883  *	(physical) address starting relative to 0]
1884  */
1885 void
1886 pmap_bootstrap(vm_paddr_t *firstaddr)
1887 {
1888 	vm_offset_t va;
1889 	pt_entry_t *pte, *pcpu_pte;
1890 	struct region_descriptor r_gdt;
1891 	uint64_t cr4, pcpu_phys;
1892 	u_long res;
1893 	int i;
1894 
1895 	KERNend = *firstaddr;
1896 	res = atop(KERNend - (vm_paddr_t)kernphys);
1897 
1898 	if (!pti)
1899 		pg_g = X86_PG_G;
1900 
1901 	/*
1902 	 * Create an initial set of page tables to run the kernel in.
1903 	 */
1904 	create_pagetables(firstaddr);
1905 
1906 	pcpu_phys = allocpages(firstaddr, MAXCPU);
1907 
1908 	/*
1909 	 * Add a physical memory segment (vm_phys_seg) corresponding to the
1910 	 * preallocated kernel page table pages so that vm_page structures
1911 	 * representing these pages will be created.  The vm_page structures
1912 	 * are required for promotion of the corresponding kernel virtual
1913 	 * addresses to superpage mappings.
1914 	 */
1915 	vm_phys_early_add_seg(KPTphys, KPTphys + ptoa(nkpt));
1916 
1917 	/*
1918 	 * Account for the virtual addresses mapped by create_pagetables().
1919 	 */
1920 	virtual_avail = (vm_offset_t)KERNSTART + round_2mpage(KERNend -
1921 	    (vm_paddr_t)kernphys);
1922 	virtual_end = VM_MAX_KERNEL_ADDRESS;
1923 
1924 	/*
1925 	 * Enable PG_G global pages, then switch to the kernel page
1926 	 * table from the bootstrap page table.  After the switch, it
1927 	 * is possible to enable SMEP and SMAP since PG_U bits are
1928 	 * correct now.
1929 	 */
1930 	cr4 = rcr4();
1931 	cr4 |= CR4_PGE;
1932 	load_cr4(cr4);
1933 	load_cr3(KPML4phys);
1934 	if (cpu_stdext_feature & CPUID_STDEXT_SMEP)
1935 		cr4 |= CR4_SMEP;
1936 	if (cpu_stdext_feature & CPUID_STDEXT_SMAP)
1937 		cr4 |= CR4_SMAP;
1938 	load_cr4(cr4);
1939 
1940 	/*
1941 	 * Initialize the kernel pmap (which is statically allocated).
1942 	 * Count bootstrap data as being resident in case any of this data is
1943 	 * later unmapped (using pmap_remove()) and freed.
1944 	 */
1945 	PMAP_LOCK_INIT(kernel_pmap);
1946 	kernel_pmap->pm_pmltop = kernel_pml4;
1947 	kernel_pmap->pm_cr3 = KPML4phys;
1948 	kernel_pmap->pm_ucr3 = PMAP_NO_CR3;
1949 	TAILQ_INIT(&kernel_pmap->pm_pvchunk);
1950 	kernel_pmap->pm_stats.resident_count = res;
1951 	kernel_pmap->pm_flags = pmap_flags;
1952 
1953 	/*
1954 	 * The kernel pmap is always active on all CPUs.  Once CPUs are
1955 	 * enumerated, the mask will be set equal to all_cpus.
1956 	 */
1957 	CPU_FILL(&kernel_pmap->pm_active);
1958 
1959 	/*
1960 	 * Initialize the TLB invalidations generation number lock.
1961 	 */
1962 	mtx_init(&invl_gen_mtx, "invlgn", NULL, MTX_DEF);
1963 
1964 	/*
1965 	 * Reserve some special page table entries/VA space for temporary
1966 	 * mapping of pages.
1967 	 */
1968 #define	SYSMAP(c, p, v, n)	\
1969 	v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);
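	/*
	 * For illustration (not compiled), the first use below,
	 * SYSMAP(caddr_t, CMAP1, crashdumpmap, MAXDUMPPGS), expands to:
	 */
#if 0
	crashdumpmap = (caddr_t)va;
	va += (MAXDUMPPGS * PAGE_SIZE);
	CMAP1 = pte;
	pte += MAXDUMPPGS;
#endif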
1970 
1971 	va = virtual_avail;
1972 	pte = vtopte(va);
1973 
1974 	/*
1975 	 * Crashdump maps.  The first page is reused as CMAP1 for the
1976 	 * memory test.
1977 	 */
1978 	SYSMAP(caddr_t, CMAP1, crashdumpmap, MAXDUMPPGS)
1979 	CADDR1 = crashdumpmap;
1980 
1981 	SYSMAP(struct pcpu *, pcpu_pte, __pcpu, MAXCPU);
1982 	virtual_avail = va;
1983 
1984 	for (i = 0; i < MAXCPU; i++) {
1985 		pcpu_pte[i] = (pcpu_phys + ptoa(i)) | X86_PG_V | X86_PG_RW |
1986 		    pg_g | pg_nx | X86_PG_M | X86_PG_A;
1987 	}
1988 
1989 	/*
1990 	 * Re-initialize PCPU area for BSP after switching.
1991 	 * Make hardware use gdt and common_tss from the new PCPU.
1992 	 */
1993 	STAILQ_INIT(&cpuhead);
1994 	wrmsr(MSR_GSBASE, (uint64_t)&__pcpu[0]);
1995 	pcpu_init(&__pcpu[0], 0, sizeof(struct pcpu));
1996 	amd64_bsp_pcpu_init1(&__pcpu[0]);
1997 	amd64_bsp_ist_init(&__pcpu[0]);
1998 	__pcpu[0].pc_common_tss.tss_iobase = sizeof(struct amd64tss) +
1999 	    IOPERM_BITMAP_SIZE;
2000 	memcpy(__pcpu[0].pc_gdt, temp_bsp_pcpu.pc_gdt, NGDT *
2001 	    sizeof(struct user_segment_descriptor));
2002 	gdt_segs[GPROC0_SEL].ssd_base = (uintptr_t)&__pcpu[0].pc_common_tss;
2003 	ssdtosyssd(&gdt_segs[GPROC0_SEL],
2004 	    (struct system_segment_descriptor *)&__pcpu[0].pc_gdt[GPROC0_SEL]);
2005 	r_gdt.rd_limit = NGDT * sizeof(struct user_segment_descriptor) - 1;
2006 	r_gdt.rd_base = (long)__pcpu[0].pc_gdt;
2007 	lgdt(&r_gdt);
2008 	wrmsr(MSR_GSBASE, (uint64_t)&__pcpu[0]);
2009 	ltr(GSEL(GPROC0_SEL, SEL_KPL));
2010 	__pcpu[0].pc_dynamic = temp_bsp_pcpu.pc_dynamic;
2011 	__pcpu[0].pc_acpi_id = temp_bsp_pcpu.pc_acpi_id;
2012 
2013 	/*
2014 	 * Initialize the PAT MSR.
2015 	 * pmap_init_pat() clears and sets CR4_PGE, which, as a
2016 	 * side-effect, invalidates stale PG_G TLB entries that might
2017 	 * have been created in our pre-boot environment.
2018 	 */
2019 	pmap_init_pat();
2020 
2021 	/* Initialize TLB Context Id. */
2022 	if (pmap_pcid_enabled) {
2023 		for (i = 0; i < MAXCPU; i++) {
2024 			kernel_pmap->pm_pcids[i].pm_pcid = PMAP_PCID_KERN;
2025 			kernel_pmap->pm_pcids[i].pm_gen = 1;
2026 		}
2027 
2028 		/*
2029 		 * PMAP_PCID_KERN + 1 is used for initialization of
2030 		 * proc0 pmap.  The pmap's PCID state might be used by
2031 		 * the EFIRT entry before the first context switch, so it
2032 		 * needs to be valid.
2033 		 */
2034 		PCPU_SET(pcid_next, PMAP_PCID_KERN + 2);
2035 		PCPU_SET(pcid_gen, 1);
2036 
2037 		/*
2038 		 * pcpu area for APs is zeroed during AP startup.
2039 		 * pc_pcid_next and pc_pcid_gen are initialized by AP
2040 		 * during pcpu setup.
2041 		 */
2042 		load_cr4(rcr4() | CR4_PCIDE);
2043 	}
2044 }
2045 
2046 /*
2047  * Setup the PAT MSR.
2048  */
2049 void
2050 pmap_init_pat(void)
2051 {
2052 	uint64_t pat_msr;
2053 	u_long cr0, cr4;
2054 	int i;
2055 
2056 	/* Bail if this CPU doesn't implement PAT. */
2057 	if ((cpu_feature & CPUID_PAT) == 0)
2058 		panic("no PAT??");
2059 
2060 	/* Set default PAT index table. */
2061 	for (i = 0; i < PAT_INDEX_SIZE; i++)
2062 		pat_index[i] = -1;
2063 	pat_index[PAT_WRITE_BACK] = 0;
2064 	pat_index[PAT_WRITE_THROUGH] = 1;
2065 	pat_index[PAT_UNCACHEABLE] = 3;
2066 	pat_index[PAT_WRITE_COMBINING] = 6;
2067 	pat_index[PAT_WRITE_PROTECTED] = 5;
2068 	pat_index[PAT_UNCACHED] = 2;
2069 
2070 	/*
2071 	 * Initialize default PAT entries.
2072 	 * Leave the indices 0-3 at the default of WB, WT, UC-, and UC.
2073 	 * Program 5 and 6 as WP and WC.
2074 	 *
2075 	 * Leave 4 and 7 as WB and UC.  Note that a recursive page table
2076 	 * mapping for a 2M page uses a PAT value with the bit 3 set due
2077 	 * to its overload with PG_PS.
2078 	 */
2079 	pat_msr = PAT_VALUE(0, PAT_WRITE_BACK) |
2080 	    PAT_VALUE(1, PAT_WRITE_THROUGH) |
2081 	    PAT_VALUE(2, PAT_UNCACHED) |
2082 	    PAT_VALUE(3, PAT_UNCACHEABLE) |
2083 	    PAT_VALUE(4, PAT_WRITE_BACK) |
2084 	    PAT_VALUE(5, PAT_WRITE_PROTECTED) |
2085 	    PAT_VALUE(6, PAT_WRITE_COMBINING) |
2086 	    PAT_VALUE(7, PAT_UNCACHEABLE);
2087 
2088 	/* Disable PGE. */
2089 	cr4 = rcr4();
2090 	load_cr4(cr4 & ~CR4_PGE);
2091 
2092 	/* Disable caches (CD = 1, NW = 0). */
2093 	cr0 = rcr0();
2094 	load_cr0((cr0 & ~CR0_NW) | CR0_CD);
2095 
2096 	/* Flushes caches and TLBs. */
2097 	wbinvd();
2098 	invltlb();
2099 
2100 	/* Update PAT and index table. */
2101 	wrmsr(MSR_PAT, pat_msr);
2102 
2103 	/* Flush caches and TLBs again. */
2104 	wbinvd();
2105 	invltlb();
2106 
2107 	/* Restore caches and PGE. */
2108 	load_cr0(cr0);
2109 	load_cr4(cr4);
2110 }
2111 
2112 vm_page_t
2113 pmap_page_alloc_below_4g(bool zeroed)
2114 {
2115 	return (vm_page_alloc_noobj_contig((zeroed ? VM_ALLOC_ZERO : 0),
2116 	    1, 0, (1ULL << 32), PAGE_SIZE, 0, VM_MEMATTR_DEFAULT));
2117 }
2118 
2119 extern const char la57_trampoline[], la57_trampoline_gdt_desc[],
2120     la57_trampoline_gdt[], la57_trampoline_end[];
2121 
2122 static void
2123 pmap_bootstrap_la57(void *arg __unused)
2124 {
2125 	char *v_code;
2126 	pml5_entry_t *v_pml5;
2127 	pml4_entry_t *v_pml4;
2128 	pdp_entry_t *v_pdp;
2129 	pd_entry_t *v_pd;
2130 	pt_entry_t *v_pt;
2131 	vm_page_t m_code, m_pml4, m_pdp, m_pd, m_pt, m_pml5;
2132 	void (*la57_tramp)(uint64_t pml5);
2133 	struct region_descriptor r_gdt;
2134 
2135 	if ((cpu_stdext_feature2 & CPUID_STDEXT2_LA57) == 0)
2136 		return;
2137 	TUNABLE_INT_FETCH("vm.pmap.la57", &la57);
2138 	if (!la57)
2139 		return;
2140 
2141 	r_gdt.rd_limit = NGDT * sizeof(struct user_segment_descriptor) - 1;
2142 	r_gdt.rd_base = (long)__pcpu[0].pc_gdt;
2143 
2144 	m_code = pmap_page_alloc_below_4g(true);
2145 	v_code = (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_code));
2146 	m_pml5 = pmap_page_alloc_below_4g(true);
2147 	KPML5phys = VM_PAGE_TO_PHYS(m_pml5);
2148 	v_pml5 = (pml5_entry_t *)PHYS_TO_DMAP(KPML5phys);
2149 	m_pml4 = pmap_page_alloc_below_4g(true);
2150 	v_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pml4));
2151 	m_pdp = pmap_page_alloc_below_4g(true);
2152 	v_pdp = (pdp_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pdp));
2153 	m_pd = pmap_page_alloc_below_4g(true);
2154 	v_pd = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pd));
2155 	m_pt = pmap_page_alloc_below_4g(true);
2156 	v_pt = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pt));
2157 
2158 	/*
2159 	 * Map m_code 1:1; it appears below 4G in KVA because its physical
2160 	 * address is below 4G.  Since kernel KVA is in the upper half,
2161 	 * the pml4e should be zero and free for temporary use.
2162 	 */
2163 	kernel_pmap->pm_pmltop[pmap_pml4e_index(VM_PAGE_TO_PHYS(m_code))] =
2164 	    VM_PAGE_TO_PHYS(m_pdp) | X86_PG_V | X86_PG_RW | X86_PG_A |
2165 	    X86_PG_M;
2166 	v_pdp[pmap_pdpe_index(VM_PAGE_TO_PHYS(m_code))] =
2167 	    VM_PAGE_TO_PHYS(m_pd) | X86_PG_V | X86_PG_RW | X86_PG_A |
2168 	    X86_PG_M;
2169 	v_pd[pmap_pde_index(VM_PAGE_TO_PHYS(m_code))] =
2170 	    VM_PAGE_TO_PHYS(m_pt) | X86_PG_V | X86_PG_RW | X86_PG_A |
2171 	    X86_PG_M;
2172 	v_pt[pmap_pte_index(VM_PAGE_TO_PHYS(m_code))] =
2173 	    VM_PAGE_TO_PHYS(m_code) | X86_PG_V | X86_PG_RW | X86_PG_A |
2174 	    X86_PG_M;
2175 
2176 	/*
2177 	 * Add pml5 entry at top of KVA pointing to existing pml4 table,
2178 	 * entering all existing kernel mappings into level 5 table.
2179 	 */
2180 	v_pml5[pmap_pml5e_index(UPT_MAX_ADDRESS)] = KPML4phys | X86_PG_V |
2181 	    X86_PG_RW | X86_PG_A | X86_PG_M | pg_g;
2182 
2183 	/*
2184 	 * Add pml5 entry for 1:1 trampoline mapping after LA57 is turned on.
2185 	 */
2186 	v_pml5[pmap_pml5e_index(VM_PAGE_TO_PHYS(m_code))] =
2187 	    VM_PAGE_TO_PHYS(m_pml4) | X86_PG_V | X86_PG_RW | X86_PG_A |
2188 	    X86_PG_M;
2189 	v_pml4[pmap_pml4e_index(VM_PAGE_TO_PHYS(m_code))] =
2190 	    VM_PAGE_TO_PHYS(m_pdp) | X86_PG_V | X86_PG_RW | X86_PG_A |
2191 	    X86_PG_M;
2192 
2193 	/*
2194 	 * Copy and call the 48->57 trampoline, and hope we return there alive.
2195 	 */
2196 	bcopy(la57_trampoline, v_code, la57_trampoline_end - la57_trampoline);
2197 	*(u_long *)(v_code + 2 + (la57_trampoline_gdt_desc - la57_trampoline)) =
2198 	    la57_trampoline_gdt - la57_trampoline + VM_PAGE_TO_PHYS(m_code);
2199 	la57_tramp = (void (*)(uint64_t))VM_PAGE_TO_PHYS(m_code);
2200 	invlpg((vm_offset_t)la57_tramp);
2201 	la57_tramp(KPML5phys);
2202 
2203 	/*
2204 	 * The GDT was necessarily reset by the trampoline; switch back to our GDT.
2205 	 */
2206 	lgdt(&r_gdt);
2207 	wrmsr(MSR_GSBASE, (uint64_t)&__pcpu[0]);
2208 	load_ds(_udatasel);
2209 	load_es(_udatasel);
2210 	load_fs(_ufssel);
2211 	ssdtosyssd(&gdt_segs[GPROC0_SEL],
2212 	    (struct system_segment_descriptor *)&__pcpu[0].pc_gdt[GPROC0_SEL]);
2213 	ltr(GSEL(GPROC0_SEL, SEL_KPL));
2214 
2215 	/*
2216 	 * Now unmap the trampoline, and free the pages.
2217 	 * Clear pml5 entry used for 1:1 trampoline mapping.
2218 	 */
2219 	pte_clear(&v_pml5[pmap_pml5e_index(VM_PAGE_TO_PHYS(m_code))]);
2220 	invlpg((vm_offset_t)v_code);
2221 	vm_page_free(m_code);
2222 	vm_page_free(m_pdp);
2223 	vm_page_free(m_pd);
2224 	vm_page_free(m_pt);
2225 
2226 	/*
2227 	 * Recursively map PML5 to itself in order to get PTmap and
2228 	 * PDmap.
2229 	 */
2230 	v_pml5[PML5PML5I] = KPML5phys | X86_PG_RW | X86_PG_V | pg_nx;
2231 
2232 	vtoptem = ((1ul << (NPTEPGSHIFT + NPDEPGSHIFT + NPDPEPGSHIFT +
2233 	    NPML4EPGSHIFT + NPML5EPGSHIFT)) - 1) << 3;
2234 	PTmap = (vm_offset_t)P5Tmap;
2235 	vtopdem = ((1ul << (NPDEPGSHIFT + NPDPEPGSHIFT +
2236 	    NPML4EPGSHIFT + NPML5EPGSHIFT)) - 1) << 3;
2237 	PDmap = (vm_offset_t)P5Dmap;
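	/*
	 * A note on the masks above (illustrative): each paging level
	 * contributes 9 bits of index and each entry is 8 bytes (hence
	 * the "<< 3"), so the five-level PTmap window indexes
	 * 9 * 5 = 45 bits of page-frame number, versus 9 * 4 = 36 bits
	 * for the four-level window used until this point.
	 */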
2238 
2239 	kernel_pmap->pm_cr3 = KPML5phys;
2240 	kernel_pmap->pm_pmltop = v_pml5;
2241 	pmap_pt_page_count_adj(kernel_pmap, 1);
2242 }
2243 SYSINIT(la57, SI_SUB_KMEM, SI_ORDER_ANY, pmap_bootstrap_la57, NULL);
2244 
2245 /*
2246  *	Initialize a vm_page's machine-dependent fields.
2247  */
2248 void
2249 pmap_page_init(vm_page_t m)
2250 {
2251 
2252 	TAILQ_INIT(&m->md.pv_list);
2253 	m->md.pat_mode = PAT_WRITE_BACK;
2254 }
2255 
2256 static int pmap_allow_2m_x_ept;
2257 SYSCTL_INT(_vm_pmap, OID_AUTO, allow_2m_x_ept, CTLFLAG_RWTUN | CTLFLAG_NOFETCH,
2258     &pmap_allow_2m_x_ept, 0,
2259     "Allow executable superpage mappings in EPT");
2260 
2261 void
2262 pmap_allow_2m_x_ept_recalculate(void)
2263 {
2264 	/*
2265 	 * SKL002, SKL012S.  Since the EPT format is only used by
2266 	 * Intel CPUs, the vendor check is merely a formality.
2267 	 */
2268 	if (!(cpu_vendor_id != CPU_VENDOR_INTEL ||
2269 	    (cpu_ia32_arch_caps & IA32_ARCH_CAP_IF_PSCHANGE_MC_NO) != 0 ||
2270 	    (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
2271 	    (CPUID_TO_MODEL(cpu_id) == 0x26 ||	/* Atoms */
2272 	    CPUID_TO_MODEL(cpu_id) == 0x27 ||
2273 	    CPUID_TO_MODEL(cpu_id) == 0x35 ||
2274 	    CPUID_TO_MODEL(cpu_id) == 0x36 ||
2275 	    CPUID_TO_MODEL(cpu_id) == 0x37 ||
2276 	    CPUID_TO_MODEL(cpu_id) == 0x86 ||
2277 	    CPUID_TO_MODEL(cpu_id) == 0x1c ||
2278 	    CPUID_TO_MODEL(cpu_id) == 0x4a ||
2279 	    CPUID_TO_MODEL(cpu_id) == 0x4c ||
2280 	    CPUID_TO_MODEL(cpu_id) == 0x4d ||
2281 	    CPUID_TO_MODEL(cpu_id) == 0x5a ||
2282 	    CPUID_TO_MODEL(cpu_id) == 0x5c ||
2283 	    CPUID_TO_MODEL(cpu_id) == 0x5d ||
2284 	    CPUID_TO_MODEL(cpu_id) == 0x5f ||
2285 	    CPUID_TO_MODEL(cpu_id) == 0x6e ||
2286 	    CPUID_TO_MODEL(cpu_id) == 0x7a ||
2287 	    CPUID_TO_MODEL(cpu_id) == 0x57 ||	/* Knights */
2288 	    CPUID_TO_MODEL(cpu_id) == 0x85))))
2289 		pmap_allow_2m_x_ept = 1;
2290 	TUNABLE_INT_FETCH("hw.allow_2m_x_ept", &pmap_allow_2m_x_ept);
2291 }
2292 
2293 static bool
2294 pmap_allow_2m_x_page(pmap_t pmap, bool executable)
2295 {
2296 
2297 	return (pmap->pm_type != PT_EPT || !executable ||
2298 	    !pmap_allow_2m_x_ept);
2299 }
2300 
2301 #ifdef NUMA
2302 static void
2303 pmap_init_pv_table(void)
2304 {
2305 	struct pmap_large_md_page *pvd;
2306 	vm_size_t s;
2307 	long start, end, highest, pv_npg;
2308 	int domain, i, j, pages;
2309 
2310 	/*
2311 	 * We strongly depend on the size being a power of two, so the assert
2312 	 * is overzealous. However, should the struct be resized to a
2313 	 * different power of two, the code below needs to be revisited.
2314 	 */
2315 	CTASSERT(sizeof(*pvd) == 64);
2316 
2317 	/*
2318 	 * Calculate the size of the array.
2319 	 */
2320 	pmap_last_pa = vm_phys_segs[vm_phys_nsegs - 1].end;
2321 	pv_npg = howmany(pmap_last_pa, NBPDR);
2322 	s = (vm_size_t)pv_npg * sizeof(struct pmap_large_md_page);
2323 	s = round_page(s);
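	/*
	 * Illustrative sizing: one 64-byte entry per 2 MB of physical
	 * address space, so e.g. 64 GB of span needs 32768 entries, or
	 * 2 MB of pv_table KVA after rounding to whole pages.
	 */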
2324 	pv_table = (struct pmap_large_md_page *)kva_alloc(s);
2325 	if (pv_table == NULL)
2326 		panic("%s: kva_alloc failed\n", __func__);
2327 
2328 	/*
2329 	 * Iterate physical segments to allocate space for respective pages.
2330 	 */
2331 	highest = -1;
2332 	s = 0;
2333 	for (i = 0; i < vm_phys_nsegs; i++) {
2334 		end = vm_phys_segs[i].end / NBPDR;
2335 		domain = vm_phys_segs[i].domain;
2336 
2337 		if (highest >= end)
2338 			continue;
2339 
2340 		start = highest + 1;
2341 		pvd = &pv_table[start];
2342 
2343 		pages = end - start + 1;
2344 		s = round_page(pages * sizeof(*pvd));
2345 		highest = start + (s / sizeof(*pvd)) - 1;
2346 
2347 		for (j = 0; j < s; j += PAGE_SIZE) {
2348 			vm_page_t m = vm_page_alloc_noobj_domain(domain, 0);
2349 			if (m == NULL)
2350 				panic("failed to allocate PV table page");
2351 			pmap_qenter((vm_offset_t)pvd + j, &m, 1);
2352 		}
2353 
2354 		for (j = 0; j < s / sizeof(*pvd); j++) {
2355 			rw_init_flags(&pvd->pv_lock, "pmap pv list", RW_NEW);
2356 			TAILQ_INIT(&pvd->pv_page.pv_list);
2357 			pvd->pv_page.pv_gen = 0;
2358 			pvd->pv_page.pat_mode = 0;
2359 			pvd->pv_invl_gen = 0;
2360 			pvd++;
2361 		}
2362 	}
2363 	pvd = &pv_dummy_large;
2364 	rw_init_flags(&pvd->pv_lock, "pmap pv list dummy", RW_NEW);
2365 	TAILQ_INIT(&pvd->pv_page.pv_list);
2366 	pvd->pv_page.pv_gen = 0;
2367 	pvd->pv_page.pat_mode = 0;
2368 	pvd->pv_invl_gen = 0;
2369 }
2370 #else
2371 static void
2372 pmap_init_pv_table(void)
2373 {
2374 	vm_size_t s;
2375 	long i, pv_npg;
2376 
2377 	/*
2378 	 * Initialize the pool of pv list locks.
2379 	 */
2380 	for (i = 0; i < NPV_LIST_LOCKS; i++)
2381 		rw_init(&pv_list_locks[i], "pmap pv list");
2382 
2383 	/*
2384 	 * Calculate the size of the pv head table for superpages.
2385 	 */
2386 	pv_npg = howmany(vm_phys_segs[vm_phys_nsegs - 1].end, NBPDR);
2387 
2388 	/*
2389 	 * Allocate memory for the pv head table for superpages.
2390 	 */
2391 	s = (vm_size_t)pv_npg * sizeof(struct md_page);
2392 	s = round_page(s);
2393 	pv_table = (struct md_page *)kmem_malloc(s, M_WAITOK | M_ZERO);
2394 	for (i = 0; i < pv_npg; i++)
2395 		TAILQ_INIT(&pv_table[i].pv_list);
2396 	TAILQ_INIT(&pv_dummy.pv_list);
2397 }
2398 #endif
2399 
2400 /*
2401  *	Initialize the pmap module.
2402  *	Called by vm_init, to initialize any structures that the pmap
2403  *	system needs to map virtual memory.
2404  */
2405 void
2406 pmap_init(void)
2407 {
2408 	struct pmap_preinit_mapping *ppim;
2409 	vm_page_t m, mpte;
2410 	int error, i, ret, skz63;
2411 
2412 	/* L1TF, reserve page @0 unconditionally */
2413 	vm_page_blacklist_add(0, bootverbose);
2414 
2415 	/* Detect bare-metal Skylake Server and Skylake-X. */
2416 	if (vm_guest == VM_GUEST_NO && cpu_vendor_id == CPU_VENDOR_INTEL &&
2417 	    CPUID_TO_FAMILY(cpu_id) == 0x6 && CPUID_TO_MODEL(cpu_id) == 0x55) {
2418 		/*
2419 		 * Skylake-X errata SKZ63. Processor May Hang When
2420 		 * Executing Code In an HLE Transaction Region between
2421 		 * 40000000H and 403FFFFFH.
2422 		 *
2423 		 * Mark the pages in the range as preallocated.  It
2424 		 * seems to be impossible to distinguish between
2425 		 * Skylake Server and Skylake X.
2426 		 */
2427 		skz63 = 1;
2428 		TUNABLE_INT_FETCH("hw.skz63_enable", &skz63);
2429 		if (skz63 != 0) {
2430 			if (bootverbose)
2431 				printf("SKZ63: skipping 4M RAM starting "
2432 				    "at physical 1G\n");
2433 			for (i = 0; i < atop(0x400000); i++) {
2434 				ret = vm_page_blacklist_add(0x40000000 +
2435 				    ptoa(i), FALSE);
2436 				if (!ret && bootverbose)
2437 					printf("page at %#lx already used\n",
2438 					    0x40000000 + ptoa(i));
2439 			}
2440 		}
2441 	}
2442 
2443 	/* IFU */
2444 	pmap_allow_2m_x_ept_recalculate();
2445 
2446 	/*
2447 	 * Initialize the vm page array entries for the kernel pmap's
2448 	 * page table pages.
2449 	 */
2450 	PMAP_LOCK(kernel_pmap);
2451 	for (i = 0; i < nkpt; i++) {
2452 		mpte = PHYS_TO_VM_PAGE(KPTphys + (i << PAGE_SHIFT));
2453 		KASSERT(mpte >= vm_page_array &&
2454 		    mpte < &vm_page_array[vm_page_array_size],
2455 		    ("pmap_init: page table page is out of range"));
2456 		mpte->pindex = pmap_pde_pindex(KERNBASE) + i;
2457 		mpte->phys_addr = KPTphys + (i << PAGE_SHIFT);
2458 		mpte->ref_count = 1;
2459 
2460 		/*
2461 		 * Collect the page table pages that were replaced by a 2MB
2462 		 * page in create_pagetables().  They are zero filled.
2463 		 */
2464 		if ((i == 0 ||
2465 		    kernphys + ((vm_paddr_t)(i - 1) << PDRSHIFT) < KERNend) &&
2466 		    pmap_insert_pt_page(kernel_pmap, mpte, false))
2467 			panic("pmap_init: pmap_insert_pt_page failed");
2468 	}
2469 	PMAP_UNLOCK(kernel_pmap);
2470 	vm_wire_add(nkpt);
2471 
2472 	/*
2473 	 * If the kernel is running on a virtual machine, then it must assume
2474 	 * that MCA is enabled by the hypervisor.  Moreover, the kernel must
2475 	 * be prepared for the hypervisor changing the vendor and family that
2476 	 * are reported by CPUID.  Consequently, the workaround for AMD Family
2477 	 * 10h Erratum 383 is enabled if the processor's feature set does not
2478 	 * include at least one feature that is only supported by older Intel
2479 	 * or newer AMD processors.
2480 	 */
2481 	if (vm_guest != VM_GUEST_NO && (cpu_feature & CPUID_SS) == 0 &&
2482 	    (cpu_feature2 & (CPUID2_SSSE3 | CPUID2_SSE41 | CPUID2_AESNI |
2483 	    CPUID2_AVX | CPUID2_XSAVE)) == 0 && (amd_feature2 & (AMDID2_XOP |
2484 	    AMDID2_FMA4)) == 0)
2485 		workaround_erratum383 = 1;
2486 
2487 	/*
2488 	 * Are large page mappings enabled?
2489 	 */
2490 	TUNABLE_INT_FETCH("vm.pmap.pg_ps_enabled", &pg_ps_enabled);
2491 	if (pg_ps_enabled) {
2492 		KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0,
2493 		    ("pmap_init: can't assign to pagesizes[1]"));
2494 		pagesizes[1] = NBPDR;
2495 		if ((amd_feature & AMDID_PAGE1GB) != 0) {
2496 			KASSERT(MAXPAGESIZES > 2 && pagesizes[2] == 0,
2497 			    ("pmap_init: can't assign to pagesizes[2]"));
2498 			pagesizes[2] = NBPDP;
2499 		}
2500 	}
2501 
2502 	/*
2503 	 * Initialize pv chunk lists.
2504 	 */
2505 	for (i = 0; i < PMAP_MEMDOM; i++) {
2506 		mtx_init(&pv_chunks[i].pvc_lock, "pmap pv chunk list", NULL, MTX_DEF);
2507 		TAILQ_INIT(&pv_chunks[i].pvc_list);
2508 	}
2509 	pmap_init_pv_table();
2510 
2511 	pmap_initialized = 1;
2512 	for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
2513 		ppim = pmap_preinit_mapping + i;
2514 		if (ppim->va == 0)
2515 			continue;
2516 		/* Make the direct map consistent */
2517 		if (ppim->pa < dmaplimit && ppim->pa + ppim->sz <= dmaplimit) {
2518 			(void)pmap_change_attr(PHYS_TO_DMAP(ppim->pa),
2519 			    ppim->sz, ppim->mode);
2520 		}
2521 		if (!bootverbose)
2522 			continue;
2523 		printf("PPIM %u: PA=%#lx, VA=%#lx, size=%#lx, mode=%#x\n", i,
2524 		    ppim->pa, ppim->va, ppim->sz, ppim->mode);
2525 	}
2526 
2527 	mtx_init(&qframe_mtx, "qfrmlk", NULL, MTX_SPIN);
2528 	error = vmem_alloc(kernel_arena, PAGE_SIZE, M_BESTFIT | M_WAITOK,
2529 	    (vmem_addr_t *)&qframe);
2530 	if (error != 0)
2531 		panic("qframe allocation failed");
2532 
2533 	lm_ents = 8;
2534 	TUNABLE_INT_FETCH("vm.pmap.large_map_pml4_entries", &lm_ents);
2535 	if (lm_ents > LMEPML4I - LMSPML4I + 1)
2536 		lm_ents = LMEPML4I - LMSPML4I + 1;
2537 #ifdef KMSAN
2538 	if (lm_ents > KMSANORIGPML4I - LMSPML4I) {
2539 		printf(
2540 	    "pmap: shrinking large map for KMSAN (%d slots to %ld slots)\n",
2541 		    lm_ents, KMSANORIGPML4I - LMSPML4I);
2542 		lm_ents = KMSANORIGPML4I - LMSPML4I;
2543 	}
2544 #endif
2545 	if (bootverbose)
2546 		printf("pmap: large map %u PML4 slots (%lu GB)\n",
2547 		    lm_ents, (u_long)lm_ents * (NBPML4 / 1024 / 1024 / 1024));
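	/*
	 * Each large-map PML4 slot covers NBPML4 (512 GB) of KVA, so the
	 * default of 8 slots spans 4 TB unless tuned or clamped above.
	 */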
2548 	if (lm_ents != 0) {
2549 		large_vmem = vmem_create("large", LARGEMAP_MIN_ADDRESS,
2550 		    (vmem_size_t)lm_ents * NBPML4, PAGE_SIZE, 0, M_WAITOK);
2551 		if (large_vmem == NULL) {
2552 			printf("pmap: cannot create large map\n");
2553 			lm_ents = 0;
2554 		}
2555 		for (i = 0; i < lm_ents; i++) {
2556 			m = pmap_large_map_getptp_unlocked();
2557 			/* XXXKIB la57 */
2558 			kernel_pml4[LMSPML4I + i] = X86_PG_V |
2559 			    X86_PG_RW | X86_PG_A | X86_PG_M | pg_nx |
2560 			    VM_PAGE_TO_PHYS(m);
2561 		}
2562 	}
2563 }
2564 
2565 SYSCTL_UINT(_vm_pmap, OID_AUTO, large_map_pml4_entries,
2566     CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &lm_ents, 0,
2567     "Maximum number of PML4 entries for use by large map (tunable).  "
2568     "Each entry corresponds to 512GB of address space.");
2569 
2570 static SYSCTL_NODE(_vm_pmap, OID_AUTO, pde, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
2571     "2MB page mapping counters");
2572 
2573 static COUNTER_U64_DEFINE_EARLY(pmap_pde_demotions);
2574 SYSCTL_COUNTER_U64(_vm_pmap_pde, OID_AUTO, demotions,
2575     CTLFLAG_RD, &pmap_pde_demotions, "2MB page demotions");
2576 
2577 static COUNTER_U64_DEFINE_EARLY(pmap_pde_mappings);
2578 SYSCTL_COUNTER_U64(_vm_pmap_pde, OID_AUTO, mappings, CTLFLAG_RD,
2579     &pmap_pde_mappings, "2MB page mappings");
2580 
2581 static COUNTER_U64_DEFINE_EARLY(pmap_pde_p_failures);
2582 SYSCTL_COUNTER_U64(_vm_pmap_pde, OID_AUTO, p_failures, CTLFLAG_RD,
2583     &pmap_pde_p_failures, "2MB page promotion failures");
2584 
2585 static COUNTER_U64_DEFINE_EARLY(pmap_pde_promotions);
2586 SYSCTL_COUNTER_U64(_vm_pmap_pde, OID_AUTO, promotions, CTLFLAG_RD,
2587     &pmap_pde_promotions, "2MB page promotions");
2588 
2589 static SYSCTL_NODE(_vm_pmap, OID_AUTO, pdpe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
2590     "1GB page mapping counters");
2591 
2592 static COUNTER_U64_DEFINE_EARLY(pmap_pdpe_demotions);
2593 SYSCTL_COUNTER_U64(_vm_pmap_pdpe, OID_AUTO, demotions, CTLFLAG_RD,
2594     &pmap_pdpe_demotions, "1GB page demotions");
2595 
2596 /***************************************************
2597  * Low level helper routines.....
2598  ***************************************************/
2599 
2600 static pt_entry_t
2601 pmap_swap_pat(pmap_t pmap, pt_entry_t entry)
2602 {
2603 	int x86_pat_bits = X86_PG_PTE_PAT | X86_PG_PDE_PAT;
2604 
2605 	switch (pmap->pm_type) {
2606 	case PT_X86:
2607 	case PT_RVI:
2608 		/* Verify that both PAT bits are not set at the same time */
2609 		KASSERT((entry & x86_pat_bits) != x86_pat_bits,
2610 		    ("Invalid PAT bits in entry %#lx", entry));
2611 
2612 		/* Swap the PAT bits if one of them is set */
2613 		if ((entry & x86_pat_bits) != 0)
2614 			entry ^= x86_pat_bits;
2615 		break;
2616 	case PT_EPT:
2617 		/*
2618 		 * Nothing to do - the memory attributes are represented
2619 		 * the same way for regular pages and superpages.
2620 		 */
2621 		break;
2622 	default:
2623 		panic("pmap_switch_pat_bits: bad pm_type %d", pmap->pm_type);
2624 	}
2625 
2626 	return (entry);
2627 }
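/*
 * For illustration (architectural bit layout): in a 4KB PTE the PAT-select
 * bit is bit 7, while in a 2MB PDE bit 7 is PG_PS and the PAT selection
 * moves to bit 12.  pmap_swap_pat() above therefore toggles both bits when
 * one of them is set, moving the PAT selection to the other position so
 * that a promoted or demoted mapping keeps referencing the same PAT entry.
 */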
2628 
2629 boolean_t
2630 pmap_is_valid_memattr(pmap_t pmap __unused, vm_memattr_t mode)
2631 {
2632 
2633 	return (mode >= 0 && mode < PAT_INDEX_SIZE &&
2634 	    pat_index[(int)mode] >= 0);
2635 }
2636 
2637 /*
2638  * Determine the appropriate bits to set in a PTE or PDE for a specified
2639  * caching mode.
2640  */
2641 int
2642 pmap_cache_bits(pmap_t pmap, int mode, boolean_t is_pde)
2643 {
2644 	int cache_bits, pat_flag, pat_idx;
2645 
2646 	if (!pmap_is_valid_memattr(pmap, mode))
2647 		panic("Unknown caching mode %d\n", mode);
2648 
2649 	switch (pmap->pm_type) {
2650 	case PT_X86:
2651 	case PT_RVI:
2652 		/* The PAT bit is different for PTEs and PDEs. */
2653 		pat_flag = is_pde ? X86_PG_PDE_PAT : X86_PG_PTE_PAT;
2654 
2655 		/* Map the caching mode to a PAT index. */
2656 		pat_idx = pat_index[mode];
2657 
2658 		/* Map the 3-bit index value into the PAT, PCD, and PWT bits. */
2659 		cache_bits = 0;
2660 		if (pat_idx & 0x4)
2661 			cache_bits |= pat_flag;
2662 		if (pat_idx & 0x2)
2663 			cache_bits |= PG_NC_PCD;
2664 		if (pat_idx & 0x1)
2665 			cache_bits |= PG_NC_PWT;
2666 		break;
2667 
2668 	case PT_EPT:
2669 		cache_bits = EPT_PG_IGNORE_PAT | EPT_PG_MEMORY_TYPE(mode);
2670 		break;
2671 
2672 	default:
2673 		panic("unsupported pmap type %d", pmap->pm_type);
2674 	}
2675 
2676 	return (cache_bits);
2677 }
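/*
 * Usage sketch (illustrative only, not compiled): with the PAT layout
 * programmed by pmap_init_pat(), PAT_WRITE_COMBINING is index 6 (binary
 * 110), so for a 4KB kernel mapping this returns
 * X86_PG_PTE_PAT | PG_NC_PCD.
 */
#if 0
	pt_entry_t wc_bits;

	wc_bits = pmap_cache_bits(kernel_pmap, PAT_WRITE_COMBINING, FALSE);
#endif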
2678 
2679 static int
2680 pmap_cache_mask(pmap_t pmap, boolean_t is_pde)
2681 {
2682 	int mask;
2683 
2684 	switch (pmap->pm_type) {
2685 	case PT_X86:
2686 	case PT_RVI:
2687 		mask = is_pde ? X86_PG_PDE_CACHE : X86_PG_PTE_CACHE;
2688 		break;
2689 	case PT_EPT:
2690 		mask = EPT_PG_IGNORE_PAT | EPT_PG_MEMORY_TYPE(0x7);
2691 		break;
2692 	default:
2693 		panic("pmap_cache_mask: invalid pm_type %d", pmap->pm_type);
2694 	}
2695 
2696 	return (mask);
2697 }
2698 
2699 static int
2700 pmap_pat_index(pmap_t pmap, pt_entry_t pte, bool is_pde)
2701 {
2702 	int pat_flag, pat_idx;
2703 
2704 	pat_idx = 0;
2705 	switch (pmap->pm_type) {
2706 	case PT_X86:
2707 	case PT_RVI:
2708 		/* The PAT bit is different for PTE's and PDE's. */
2709 		/* The PAT bit is different for PTEs and PDEs. */
2710 
2711 		if ((pte & pat_flag) != 0)
2712 			pat_idx |= 0x4;
2713 		if ((pte & PG_NC_PCD) != 0)
2714 			pat_idx |= 0x2;
2715 		if ((pte & PG_NC_PWT) != 0)
2716 			pat_idx |= 0x1;
2717 		break;
2718 	case PT_EPT:
2719 		if ((pte & EPT_PG_IGNORE_PAT) != 0)
2720 			panic("EPT PTE %#lx has no PAT memory type", pte);
2721 		pat_idx = (pte & EPT_PG_MEMORY_TYPE(0x7)) >> 3;
2722 		break;
2723 	}
2724 
2725 	/* See pmap_init_pat(). */
2726 	if (pat_idx == 4)
2727 		pat_idx = 0;
2728 	if (pat_idx == 7)
2729 		pat_idx = 3;
2730 
2731 	return (pat_idx);
2732 }
2733 
2734 bool
2735 pmap_ps_enabled(pmap_t pmap)
2736 {
2737 
2738 	return (pg_ps_enabled && (pmap->pm_flags & PMAP_PDE_SUPERPAGE) != 0);
2739 }
2740 
2741 static void
2742 pmap_update_pde_store(pmap_t pmap, pd_entry_t *pde, pd_entry_t newpde)
2743 {
2744 
2745 	switch (pmap->pm_type) {
2746 	case PT_X86:
2747 		break;
2748 	case PT_RVI:
2749 	case PT_EPT:
2750 		/*
2751 		 * XXX
2752 		 * This is a little bogus since the generation number is
2753 		 * supposed to be bumped up when a region of the address
2754 		 * space is invalidated in the page tables.
2755 		 *
2756 		 * In this case the old PDE entry is valid but yet we want
2757 		 * to make sure that any mappings using the old entry are
2758 		 * invalidated in the TLB.
2759 		 *
2760 		 * The reason this works as expected is because we rendezvous
2761 		 * "all" host cpus and force any vcpu context to exit as a
2762 		 * side-effect.
2763 		 */
2764 		atomic_add_long(&pmap->pm_eptgen, 1);
2765 		break;
2766 	default:
2767 		panic("pmap_update_pde_store: bad pm_type %d", pmap->pm_type);
2768 	}
2769 	pde_store(pde, newpde);
2770 }
2771 
2772 /*
2773  * After changing the page size for the specified virtual address in the page
2774  * table, flush the corresponding entries from the processor's TLB.  Only the
2775  * calling processor's TLB is affected.
2776  *
2777  * The calling thread must be pinned to a processor.
2778  */
2779 static void
2780 pmap_update_pde_invalidate(pmap_t pmap, vm_offset_t va, pd_entry_t newpde)
2781 {
2782 	pt_entry_t PG_G;
2783 
2784 	if (pmap_type_guest(pmap))
2785 		return;
2786 
2787 	KASSERT(pmap->pm_type == PT_X86,
2788 	    ("pmap_update_pde_invalidate: invalid type %d", pmap->pm_type));
2789 
2790 	PG_G = pmap_global_bit(pmap);
2791 
2792 	if ((newpde & PG_PS) == 0)
2793 		/* Demotion: flush a specific 2MB page mapping. */
2794 		invlpg(va);
2795 	else if ((newpde & PG_G) == 0)
2796 		/*
2797 		 * Promotion: flush every 4KB page mapping from the TLB
2798 		 * because there are too many to flush individually.
2799 		 */
2800 		invltlb();
2801 	else {
2802 		/*
2803 		 * Promotion: flush every 4KB page mapping from the TLB,
2804 		 * including any global (PG_G) mappings.
2805 		 */
2806 		invltlb_glob();
2807 	}
2808 }
2809 
2810 /*
2811  * The amd64 pmap uses different approaches to TLB invalidation
2812  * depending on the kernel configuration, available hardware features,
2813  * and known hardware errata.  The kernel configuration option that
2814  * has the greatest operational impact on TLB invalidation is PTI,
2815  * which is enabled automatically on affected Intel CPUs.  The most
2816  * impactful hardware features are first PCID, and then INVPCID
2817  * instruction presence.  PCID usage is quite different for PTI
2818  * vs. non-PTI.
2819  *
2820  * * Kernel Page Table Isolation (PTI or KPTI) is used to mitigate
2821  *   the Meltdown bug in some Intel CPUs.  Under PTI, each user address
2822  *   space is served by two page tables, user and kernel.  The user
2823  *   page table only maps user space and a kernel trampoline.  The
2824  *   kernel trampoline includes the entirety of the kernel text but
2825  *   only the kernel data that is needed to switch from user to kernel
2826  *   mode.  The kernel page table maps the user and kernel address
2827  *   spaces in their entirety.  It is identical to the per-process
2828  *   page table used in non-PTI mode.
2829  *
2830  *   User page tables are only used when the CPU is in user mode.
2831  *   Consequently, some TLB invalidations can be postponed until the
2832  *   switch from kernel to user mode.  In contrast, the user
2833  *   space part of the kernel page table is used for copyout(9), so
2834  *   TLB invalidations on this page table cannot be similarly postponed.
2835  *
2836  *   The existence of a user mode page table for the given pmap is
2837  *   indicated by a pm_ucr3 value that differs from PMAP_NO_CR3, in
2838  *   which case pm_ucr3 contains the %cr3 register value for the user
2839  *   mode page table's root.
2840  *
2841  * * The pm_active bitmask indicates which CPUs currently have the
2842  *   pmap active.  A CPU's bit is set on context switch to the pmap, and
2843  *   cleared on switching off this CPU.  For the kernel page table,
2844  *   the pm_active field is immutable and contains all CPUs.  The
2845  *   kernel page table is always logically active on every processor,
2846  *   but not necessarily in use by the hardware, e.g., in PTI mode.
2847  *
2848  *   When requesting invalidation of virtual addresses with
2849  *   pmap_invalidate_XXX() functions, the pmap sends shootdown IPIs to
2850  *   all CPUs recorded as active in pm_active.  Updates to and reads
2851  *   from pm_active are not synchronized, and so they may race with
2852  *   each other.  Shootdown handlers are prepared to handle the race.
2853  *
2854  * * PCID is an optional feature of the long mode x86 MMU where TLB
2855  *   entries are tagged with the 'Process ID' of the address space
2856  *   they belong to.  This feature provides a limited namespace for
2857  *   process identifiers, 12 bits, supporting 4095 simultaneous IDs
2858  *   total.
2859  *
2860  *   Allocation of a PCID to a pmap is done by an algorithm described
2861  *   in section 15.12, "Other TLB Consistency Algorithms", of
2862  *   Vahalia's book "Unix Internals".  A PCID cannot be allocated for
2863  *   the whole lifetime of a pmap in pmap_pinit() due to the limited
2864  *   namespace.  Instead, a per-CPU, per-pmap PCID is assigned when
2865  *   the CPU is about to start caching TLB entries from a pmap,
2866  *   i.e., on the context switch that activates the pmap on the CPU.
2867  *
2868  *   The PCID allocator maintains a per-CPU, per-pmap generation
2869  *   count, pm_gen, which is incremented each time a new PCID is
2870  *   allocated.  On TLB invalidation, the generation counters for the
2871  *   pmap are zeroed, which signals the context switch code that the
2872  *   previously allocated PCID is no longer valid.  Effectively,
2873  *   zeroing any of these counters triggers a TLB shootdown for the
2874  *   given CPU/address space, due to the allocation of a new PCID.
2875  *
2876  *   Zeroing can be performed remotely.  Consequently, if a pmap is
2877  *   inactive on a CPU, then a TLB shootdown for that pmap and CPU can
2878  *   be initiated by an ordinary memory access to reset the target
2879  *   CPU's generation count within the pmap.  The CPU initiating the
2880  *   TLB shootdown does not need to send an IPI to the target CPU.
2881  *
2882  * * PTI + PCID.  The available PCIDs are divided into two sets: PCIDs
2883  *   for complete (kernel) page tables, and PCIDs for user mode page
2884  *   tables.  A user PCID value is obtained from the kernel PCID value
2885  *   by setting the highest bit, 11, to 1 (0x800 == PMAP_PCID_USER_PT).
2886  *
2887  *   User space page tables are activated on return to user mode, by
2888  *   loading pm_ucr3 into %cr3.  If the PCPU(ucr3_load_mask) requests
2889  *   clearing bit 63 of the loaded ucr3, this effectively causes
2890  *   complete invalidation of the user mode TLB entries for the
2891  *   current pmap, in which case local invalidations of individual
2892  *   pages in the user page table are skipped.
2893  *
2894  * * Local invalidation, all modes.  If the requested invalidation is
2895  *   for a specific address or the total invalidation of a currently
2896  *   active pmap, then the TLB is flushed using INVLPG for a kernel
2897  *   page table, and INVPCID(INVPCID_CTXGLOB)/invltlb_glob() for
2898  *   user space page tables.
2899  *
2900  *   If the INVPCID instruction is available, it is used to flush entries
2901  *   from the kernel page table.
2902  *
2903  * * mode: PTI disabled, PCID present.  The kernel reserves PCID 0 for its
2904  *   address space; all other 4095 PCIDs are used for user mode spaces
2905  *   as described above.  A context switch allocates a new PCID if
2906  *   the recorded PCID is zero or the recorded generation does not match
2907  *   the CPU's generation, effectively flushing the TLB for this address space.
2908  *   Total remote invalidation is performed by zeroing pm_gen for all CPUs.
2909  *	local user page: INVLPG
2910  *	local kernel page: INVLPG
2911  *	local user total: INVPCID(CTX)
2912  *	local kernel total: INVPCID(CTXGLOB) or invltlb_glob()
2913  *	remote user page, inactive pmap: zero pm_gen
2914  *	remote user page, active pmap: zero pm_gen + IPI:INVLPG
2915  *	(Both actions are required to handle the aforementioned pm_active races.)
2916  *	remote kernel page: IPI:INVLPG
2917  *	remote user total, inactive pmap: zero pm_gen
2918  *	remote user total, active pmap: zero pm_gen + IPI:(INVPCID(CTX) or
2919  *          reload %cr3)
2920  *	(See note above about pm_active races.)
2921  *	remote kernel total: IPI:(INVPCID(CTXGLOB) or invltlb_glob())
2922  *
2923  * PTI enabled, PCID present.
2924  *	local user page: INVLPG for kpt, INVPCID(ADDR) or (INVLPG for ucr3)
2925  *          for upt
2926  *	local kernel page: INVLPG
2927  *	local user total: INVPCID(CTX) or reload %cr3 for kpt, clear PCID_SAVE
2928  *          on loading UCR3 into %cr3 for upt
2929  *	local kernel total: INVPCID(CTXGLOB) or invltlb_glob()
2930  *	remote user page, inactive pmap: zero pm_gen
2931  *	remote user page, active pmap: zero pm_gen + IPI:(INVLPG for kpt,
2932  *          INVPCID(ADDR) for upt)
2933  *	remote kernel page: IPI:INVLPG
2934  *	remote user total, inactive pmap: zero pm_gen
2935  *	remote user total, active pmap: zero pm_gen + IPI:(INVPCID(CTX) for kpt,
2936  *          clear PCID_SAVE on loading UCR3 into %cr3 for upt)
2937  *	remote kernel total: IPI:(INVPCID(CTXGLOB) or invltlb_glob())
2938  *
2939  *  No PCID.
2940  *	local user page: INVLPG
2941  *	local kernel page: INVLPG
2942  *	local user total: reload %cr3
2943  *	local kernel total: invltlb_glob()
2944  *	remote user page, inactive pmap: -
2945  *	remote user page, active pmap: IPI:INVLPG
2946  *	remote kernel page: IPI:INVLPG
2947  *	remote user total, inactive pmap: -
2948  *	remote user total, active pmap: IPI:(reload %cr3)
2949  *	remote kernel total: IPI:invltlb_glob()
2950  *  Since the reload of %cr3 with ucr3 on return to user mode causes
2951  *  TLB invalidation, no specific action is required for the user page table.
2952  *
2953  * EPT.  EPT pmaps do not map KVA; all mappings are userspace.
2954  * XXX TODO
2955  */
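/*
 * Illustrative sketch (not compiled; "pmap" and "cpu" are hypothetical):
 * under PCID, a remote invalidation for a pmap that is inactive on a CPU
 * reduces to zeroing that CPU's generation count, so the next context
 * switch onto the pmap allocates a fresh PCID instead of reusing stale
 * TLB entries.  Compare pmap_invalidate_preipi_pcid() below.
 */
#if 0
	CPU_FOREACH(cpu) {
		if (!CPU_ISSET(cpu, &pmap->pm_active))
			pmap->pm_pcids[cpu].pm_gen = 0;
	}
#endif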
2956 
2957 #ifdef SMP
2958 /*
2959  * Interrupt the cpus that are executing in the guest context.
2960  * This will force the vcpu to exit and the cached EPT mappings
2961  * will be invalidated by the host before the next vmresume.
2962  */
2963 static __inline void
2964 pmap_invalidate_ept(pmap_t pmap)
2965 {
2966 	smr_seq_t goal;
2967 	int ipinum;
2968 
2969 	sched_pin();
2970 	KASSERT(!CPU_ISSET(curcpu, &pmap->pm_active),
2971 	    ("pmap_invalidate_ept: absurd pm_active"));
2972 
2973 	/*
2974 	 * The TLB mappings associated with a vcpu context are not
2975 	 * flushed each time a different vcpu is chosen to execute.
2976 	 *
2977 	 * This is in contrast with a process's vtop mappings that
2978 	 * are flushed from the TLB on each context switch.
2979 	 *
2980 	 * Therefore we need to do more than just a TLB shootdown on
2981 	 * the active cpus in 'pmap->pm_active'. To do this we keep
2982 	 * track of the number of invalidations performed on this pmap.
2983 	 *
2984 	 * Each vcpu keeps a cache of this counter and compares it
2985 	 * just before a vmresume. If the counter is out-of-date an
2986 	 * invept will be done to flush stale mappings from the TLB.
2987 	 *
2988 	 * To ensure that all vCPU threads have observed the new counter
2989 	 * value before returning, we use SMR.  Ordering is important here:
2990 	 * the VMM enters an SMR read section before loading the counter
2991 	 * and after updating the pm_active bit set.  Thus, pm_active is
2992 	 * a superset of active readers, and any reader that has observed
2993 	 * the goal has observed the new counter value.
2994 	 */
2995 	atomic_add_long(&pmap->pm_eptgen, 1);
2996 
2997 	goal = smr_advance(pmap->pm_eptsmr);
2998 
2999 	/*
3000 	 * Force the vcpu to exit and trap back into the hypervisor.
3001 	 */
3002 	ipinum = pmap->pm_flags & PMAP_NESTED_IPIMASK;
3003 	ipi_selected(pmap->pm_active, ipinum);
3004 	sched_unpin();
3005 
3006 	/*
3007 	 * Ensure that all active vCPUs will observe the new generation counter
3008 	 * value before executing any more guest instructions.
3009 	 */
3010 	smr_wait(pmap->pm_eptsmr, goal);
3011 }
3012 
3013 static inline void
3014 pmap_invalidate_preipi_pcid(pmap_t pmap)
3015 {
3016 	u_int cpuid, i;
3017 
3018 	sched_pin();
3019 
3020 	cpuid = PCPU_GET(cpuid);
3021 	if (pmap != PCPU_GET(curpmap))
3022 		cpuid = 0xffffffff;	/* An impossible value */
3023 
3024 	CPU_FOREACH(i) {
3025 		if (cpuid != i)
3026 			pmap->pm_pcids[i].pm_gen = 0;
3027 	}
3028 
3029 	/*
3030 	 * The fence is between stores to pm_gen and the read of the
3031 	 * pm_active mask.  We need to ensure that it is impossible
3032 	 * for us to miss the bit update in pm_active and
3033 	 * simultaneously observe a non-zero pm_gen in
3034 	 * pmap_activate_sw(), otherwise TLB update is missed.
3035 	 * Without the fence, IA32 allows such an outcome.  Note that
3036 	 * pm_active is updated by a locked operation, which provides
3037 	 * the reciprocal fence.
3038 	 */
3039 	atomic_thread_fence_seq_cst();
3040 }
3041 
3042 static void
3043 pmap_invalidate_preipi_nopcid(pmap_t pmap __unused)
3044 {
3045 	sched_pin();
3046 }
3047 
3048 DEFINE_IFUNC(static, void, pmap_invalidate_preipi, (pmap_t))
3049 {
3050 	return (pmap_pcid_enabled ? pmap_invalidate_preipi_pcid :
3051 	    pmap_invalidate_preipi_nopcid);
3052 }
3053 
3054 static inline void
3055 pmap_invalidate_page_pcid_cb(pmap_t pmap, vm_offset_t va,
3056     const bool invpcid_works1)
3057 {
3058 	struct invpcid_descr d;
3059 	uint64_t kcr3, ucr3;
3060 	uint32_t pcid;
3061 	u_int cpuid;
3062 
3063 	/*
3064 	 * Because pm_pcid is recalculated on a context switch, we
3065 	 * must ensure there is no preemption, not just pinning.
3066 	 * Otherwise, we might use a stale value below.
3067 	 */
3068 	CRITICAL_ASSERT(curthread);
3069 
3070 	/*
3071 	 * No need to do anything with user page tables invalidation
3072 	 * if there is no user page table, or invalidation is deferred
3073 	 * until the return to userspace.  ucr3_load_mask is stable
3074 	 * because we have preemption disabled.
3075 	 */
3076 	if (pmap->pm_ucr3 == PMAP_NO_CR3 ||
3077 	    PCPU_GET(ucr3_load_mask) != PMAP_UCR3_NOMASK)
3078 		return;
3079 
3080 	cpuid = PCPU_GET(cpuid);
3081 
3082 	pcid = pmap->pm_pcids[cpuid].pm_pcid;
3083 	if (invpcid_works1) {
3084 		d.pcid = pcid | PMAP_PCID_USER_PT;
3085 		d.pad = 0;
3086 		d.addr = va;
3087 		invpcid(&d, INVPCID_ADDR);
3088 	} else {
3089 		kcr3 = pmap->pm_cr3 | pcid | CR3_PCID_SAVE;
3090 		ucr3 = pmap->pm_ucr3 | pcid | PMAP_PCID_USER_PT | CR3_PCID_SAVE;
3091 		pmap_pti_pcid_invlpg(ucr3, kcr3, va);
3092 	}
3093 }
3094 
3095 static void
3096 pmap_invalidate_page_pcid_invpcid_cb(pmap_t pmap, vm_offset_t va)
3097 {
3098 	pmap_invalidate_page_pcid_cb(pmap, va, true);
3099 }
3100 
3101 static void
3102 pmap_invalidate_page_pcid_noinvpcid_cb(pmap_t pmap, vm_offset_t va)
3103 {
3104 	pmap_invalidate_page_pcid_cb(pmap, va, false);
3105 }
3106 
3107 static void
3108 pmap_invalidate_page_nopcid_cb(pmap_t pmap __unused, vm_offset_t va __unused)
3109 {
3110 }
3111 
3112 DEFINE_IFUNC(static, void, pmap_invalidate_page_cb, (pmap_t, vm_offset_t))
3113 {
3114 	if (pmap_pcid_enabled)
3115 		return (invpcid_works ? pmap_invalidate_page_pcid_invpcid_cb :
3116 		    pmap_invalidate_page_pcid_noinvpcid_cb);
3117 	return (pmap_invalidate_page_nopcid_cb);
3118 }
3119 
3120 static void
3121 pmap_invalidate_page_curcpu_cb(pmap_t pmap, vm_offset_t va,
3122     vm_offset_t addr2 __unused)
3123 {
3124 	if (pmap == kernel_pmap) {
3125 		invlpg(va);
3126 	} else if (pmap == PCPU_GET(curpmap)) {
3127 		invlpg(va);
3128 		pmap_invalidate_page_cb(pmap, va);
3129 	}
3130 }
3131 
3132 void
3133 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
3134 {
3135 	if (pmap_type_guest(pmap)) {
3136 		pmap_invalidate_ept(pmap);
3137 		return;
3138 	}
3139 
3140 	KASSERT(pmap->pm_type == PT_X86,
3141 	    ("pmap_invalidate_page: invalid type %d", pmap->pm_type));
3142 
3143 	pmap_invalidate_preipi(pmap);
3144 	smp_masked_invlpg(va, pmap, pmap_invalidate_page_curcpu_cb);
3145 }
3146 
3147 /* 4k PTEs -- Chosen to exceed the total size of Broadwell L2 TLB */
3148 #define	PMAP_INVLPG_THRESHOLD	(4 * 1024 * PAGE_SIZE)
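/*
 * That is 4096 4KB PTEs, i.e. 16 MB of VA: ranges at least this large are
 * handled by a full pmap_invalidate_all() rather than per-page INVLPG.
 */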
3149 
3150 static void
3151 pmap_invalidate_range_pcid_cb(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
3152     const bool invpcid_works1)
3153 {
3154 	struct invpcid_descr d;
3155 	uint64_t kcr3, ucr3;
3156 	uint32_t pcid;
3157 	u_int cpuid;
3158 
3159 	CRITICAL_ASSERT(curthread);
3160 
3161 	if (pmap != PCPU_GET(curpmap) ||
3162 	    pmap->pm_ucr3 == PMAP_NO_CR3 ||
3163 	    PCPU_GET(ucr3_load_mask) != PMAP_UCR3_NOMASK)
3164 		return;
3165 
3166 	cpuid = PCPU_GET(cpuid);
3167 
3168 	pcid = pmap->pm_pcids[cpuid].pm_pcid;
3169 	if (invpcid_works1) {
3170 		d.pcid = pcid | PMAP_PCID_USER_PT;
3171 		d.pad = 0;
3172 		for (d.addr = sva; d.addr < eva; d.addr += PAGE_SIZE)
3173 			invpcid(&d, INVPCID_ADDR);
3174 	} else {
3175 		kcr3 = pmap->pm_cr3 | pcid | CR3_PCID_SAVE;
3176 		ucr3 = pmap->pm_ucr3 | pcid | PMAP_PCID_USER_PT | CR3_PCID_SAVE;
3177 		pmap_pti_pcid_invlrng(ucr3, kcr3, sva, eva);
3178 	}
3179 }
3180 
3181 static void
3182 pmap_invalidate_range_pcid_invpcid_cb(pmap_t pmap, vm_offset_t sva,
3183     vm_offset_t eva)
3184 {
3185 	pmap_invalidate_range_pcid_cb(pmap, sva, eva, true);
3186 }
3187 
3188 static void
3189 pmap_invalidate_range_pcid_noinvpcid_cb(pmap_t pmap, vm_offset_t sva,
3190     vm_offset_t eva)
3191 {
3192 	pmap_invalidate_range_pcid_cb(pmap, sva, eva, false);
3193 }
3194 
3195 static void
3196 pmap_invalidate_range_nopcid_cb(pmap_t pmap __unused, vm_offset_t sva __unused,
3197     vm_offset_t eva __unused)
3198 {
3199 }
3200 
3201 DEFINE_IFUNC(static, void, pmap_invalidate_range_cb, (pmap_t, vm_offset_t,
3202     vm_offset_t))
3203 {
3204 	if (pmap_pcid_enabled)
3205 		return (invpcid_works ? pmap_invalidate_range_pcid_invpcid_cb :
3206 		    pmap_invalidate_range_pcid_noinvpcid_cb);
3207 	return (pmap_invalidate_range_nopcid_cb);
3208 }
3209 
3210 static void
3211 pmap_invalidate_range_curcpu_cb(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
3212 {
3213 	vm_offset_t addr;
3214 
3215 	if (pmap == kernel_pmap) {
3216 		for (addr = sva; addr < eva; addr += PAGE_SIZE)
3217 			invlpg(addr);
3218 	} else if (pmap == PCPU_GET(curpmap)) {
3219 		for (addr = sva; addr < eva; addr += PAGE_SIZE)
3220 			invlpg(addr);
3221 		pmap_invalidate_range_cb(pmap, sva, eva);
3222 	}
3223 }
3224 
3225 void
3226 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
3227 {
3228 	if (eva - sva >= PMAP_INVLPG_THRESHOLD) {
3229 		pmap_invalidate_all(pmap);
3230 		return;
3231 	}
3232 
3233 	if (pmap_type_guest(pmap)) {
3234 		pmap_invalidate_ept(pmap);
3235 		return;
3236 	}
3237 
3238 	KASSERT(pmap->pm_type == PT_X86,
3239 	    ("pmap_invalidate_range: invalid type %d", pmap->pm_type));
3240 
3241 	pmap_invalidate_preipi(pmap);
3242 	smp_masked_invlpg_range(sva, eva, pmap,
3243 	    pmap_invalidate_range_curcpu_cb);
3244 }
3245 
3246 static inline void
3247 pmap_invalidate_all_pcid_cb(pmap_t pmap, bool invpcid_works1)
3248 {
3249 	struct invpcid_descr d;
3250 	uint64_t kcr3;
3251 	uint32_t pcid;
3252 	u_int cpuid;
3253 
3254 	if (pmap == kernel_pmap) {
3255 		if (invpcid_works1) {
3256 			bzero(&d, sizeof(d));
3257 			invpcid(&d, INVPCID_CTXGLOB);
3258 		} else {
3259 			invltlb_glob();
3260 		}
3261 	} else if (pmap == PCPU_GET(curpmap)) {
3262 		CRITICAL_ASSERT(curthread);
3263 		cpuid = PCPU_GET(cpuid);
3264 
3265 		pcid = pmap->pm_pcids[cpuid].pm_pcid;
3266 		if (invpcid_works1) {
3267 			d.pcid = pcid;
3268 			d.pad = 0;
3269 			d.addr = 0;
3270 			invpcid(&d, INVPCID_CTX);
3271 		} else {
3272 			kcr3 = pmap->pm_cr3 | pcid;
3273 			load_cr3(kcr3);
3274 		}
3275 		if (pmap->pm_ucr3 != PMAP_NO_CR3)
3276 			PCPU_SET(ucr3_load_mask, ~CR3_PCID_SAVE);
3277 	}
3278 }
3279 
3280 static void
3281 pmap_invalidate_all_pcid_invpcid_cb(pmap_t pmap)
3282 {
3283 	pmap_invalidate_all_pcid_cb(pmap, true);
3284 }
3285 
3286 static void
3287 pmap_invalidate_all_pcid_noinvpcid_cb(pmap_t pmap)
3288 {
3289 	pmap_invalidate_all_pcid_cb(pmap, false);
3290 }
3291 
3292 static void
3293 pmap_invalidate_all_nopcid_cb(pmap_t pmap)
3294 {
3295 	if (pmap == kernel_pmap)
3296 		invltlb_glob();
3297 	else if (pmap == PCPU_GET(curpmap))
3298 		invltlb();
3299 }
3300 
3301 DEFINE_IFUNC(static, void, pmap_invalidate_all_cb, (pmap_t))
3302 {
3303 	if (pmap_pcid_enabled)
3304 		return (invpcid_works ? pmap_invalidate_all_pcid_invpcid_cb :
3305 		    pmap_invalidate_all_pcid_noinvpcid_cb);
3306 	return (pmap_invalidate_all_nopcid_cb);
3307 }
3308 
3309 static void
3310 pmap_invalidate_all_curcpu_cb(pmap_t pmap, vm_offset_t addr1 __unused,
3311     vm_offset_t addr2 __unused)
3312 {
3313 	pmap_invalidate_all_cb(pmap);
3314 }
3315 
3316 void
3317 pmap_invalidate_all(pmap_t pmap)
3318 {
3319 	if (pmap_type_guest(pmap)) {
3320 		pmap_invalidate_ept(pmap);
3321 		return;
3322 	}
3323 
3324 	KASSERT(pmap->pm_type == PT_X86,
3325 	    ("pmap_invalidate_all: invalid type %d", pmap->pm_type));
3326 
3327 	pmap_invalidate_preipi(pmap);
3328 	smp_masked_invltlb(pmap, pmap_invalidate_all_curcpu_cb);
3329 }
3330 
3331 static void
3332 pmap_invalidate_cache_curcpu_cb(pmap_t pmap __unused, vm_offset_t va __unused,
3333     vm_offset_t addr2 __unused)
3334 {
3335 	wbinvd();
3336 }
3337 
3338 void
3339 pmap_invalidate_cache(void)
3340 {
3341 	sched_pin();
3342 	smp_cache_flush(pmap_invalidate_cache_curcpu_cb);
3343 }
3344 
3345 struct pde_action {
3346 	cpuset_t invalidate;	/* processors that invalidate their TLB */
3347 	pmap_t pmap;
3348 	vm_offset_t va;
3349 	pd_entry_t *pde;
3350 	pd_entry_t newpde;
3351 	u_int store;		/* processor that updates the PDE */
3352 };
3353 
3354 static void
3355 pmap_update_pde_action(void *arg)
3356 {
3357 	struct pde_action *act = arg;
3358 
3359 	if (act->store == PCPU_GET(cpuid))
3360 		pmap_update_pde_store(act->pmap, act->pde, act->newpde);
3361 }
3362 
3363 static void
3364 pmap_update_pde_teardown(void *arg)
3365 {
3366 	struct pde_action *act = arg;
3367 
3368 	if (CPU_ISSET(PCPU_GET(cpuid), &act->invalidate))
3369 		pmap_update_pde_invalidate(act->pmap, act->va, act->newpde);
3370 }
3371 
3372 /*
3373  * Change the page size for the specified virtual address in a way that
3374  * prevents any possibility of the TLB ever having two entries that map the
3375  * same virtual address using different page sizes.  This is the recommended
3376  * workaround for Erratum 383 on AMD Family 10h processors.  It prevents a
3377  * machine check exception for a TLB state that is improperly diagnosed as a
3378  * hardware error.
3379  */
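/*
 * A sketch of the sequence implemented below: the CPU recorded in
 * "store" is the only one that writes the new PDE during the
 * rendezvous action; afterwards every CPU listed in "invalidate"
 * flushes the old translation in the teardown, so no TLB is left
 * caching both page sizes for the same address.
 */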
3380 static void
3381 pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
3382 {
3383 	struct pde_action act;
3384 	cpuset_t active, other_cpus;
3385 	u_int cpuid;
3386 
3387 	sched_pin();
3388 	cpuid = PCPU_GET(cpuid);
3389 	other_cpus = all_cpus;
3390 	CPU_CLR(cpuid, &other_cpus);
3391 	if (pmap == kernel_pmap || pmap_type_guest(pmap))
3392 		active = all_cpus;
3393 	else {
3394 		active = pmap->pm_active;
3395 	}
3396 	if (CPU_OVERLAP(&active, &other_cpus)) {
3397 		act.store = cpuid;
3398 		act.invalidate = active;
3399 		act.va = va;
3400 		act.pmap = pmap;
3401 		act.pde = pde;
3402 		act.newpde = newpde;
3403 		CPU_SET(cpuid, &active);
3404 		smp_rendezvous_cpus(active,
3405 		    smp_no_rendezvous_barrier, pmap_update_pde_action,
3406 		    pmap_update_pde_teardown, &act);
3407 	} else {
3408 		pmap_update_pde_store(pmap, pde, newpde);
3409 		if (CPU_ISSET(cpuid, &active))
3410 			pmap_update_pde_invalidate(pmap, va, newpde);
3411 	}
3412 	sched_unpin();
3413 }
3414 #else /* !SMP */
3415 /*
3416  * Normal, non-SMP, invalidation functions.
3417  */
3418 void
3419 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
3420 {
3421 	struct invpcid_descr d;
3422 	uint64_t kcr3, ucr3;
3423 	uint32_t pcid;
3424 
3425 	if (pmap->pm_type == PT_RVI || pmap->pm_type == PT_EPT) {
3426 		pmap->pm_eptgen++;
3427 		return;
3428 	}
3429 	KASSERT(pmap->pm_type == PT_X86,
3430 	    ("pmap_invalidate_page: unknown type %d", pmap->pm_type));
3431 
3432 	if (pmap == kernel_pmap || pmap == PCPU_GET(curpmap)) {
3433 		invlpg(va);
3434 		if (pmap == PCPU_GET(curpmap) && pmap_pcid_enabled &&
3435 		    pmap->pm_ucr3 != PMAP_NO_CR3) {
3436 			critical_enter();
3437 			pcid = pmap->pm_pcids[0].pm_pcid;
3438 			if (invpcid_works) {
3439 				d.pcid = pcid | PMAP_PCID_USER_PT;
3440 				d.pad = 0;
3441 				d.addr = va;
3442 				invpcid(&d, INVPCID_ADDR);
3443 			} else {
3444 				kcr3 = pmap->pm_cr3 | pcid | CR3_PCID_SAVE;
3445 				ucr3 = pmap->pm_ucr3 | pcid |
3446 				    PMAP_PCID_USER_PT | CR3_PCID_SAVE;
3447 				pmap_pti_pcid_invlpg(ucr3, kcr3, va);
3448 			}
3449 			critical_exit();
3450 		}
3451 	} else if (pmap_pcid_enabled)
3452 		pmap->pm_pcids[0].pm_gen = 0;
3453 }
3454 
3455 void
3456 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
3457 {
3458 	struct invpcid_descr d;
3459 	vm_offset_t addr;
3460 	uint64_t kcr3, ucr3;
3461 
3462 	if (pmap->pm_type == PT_RVI || pmap->pm_type == PT_EPT) {
3463 		pmap->pm_eptgen++;
3464 		return;
3465 	}
3466 	KASSERT(pmap->pm_type == PT_X86,
3467 	    ("pmap_invalidate_range: unknown type %d", pmap->pm_type));
3468 
3469 	if (pmap == kernel_pmap || pmap == PCPU_GET(curpmap)) {
3470 		for (addr = sva; addr < eva; addr += PAGE_SIZE)
3471 			invlpg(addr);
3472 		if (pmap == PCPU_GET(curpmap) && pmap_pcid_enabled &&
3473 		    pmap->pm_ucr3 != PMAP_NO_CR3) {
3474 			critical_enter();
3475 			if (invpcid_works) {
3476 				d.pcid = pmap->pm_pcids[0].pm_pcid |
3477 				    PMAP_PCID_USER_PT;
3478 				d.pad = 0;
3479 				d.addr = sva;
3480 				for (; d.addr < eva; d.addr += PAGE_SIZE)
3481 					invpcid(&d, INVPCID_ADDR);
3482 			} else {
3483 				kcr3 = pmap->pm_cr3 | pmap->pm_pcids[0].
3484 				    pm_pcid | CR3_PCID_SAVE;
3485 				ucr3 = pmap->pm_ucr3 | pmap->pm_pcids[0].
3486 				    pm_pcid | PMAP_PCID_USER_PT | CR3_PCID_SAVE;
3487 				pmap_pti_pcid_invlrng(ucr3, kcr3, sva, eva);
3488 			}
3489 			critical_exit();
3490 		}
3491 	} else if (pmap_pcid_enabled) {
3492 		pmap->pm_pcids[0].pm_gen = 0;
3493 	}
3494 }
3495 
3496 void
3497 pmap_invalidate_all(pmap_t pmap)
3498 {
3499 	struct invpcid_descr d;
3500 	uint64_t kcr3, ucr3;
3501 
3502 	if (pmap->pm_type == PT_RVI || pmap->pm_type == PT_EPT) {
3503 		pmap->pm_eptgen++;
3504 		return;
3505 	}
3506 	KASSERT(pmap->pm_type == PT_X86,
3507 	    ("pmap_invalidate_all: unknown type %d", pmap->pm_type));
3508 
3509 	if (pmap == kernel_pmap) {
3510 		if (pmap_pcid_enabled && invpcid_works) {
3511 			bzero(&d, sizeof(d));
3512 			invpcid(&d, INVPCID_CTXGLOB);
3513 		} else {
3514 			invltlb_glob();
3515 		}
3516 	} else if (pmap == PCPU_GET(curpmap)) {
3517 		if (pmap_pcid_enabled) {
3518 			critical_enter();
3519 			if (invpcid_works) {
3520 				d.pcid = pmap->pm_pcids[0].pm_pcid;
3521 				d.pad = 0;
3522 				d.addr = 0;
3523 				invpcid(&d, INVPCID_CTX);
3524 				if (pmap->pm_ucr3 != PMAP_NO_CR3) {
3525 					d.pcid |= PMAP_PCID_USER_PT;
3526 					invpcid(&d, INVPCID_CTX);
3527 				}
3528 			} else {
3529 				kcr3 = pmap->pm_cr3 | pmap->pm_pcids[0].pm_pcid;
3530 				if (pmap->pm_ucr3 != PMAP_NO_CR3) {
3531 					ucr3 = pmap->pm_ucr3 | pmap->pm_pcids[
3532 					    0].pm_pcid | PMAP_PCID_USER_PT;
3533 					pmap_pti_pcid_invalidate(ucr3, kcr3);
3534 				} else
3535 					load_cr3(kcr3);
3536 			}
3537 			critical_exit();
3538 		} else {
3539 			invltlb();
3540 		}
3541 	} else if (pmap_pcid_enabled) {
3542 		pmap->pm_pcids[0].pm_gen = 0;
3543 	}
3544 }
3545 
3546 PMAP_INLINE void
3547 pmap_invalidate_cache(void)
3548 {
3549 
3550 	wbinvd();
3551 }
3552 
3553 static void
3554 pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
3555 {
3556 
3557 	pmap_update_pde_store(pmap, pde, newpde);
3558 	if (pmap == kernel_pmap || pmap == PCPU_GET(curpmap))
3559 		pmap_update_pde_invalidate(pmap, va, newpde);
3560 	else
3561 		pmap->pm_pcids[0].pm_gen = 0;
3562 }
3563 #endif /* !SMP */
3564 
3565 static void
3566 pmap_invalidate_pde_page(pmap_t pmap, vm_offset_t va, pd_entry_t pde)
3567 {
3568 
3569 	/*
3570 	 * When the PDE has PG_PROMOTED set, the 2MB page mapping was created
3571 	 * by a promotion that did not invalidate the 512 4KB page mappings
3572 	 * that might exist in the TLB.  Consequently, at this point, the TLB
3573 	 * may hold both 4KB and 2MB page mappings for the address range [va,
3574 	 * va + NBPDR).  Therefore, the entire range must be invalidated here.
3575 	 * In contrast, when PG_PROMOTED is clear, the TLB will not hold any
3576 	 * 4KB page mappings for the address range [va, va + NBPDR), and so a
3577 	 * single INVLPG suffices to invalidate the 2MB page mapping from the
3578 	 * TLB.
3579 	 */
3580 	if ((pde & PG_PROMOTED) != 0)
3581 		pmap_invalidate_range(pmap, va, va + NBPDR - 1);
3582 	else
3583 		pmap_invalidate_page(pmap, va);
3584 }
3585 
3586 DEFINE_IFUNC(, void, pmap_invalidate_cache_range,
3587     (vm_offset_t sva, vm_offset_t eva))
3588 {
3589 
3590 	if ((cpu_feature & CPUID_SS) != 0)
3591 		return (pmap_invalidate_cache_range_selfsnoop);
3592 	if ((cpu_feature & CPUID_CLFSH) != 0)
3593 		return (pmap_force_invalidate_cache_range);
3594 	return (pmap_invalidate_cache_range_all);
3595 }
3596 
3597 #define PMAP_CLFLUSH_THRESHOLD   (2 * 1024 * 1024)
3598 
3599 static void
3600 pmap_invalidate_cache_range_check_align(vm_offset_t sva, vm_offset_t eva)
3601 {
3602 
3603 	KASSERT((sva & PAGE_MASK) == 0,
3604 	    ("pmap_invalidate_cache_range: sva not page-aligned"));
3605 	KASSERT((eva & PAGE_MASK) == 0,
3606 	    ("pmap_invalidate_cache_range: eva not page-aligned"));
3607 }
3608 
3609 static void
3610 pmap_invalidate_cache_range_selfsnoop(vm_offset_t sva, vm_offset_t eva)
3611 {
3612 
3613 	pmap_invalidate_cache_range_check_align(sva, eva);
3614 }
3615 
3616 void
3617 pmap_force_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
3618 {
3619 
3620 	sva &= ~(vm_offset_t)(cpu_clflush_line_size - 1);
3621 
3622 	/*
3623 	 * XXX: Some CPUs fault, hang, or trash the local APIC
3624 	 * registers if we use CLFLUSH on the local APIC range.  The
3625 	 * local APIC is always uncached, so we don't need to flush
3626 	 * for that range anyway.
3627 	 */
3628 	if (pmap_kextract(sva) == lapic_paddr)
3629 		return;
3630 
3631 	if ((cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0) {
3632 		/*
3633 		 * Do per-cache line flush.  Use a locked
3634 		 * instruction to ensure that previous stores are
3635 		 * included in the write-back.  The processor
3636 		 * propagates flush to other processors in the cache
3637 		 * coherence domain.
3638 		 */
3639 		atomic_thread_fence_seq_cst();
3640 		for (; sva < eva; sva += cpu_clflush_line_size)
3641 			clflushopt(sva);
3642 		atomic_thread_fence_seq_cst();
3643 	} else {
3644 		/*
3645 		 * Writes are ordered by CLFLUSH on Intel CPUs.
3646 		 */
3647 		if (cpu_vendor_id != CPU_VENDOR_INTEL)
3648 			mfence();
3649 		for (; sva < eva; sva += cpu_clflush_line_size)
3650 			clflush(sva);
3651 		if (cpu_vendor_id != CPU_VENDOR_INTEL)
3652 			mfence();
3653 	}
3654 }
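
/*
 * Illustrative (hypothetical) use of the interface above: after the CPU
 * writes a buffer that a non-coherent consumer will read from memory,
 * the caller flushes the dirty lines covering it:
 *
 *	pmap_force_invalidate_cache_range(buf_va, buf_va + buf_len);
 *
 * "buf_va" and "buf_len" are placeholders; the function itself rounds
 * "sva" down to a cache line boundary.
 */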
3655 
3656 static void
3657 pmap_invalidate_cache_range_all(vm_offset_t sva, vm_offset_t eva)
3658 {
3659 
3660 	pmap_invalidate_cache_range_check_align(sva, eva);
3661 	pmap_invalidate_cache();
3662 }
3663 
3664 /*
3665  * Remove the specified set of pages from the data and instruction caches.
3666  *
3667  * In contrast to pmap_invalidate_cache_range(), this function does not
3668  * rely on the CPU's self-snoop feature, because it is intended for use
3669  * when moving pages into a different cache domain.
3670  */
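/*
 * For illustration: with 4KB pages and the 2MB PMAP_CLFLUSH_THRESHOLD
 * defined above, a request for 512 or more pages falls back to a full
 * cache flush instead of per-line CLFLUSH/CLFLUSHOPT.
 */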
3671 void
3672 pmap_invalidate_cache_pages(vm_page_t *pages, int count)
3673 {
3674 	vm_offset_t daddr, eva;
3675 	int i;
3676 	bool useclflushopt;
3677 
3678 	useclflushopt = (cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0;
3679 	if (count >= PMAP_CLFLUSH_THRESHOLD / PAGE_SIZE ||
3680 	    ((cpu_feature & CPUID_CLFSH) == 0 && !useclflushopt))
3681 		pmap_invalidate_cache();
3682 	else {
3683 		if (useclflushopt)
3684 			atomic_thread_fence_seq_cst();
3685 		else if (cpu_vendor_id != CPU_VENDOR_INTEL)
3686 			mfence();
3687 		for (i = 0; i < count; i++) {
3688 			daddr = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pages[i]));
3689 			eva = daddr + PAGE_SIZE;
3690 			for (; daddr < eva; daddr += cpu_clflush_line_size) {
3691 				if (useclflushopt)
3692 					clflushopt(daddr);
3693 				else
3694 					clflush(daddr);
3695 			}
3696 		}
3697 		if (useclflushopt)
3698 			atomic_thread_fence_seq_cst();
3699 		else if (cpu_vendor_id != CPU_VENDOR_INTEL)
3700 			mfence();
3701 	}
3702 }
3703 
3704 void
3705 pmap_flush_cache_range(vm_offset_t sva, vm_offset_t eva)
3706 {
3707 
3708 	pmap_invalidate_cache_range_check_align(sva, eva);
3709 
3710 	if ((cpu_stdext_feature & CPUID_STDEXT_CLWB) == 0) {
3711 		pmap_force_invalidate_cache_range(sva, eva);
3712 		return;
3713 	}
3714 
3715 	/* See comment in pmap_force_invalidate_cache_range(). */
3716 	if (pmap_kextract(sva) == lapic_paddr)
3717 		return;
3718 
3719 	atomic_thread_fence_seq_cst();
3720 	for (; sva < eva; sva += cpu_clflush_line_size)
3721 		clwb(sva);
3722 	atomic_thread_fence_seq_cst();
3723 }
3724 
3725 void
3726 pmap_flush_cache_phys_range(vm_paddr_t spa, vm_paddr_t epa, vm_memattr_t mattr)
3727 {
3728 	pt_entry_t *pte;
3729 	vm_offset_t vaddr;
3730 	int error __diagused;
3731 	int pte_bits;
3732 
3733 	KASSERT((spa & PAGE_MASK) == 0,
3734 	    ("pmap_flush_cache_phys_range: spa not page-aligned"));
3735 	KASSERT((epa & PAGE_MASK) == 0,
3736 	    ("pmap_flush_cache_phys_range: epa not page-aligned"));
3737 
3738 	if (spa < dmaplimit) {
3739 		pmap_flush_cache_range(PHYS_TO_DMAP(spa), PHYS_TO_DMAP(MIN(
3740 		    dmaplimit, epa)));
3741 		if (dmaplimit >= epa)
3742 			return;
3743 		spa = dmaplimit;
3744 	}
3745 
3746 	pte_bits = pmap_cache_bits(kernel_pmap, mattr, 0) | X86_PG_RW |
3747 	    X86_PG_V;
3748 	error = vmem_alloc(kernel_arena, PAGE_SIZE, M_BESTFIT | M_WAITOK,
3749 	    &vaddr);
3750 	KASSERT(error == 0, ("vmem_alloc failed: %d", error));
3751 	pte = vtopte(vaddr);
3752 	for (; spa < epa; spa += PAGE_SIZE) {
3753 		sched_pin();
3754 		pte_store(pte, spa | pte_bits);
3755 		invlpg(vaddr);
3756 		/* XXXKIB atomic inside flush_cache_range are excessive */
3757 		pmap_flush_cache_range(vaddr, vaddr + PAGE_SIZE);
3758 		sched_unpin();
3759 	}
3760 	vmem_free(kernel_arena, vaddr, PAGE_SIZE);
3761 }
3762 
3763 /*
3764  *	Routine:	pmap_extract
3765  *	Function:
3766  *		Extract the physical page address associated
3767  *		with the given map/virtual_address pair.
3768  */
3769 vm_paddr_t
3770 pmap_extract(pmap_t pmap, vm_offset_t va)
3771 {
3772 	pdp_entry_t *pdpe;
3773 	pd_entry_t *pde;
3774 	pt_entry_t *pte, PG_V;
3775 	vm_paddr_t pa;
3776 
3777 	pa = 0;
3778 	PG_V = pmap_valid_bit(pmap);
3779 	PMAP_LOCK(pmap);
3780 	pdpe = pmap_pdpe(pmap, va);
3781 	if (pdpe != NULL && (*pdpe & PG_V) != 0) {
3782 		if ((*pdpe & PG_PS) != 0)
3783 			pa = (*pdpe & PG_PS_FRAME) | (va & PDPMASK);
3784 		else {
3785 			pde = pmap_pdpe_to_pde(pdpe, va);
3786 			if ((*pde & PG_V) != 0) {
3787 				if ((*pde & PG_PS) != 0) {
3788 					pa = (*pde & PG_PS_FRAME) |
3789 					    (va & PDRMASK);
3790 				} else {
3791 					pte = pmap_pde_to_pte(pde, va);
3792 					pa = (*pte & PG_FRAME) |
3793 					    (va & PAGE_MASK);
3794 				}
3795 			}
3796 		}
3797 	}
3798 	PMAP_UNLOCK(pmap);
3799 	return (pa);
3800 }
3801 
3802 /*
3803  *	Routine:	pmap_extract_and_hold
3804  *	Function:
3805  *		Atomically extract and hold the physical page
3806  *		with the given pmap and virtual address pair
3807  *		if that mapping permits the given protection.
3808  */
3809 vm_page_t
3810 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
3811 {
3812 	pdp_entry_t pdpe, *pdpep;
3813 	pd_entry_t pde, *pdep;
3814 	pt_entry_t pte, PG_RW, PG_V;
3815 	vm_page_t m;
3816 
3817 	m = NULL;
3818 	PG_RW = pmap_rw_bit(pmap);
3819 	PG_V = pmap_valid_bit(pmap);
3820 	PMAP_LOCK(pmap);
3821 
3822 	pdpep = pmap_pdpe(pmap, va);
3823 	if (pdpep == NULL || ((pdpe = *pdpep) & PG_V) == 0)
3824 		goto out;
3825 	if ((pdpe & PG_PS) != 0) {
3826 		if ((pdpe & PG_RW) == 0 && (prot & VM_PROT_WRITE) != 0)
3827 			goto out;
3828 		m = PHYS_TO_VM_PAGE((pdpe & PG_PS_FRAME) | (va & PDPMASK));
3829 		goto check_page;
3830 	}
3831 
3832 	pdep = pmap_pdpe_to_pde(pdpep, va);
3833 	if (pdep == NULL || ((pde = *pdep) & PG_V) == 0)
3834 		goto out;
3835 	if ((pde & PG_PS) != 0) {
3836 		if ((pde & PG_RW) == 0 && (prot & VM_PROT_WRITE) != 0)
3837 			goto out;
3838 		m = PHYS_TO_VM_PAGE((pde & PG_PS_FRAME) | (va & PDRMASK));
3839 		goto check_page;
3840 	}
3841 
3842 	pte = *pmap_pde_to_pte(pdep, va);
3843 	if ((pte & PG_V) == 0 ||
3844 	    ((pte & PG_RW) == 0 && (prot & VM_PROT_WRITE) != 0))
3845 		goto out;
3846 	m = PHYS_TO_VM_PAGE(pte & PG_FRAME);
3847 
3848 check_page:
3849 	if (m != NULL && !vm_page_wire_mapped(m))
3850 		m = NULL;
3851 out:
3852 	PMAP_UNLOCK(pmap);
3853 	return (m);
3854 }
3855 
3856 vm_paddr_t
3857 pmap_kextract(vm_offset_t va)
3858 {
3859 	pd_entry_t pde;
3860 	vm_paddr_t pa;
3861 
3862 	if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) {
3863 		pa = DMAP_TO_PHYS(va);
3864 	} else if (PMAP_ADDRESS_IN_LARGEMAP(va)) {
3865 		pa = pmap_large_map_kextract(va);
3866 	} else {
3867 		pde = *vtopde(va);
3868 		if (pde & PG_PS) {
3869 			pa = (pde & PG_PS_FRAME) | (va & PDRMASK);
3870 		} else {
3871 			/*
3872 			 * Beware of a concurrent promotion that changes the
3873 			 * PDE at this point!  For example, vtopte() must not
3874 			 * be used to access the PTE because it would use the
3875 			 * new PDE.  It is, however, safe to use the old PDE
3876 			 * because the page table page is preserved by the
3877 			 * promotion.
3878 			 */
3879 			pa = *pmap_pde_to_pte(&pde, va);
3880 			pa = (pa & PG_FRAME) | (va & PAGE_MASK);
3881 		}
3882 	}
3883 	return (pa);
3884 }
3885 
3886 /***************************************************
3887  * Low level mapping routines.....
3888  ***************************************************/
3889 
3890 /*
3891  * Add a wired page to the kva.
3892  * Note: not SMP coherent.
3893  */
3894 PMAP_INLINE void
3895 pmap_kenter(vm_offset_t va, vm_paddr_t pa)
3896 {
3897 	pt_entry_t *pte;
3898 
3899 	pte = vtopte(va);
3900 	pte_store(pte, pa | pg_g | pg_nx | X86_PG_A | X86_PG_M |
3901 	    X86_PG_RW | X86_PG_V);
3902 }
3903 
3904 static __inline void
3905 pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode)
3906 {
3907 	pt_entry_t *pte;
3908 	int cache_bits;
3909 
3910 	pte = vtopte(va);
3911 	cache_bits = pmap_cache_bits(kernel_pmap, mode, 0);
3912 	pte_store(pte, pa | pg_g | pg_nx | X86_PG_A | X86_PG_M |
3913 	    X86_PG_RW | X86_PG_V | cache_bits);
3914 }
3915 
3916 /*
3917  * Remove a page from the kernel pagetables.
3918  * Note: not SMP coherent.
3919  */
3920 PMAP_INLINE void
3921 pmap_kremove(vm_offset_t va)
3922 {
3923 	pt_entry_t *pte;
3924 
3925 	pte = vtopte(va);
3926 	pte_clear(pte);
3927 }
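
/*
 * Because pmap_kenter() and pmap_kremove() are not SMP coherent, a
 * caller creating a temporary mapping is expected to invalidate the
 * TLB itself; a sketch of the pattern ("tmp_va" and "pa" are
 * placeholders):
 *
 *	pmap_kenter(tmp_va, pa);
 *	... access the mapping ...
 *	pmap_kremove(tmp_va);
 *	pmap_invalidate_page(kernel_pmap, tmp_va);
 *
 * pmap_qremove() below uses the ranged form of the same pattern.
 */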
3928 
3929 /*
3930  *	Used to map a range of physical addresses into kernel
3931  *	virtual address space.
3932  *
3933  *	The value passed in '*virt' is a suggested virtual address for
3934  *	the mapping. Architectures which can support a direct-mapped
3935  *	physical to virtual region can return the appropriate address
3936  *	within that region, leaving '*virt' unchanged. Other
3937  *	architectures should map the pages starting at '*virt' and
3938  *	update '*virt' with the first usable address after the mapped
3939  *	region.
3940  */
3941 vm_offset_t
3942 pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
3943 {
3944 	return PHYS_TO_DMAP(start);
3945 }
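
/*
 * Illustrative (hypothetical) use of pmap_map(): the caller passes a
 * suggested KVA and uses the returned address; on amd64 the direct map
 * address is returned and '*virt' is left unchanged:
 *
 *	vm_offset_t va = suggested_va;
 *	vm_offset_t p = pmap_map(&va, start_pa, end_pa,
 *	    VM_PROT_READ | VM_PROT_WRITE);
 *
 * "suggested_va", "start_pa", and "end_pa" are placeholders.
 */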
3946 
3947 /*
3948  * Add a list of wired pages to the kva.
3949  * This routine is only used for temporary
3950  * kernel mappings that do not need to have
3951  * page modification or references recorded.
3952  * Note that old mappings are simply written
3953  * over.  The page *must* be wired.
3954  * Note: SMP coherent.  Uses a ranged shootdown IPI.
3955  */
3956 void
3957 pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
3958 {
3959 	pt_entry_t *endpte, oldpte, pa, *pte;
3960 	vm_page_t m;
3961 	int cache_bits;
3962 
3963 	oldpte = 0;
3964 	pte = vtopte(sva);
3965 	endpte = pte + count;
3966 	while (pte < endpte) {
3967 		m = *ma++;
3968 		cache_bits = pmap_cache_bits(kernel_pmap, m->md.pat_mode, 0);
3969 		pa = VM_PAGE_TO_PHYS(m) | cache_bits;
3970 		if ((*pte & (PG_FRAME | X86_PG_PTE_CACHE)) != pa) {
3971 			oldpte |= *pte;
3972 			pte_store(pte, pa | pg_g | pg_nx | X86_PG_A |
3973 			    X86_PG_M | X86_PG_RW | X86_PG_V);
3974 		}
3975 		pte++;
3976 	}
3977 	if (__predict_false((oldpte & X86_PG_V) != 0))
3978 		pmap_invalidate_range(kernel_pmap, sva, sva + count *
3979 		    PAGE_SIZE);
3980 }
3981 
3982 /*
3983  * This routine tears out page mappings from the
3984  * kernel -- it is meant only for temporary mappings.
3985  * Note: SMP coherent.  Uses a ranged shootdown IPI.
3986  */
3987 void
3988 pmap_qremove(vm_offset_t sva, int count)
3989 {
3990 	vm_offset_t va;
3991 
3992 	va = sva;
3993 	while (count-- > 0) {
3994 		KASSERT(va >= VM_MIN_KERNEL_ADDRESS, ("usermode va %lx", va));
3995 		pmap_kremove(va);
3996 		va += PAGE_SIZE;
3997 	}
3998 	pmap_invalidate_range(kernel_pmap, sva, va);
3999 }
4000 
4001 /***************************************************
4002  * Page table page management routines.....
4003  ***************************************************/
4004 /*
4005  * Schedule the specified unused page table page to be freed.  Specifically,
4006  * add the page to the specified list of pages that will be released to the
4007  * physical memory manager after the TLB has been updated.
4008  */
4009 static __inline void
4010 pmap_add_delayed_free_list(vm_page_t m, struct spglist *free,
4011     boolean_t set_PG_ZERO)
4012 {
4013 
4014 	if (set_PG_ZERO)
4015 		m->flags |= PG_ZERO;
4016 	else
4017 		m->flags &= ~PG_ZERO;
4018 	SLIST_INSERT_HEAD(free, m, plinks.s.ss);
4019 }
4020 
4021 /*
4022  * Inserts the specified page table page into the specified pmap's collection
4023  * of idle page table pages.  Each of a pmap's page table pages is responsible
4024  * for mapping a distinct range of virtual addresses.  The pmap's collection is
4025  * ordered by this virtual address range.
4026  *
4027  * If "promoted" is false, then the page table page "mpte" must be zero filled.
4028  */
4029 static __inline int
4030 pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte, bool promoted)
4031 {
4032 
4033 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4034 	mpte->valid = promoted ? VM_PAGE_BITS_ALL : 0;
4035 	return (vm_radix_insert(&pmap->pm_root, mpte));
4036 }
4037 
4038 /*
4039  * Removes the page table page mapping the specified virtual address from the
4040  * specified pmap's collection of idle page table pages, and returns it.
4041  * Otherwise, returns NULL if there is no page table page corresponding to the
4042  * specified virtual address.
4043  */
4044 static __inline vm_page_t
4045 pmap_remove_pt_page(pmap_t pmap, vm_offset_t va)
4046 {
4047 
4048 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4049 	return (vm_radix_remove(&pmap->pm_root, pmap_pde_pindex(va)));
4050 }
4051 
4052 /*
4053  * Decrements a page table page's reference count, which is used to record the
4054  * number of valid page table entries within the page.  If the reference count
4055  * drops to zero, then the page table page is unmapped.  Returns TRUE if the
4056  * page table page was unmapped and FALSE otherwise.
4057  */
4058 static inline boolean_t
4059 pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
4060 {
4061 
4062 	--m->ref_count;
4063 	if (m->ref_count == 0) {
4064 		_pmap_unwire_ptp(pmap, va, m, free);
4065 		return (TRUE);
4066 	} else
4067 		return (FALSE);
4068 }
4069 
4070 static void
4071 _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
4072 {
4073 	pml5_entry_t *pml5;
4074 	pml4_entry_t *pml4;
4075 	pdp_entry_t *pdp;
4076 	pd_entry_t *pd;
4077 	vm_page_t pdpg, pdppg, pml4pg;
4078 
4079 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4080 
4081 	/*
4082 	 * unmap the page table page
4083 	 */
4084 	if (m->pindex >= NUPDE + NUPDPE + NUPML4E) {
4085 		/* PML4 page */
4086 		MPASS(pmap_is_la57(pmap));
4087 		pml5 = pmap_pml5e(pmap, va);
4088 		*pml5 = 0;
4089 		if (pmap->pm_pmltopu != NULL && va <= VM_MAXUSER_ADDRESS) {
4090 			pml5 = pmap_pml5e_u(pmap, va);
4091 			*pml5 = 0;
4092 		}
4093 	} else if (m->pindex >= NUPDE + NUPDPE) {
4094 		/* PDP page */
4095 		pml4 = pmap_pml4e(pmap, va);
4096 		*pml4 = 0;
4097 		if (!pmap_is_la57(pmap) && pmap->pm_pmltopu != NULL &&
4098 		    va <= VM_MAXUSER_ADDRESS) {
4099 			pml4 = pmap_pml4e_u(pmap, va);
4100 			*pml4 = 0;
4101 		}
4102 	} else if (m->pindex >= NUPDE) {
4103 		/* PD page */
4104 		pdp = pmap_pdpe(pmap, va);
4105 		*pdp = 0;
4106 	} else {
4107 		/* PTE page */
4108 		pd = pmap_pde(pmap, va);
4109 		*pd = 0;
4110 	}
4111 	if (m->pindex < NUPDE) {
4112 		/* We just released a PT, unhold the matching PD */
4113 		pdpg = PHYS_TO_VM_PAGE(*pmap_pdpe(pmap, va) & PG_FRAME);
4114 		pmap_unwire_ptp(pmap, va, pdpg, free);
4115 	} else if (m->pindex < NUPDE + NUPDPE) {
4116 		/* We just released a PD, unhold the matching PDP */
4117 		pdppg = PHYS_TO_VM_PAGE(*pmap_pml4e(pmap, va) & PG_FRAME);
4118 		pmap_unwire_ptp(pmap, va, pdppg, free);
4119 	} else if (m->pindex < NUPDE + NUPDPE + NUPML4E && pmap_is_la57(pmap)) {
4120 		/* We just released a PDP, unhold the matching PML4 */
4121 		pml4pg = PHYS_TO_VM_PAGE(*pmap_pml5e(pmap, va) & PG_FRAME);
4122 		pmap_unwire_ptp(pmap, va, pml4pg, free);
4123 	}
4124 
4125 	pmap_pt_page_count_adj(pmap, -1);
4126 
4127 	/*
4128 	 * Put page on a list so that it is released after
4129 	 * *ALL* TLB shootdown is done
4130 	 */
4131 	pmap_add_delayed_free_list(m, free, TRUE);
4132 }
4133 
4134 /*
4135  * After removing a page table entry, this routine is used to
4136  * conditionally free the page, and manage the reference count.
4137  */
4138 static int
4139 pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t ptepde,
4140     struct spglist *free)
4141 {
4142 	vm_page_t mpte;
4143 
4144 	if (va >= VM_MAXUSER_ADDRESS)
4145 		return (0);
4146 	KASSERT(ptepde != 0, ("pmap_unuse_pt: ptepde != 0"));
4147 	mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME);
4148 	return (pmap_unwire_ptp(pmap, va, mpte, free));
4149 }
4150 
4151 /*
4152  * Release a page table page reference after a failed attempt to create a
4153  * mapping.
4154  */
4155 static void
4156 pmap_abort_ptp(pmap_t pmap, vm_offset_t va, vm_page_t mpte)
4157 {
4158 	struct spglist free;
4159 
4160 	SLIST_INIT(&free);
4161 	if (pmap_unwire_ptp(pmap, va, mpte, &free)) {
4162 		/*
4163 		 * Although "va" was never mapped, paging-structure caches
4164 		 * could nonetheless have entries that refer to the freed
4165 		 * page table pages.  Invalidate those entries.
4166 		 */
4167 		pmap_invalidate_page(pmap, va);
4168 		vm_page_free_pages_toq(&free, true);
4169 	}
4170 }
4171 
4172 void
4173 pmap_pinit0(pmap_t pmap)
4174 {
4175 	struct proc *p;
4176 	struct thread *td;
4177 	int i;
4178 
4179 	PMAP_LOCK_INIT(pmap);
4180 	pmap->pm_pmltop = kernel_pmap->pm_pmltop;
4181 	pmap->pm_pmltopu = NULL;
4182 	pmap->pm_cr3 = kernel_pmap->pm_cr3;
4183 	/* hack to keep pmap_pti_pcid_invalidate() alive */
4184 	pmap->pm_ucr3 = PMAP_NO_CR3;
4185 	vm_radix_init(&pmap->pm_root);
4186 	CPU_ZERO(&pmap->pm_active);
4187 	TAILQ_INIT(&pmap->pm_pvchunk);
4188 	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
4189 	pmap->pm_flags = pmap_flags;
4190 	CPU_FOREACH(i) {
4191 		pmap->pm_pcids[i].pm_pcid = PMAP_PCID_KERN + 1;
4192 		pmap->pm_pcids[i].pm_gen = 1;
4193 	}
4194 	pmap_activate_boot(pmap);
4195 	td = curthread;
4196 	if (pti) {
4197 		p = td->td_proc;
4198 		PROC_LOCK(p);
4199 		p->p_md.md_flags |= P_MD_KPTI;
4200 		PROC_UNLOCK(p);
4201 	}
4202 	pmap_thread_init_invl_gen(td);
4203 
4204 	if ((cpu_stdext_feature2 & CPUID_STDEXT2_PKU) != 0) {
4205 		pmap_pkru_ranges_zone = uma_zcreate("pkru ranges",
4206 		    sizeof(struct pmap_pkru_range), NULL, NULL, NULL, NULL,
4207 		    UMA_ALIGN_PTR, 0);
4208 	}
4209 }
4210 
4211 void
4212 pmap_pinit_pml4(vm_page_t pml4pg)
4213 {
4214 	pml4_entry_t *pm_pml4;
4215 	int i;
4216 
4217 	pm_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pml4pg));
4218 
4219 	/* Wire in kernel global address entries. */
4220 	for (i = 0; i < NKPML4E; i++) {
4221 		pm_pml4[KPML4BASE + i] = (KPDPphys + ptoa(i)) | X86_PG_RW |
4222 		    X86_PG_V;
4223 	}
4224 #ifdef KASAN
4225 	for (i = 0; i < NKASANPML4E; i++) {
4226 		pm_pml4[KASANPML4I + i] = (KASANPDPphys + ptoa(i)) | X86_PG_RW |
4227 		    X86_PG_V | pg_nx;
4228 	}
4229 #endif
4230 #ifdef KMSAN
4231 	for (i = 0; i < NKMSANSHADPML4E; i++) {
4232 		pm_pml4[KMSANSHADPML4I + i] = (KMSANSHADPDPphys + ptoa(i)) |
4233 		    X86_PG_RW | X86_PG_V | pg_nx;
4234 	}
4235 	for (i = 0; i < NKMSANORIGPML4E; i++) {
4236 		pm_pml4[KMSANORIGPML4I + i] = (KMSANORIGPDPphys + ptoa(i)) |
4237 		    X86_PG_RW | X86_PG_V | pg_nx;
4238 	}
4239 #endif
4240 	for (i = 0; i < ndmpdpphys; i++) {
4241 		pm_pml4[DMPML4I + i] = (DMPDPphys + ptoa(i)) | X86_PG_RW |
4242 		    X86_PG_V;
4243 	}
4244 
4245 	/* install self-referential address mapping entry(s) */
4246 	pm_pml4[PML4PML4I] = VM_PAGE_TO_PHYS(pml4pg) | X86_PG_V | X86_PG_RW |
4247 	    X86_PG_A | X86_PG_M;
4248 
4249 	/* install large map entries if configured */
4250 	for (i = 0; i < lm_ents; i++)
4251 		pm_pml4[LMSPML4I + i] = kernel_pmap->pm_pmltop[LMSPML4I + i];
4252 }
4253 
4254 void
4255 pmap_pinit_pml5(vm_page_t pml5pg)
4256 {
4257 	pml5_entry_t *pm_pml5;
4258 
4259 	pm_pml5 = (pml5_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pml5pg));
4260 
4261 	/*
4262 	 * Add pml5 entry at top of KVA pointing to existing pml4 table,
4263 	 * entering all existing kernel mappings into level 5 table.
4264 	 */
4265 	pm_pml5[pmap_pml5e_index(UPT_MAX_ADDRESS)] = KPML4phys | X86_PG_V |
4266 	    X86_PG_RW | X86_PG_A | X86_PG_M | pg_g |
4267 	    pmap_cache_bits(kernel_pmap, VM_MEMATTR_DEFAULT, FALSE);
4268 
4269 	/*
4270 	 * Install self-referential address mapping entry.
4271 	 */
4272 	pm_pml5[PML5PML5I] = VM_PAGE_TO_PHYS(pml5pg) |
4273 	    X86_PG_RW | X86_PG_V | X86_PG_M | X86_PG_A |
4274 	    pmap_cache_bits(kernel_pmap, VM_MEMATTR_DEFAULT, FALSE);
4275 }
4276 
4277 static void
4278 pmap_pinit_pml4_pti(vm_page_t pml4pgu)
4279 {
4280 	pml4_entry_t *pm_pml4u;
4281 	int i;
4282 
4283 	pm_pml4u = (pml4_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pml4pgu));
4284 	for (i = 0; i < NPML4EPG; i++)
4285 		pm_pml4u[i] = pti_pml4[i];
4286 }
4287 
4288 static void
4289 pmap_pinit_pml5_pti(vm_page_t pml5pgu)
4290 {
4291 	pml5_entry_t *pm_pml5u;
4292 
4293 	pm_pml5u = (pml5_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pml5pgu));
4294 	pagezero(pm_pml5u);
4295 
4296 	/*
4297 	 * Add pml5 entry at top of KVA pointing to existing pml4 pti
4298 	 * table, entering all kernel mappings needed for usermode
4299 	 * into level 5 table.
4300 	 */
4301 	pm_pml5u[pmap_pml5e_index(UPT_MAX_ADDRESS)] =
4302 	    pmap_kextract((vm_offset_t)pti_pml4) |
4303 	    X86_PG_V | X86_PG_RW | X86_PG_A | X86_PG_M | pg_g |
4304 	    pmap_cache_bits(kernel_pmap, VM_MEMATTR_DEFAULT, FALSE);
4305 }
4306 
4307 /* Allocate a page table page and do related bookkeeping */
4308 static vm_page_t
4309 pmap_alloc_pt_page(pmap_t pmap, vm_pindex_t pindex, int flags)
4310 {
4311 	vm_page_t m;
4312 
4313 	m = vm_page_alloc_noobj(flags);
4314 	if (__predict_false(m == NULL))
4315 		return (NULL);
4316 	m->pindex = pindex;
4317 	pmap_pt_page_count_adj(pmap, 1);
4318 	return (m);
4319 }
4320 
4321 static void
4322 pmap_free_pt_page(pmap_t pmap, vm_page_t m, bool zerofilled)
4323 {
4324 	/*
4325 	 * This function assumes the page will need to be unwired,
4326 	 * even though the counterpart allocation in pmap_alloc_pt_page()
4327 	 * doesn't enforce VM_ALLOC_WIRED.  However, all current uses
4328 	 * of pmap_free_pt_page() require unwiring.  The case in which
4329 	 * a PT page doesn't require unwiring because its ref_count has
4330 	 * naturally reached 0 is handled through _pmap_unwire_ptp().
4331 	 */
4332 	vm_page_unwire_noq(m);
4333 	if (zerofilled)
4334 		vm_page_free_zero(m);
4335 	else
4336 		vm_page_free(m);
4337 
4338 	pmap_pt_page_count_adj(pmap, -1);
4339 }
4340 
4341 /*
4342  * Initialize a preallocated and zeroed pmap structure,
4343  * such as one in a vmspace structure.
4344  */
4345 int
4346 pmap_pinit_type(pmap_t pmap, enum pmap_type pm_type, int flags)
4347 {
4348 	vm_page_t pmltop_pg, pmltop_pgu;
4349 	vm_paddr_t pmltop_phys;
4350 	int i;
4351 
4352 	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
4353 
4354 	/*
4355 	 * Allocate the page directory page.  Pass NULL instead of a
4356 	 * pointer to the pmap here to avoid calling
4357 	 * pmap_resident_count_adj() through pmap_pt_page_count_adj(),
4358 	 * since that requires pmap lock.  Instead do the accounting
4359 	 * manually.
4360 	 *
4361 	 * Note that the final pmap_remove() call's optimization, which
4362 	 * checks for a zero resident_count, is basically disabled by
4363 	 * accounting for the top-level page.  But the optimization was
4364 	 * not effective since we started using non-managed mapping of
4365 	 * the shared page.
4366 	 */
4367 	pmltop_pg = pmap_alloc_pt_page(NULL, 0, VM_ALLOC_WIRED | VM_ALLOC_ZERO |
4368 	    VM_ALLOC_WAITOK);
4369 	pmap_pt_page_count_pinit(pmap, 1);
4370 
4371 	pmltop_phys = VM_PAGE_TO_PHYS(pmltop_pg);
4372 	pmap->pm_pmltop = (pml5_entry_t *)PHYS_TO_DMAP(pmltop_phys);
4373 
4374 	CPU_FOREACH(i) {
4375 		pmap->pm_pcids[i].pm_pcid = PMAP_PCID_NONE;
4376 		pmap->pm_pcids[i].pm_gen = 0;
4377 	}
4378 	pmap->pm_cr3 = PMAP_NO_CR3;	/* initialize to an invalid value */
4379 	pmap->pm_ucr3 = PMAP_NO_CR3;
4380 	pmap->pm_pmltopu = NULL;
4381 
4382 	pmap->pm_type = pm_type;
4383 
4384 	/*
4385 	 * Do not install the host kernel mappings in the nested page
4386 	 * tables. These mappings are meaningless in the guest physical
4387 	 * address space.
4388 	 * Install minimal kernel mappings in PTI case.
4389 	 */
4390 	switch (pm_type) {
4391 	case PT_X86:
4392 		pmap->pm_cr3 = pmltop_phys;
4393 		if (pmap_is_la57(pmap))
4394 			pmap_pinit_pml5(pmltop_pg);
4395 		else
4396 			pmap_pinit_pml4(pmltop_pg);
4397 		if ((curproc->p_md.md_flags & P_MD_KPTI) != 0) {
4398 			/*
4399 			 * As with pmltop_pg, pass NULL instead of a
4400 			 * pointer to the pmap to ensure that the PTI
4401 			 * page is counted explicitly.
4402 			 */
4403 			pmltop_pgu = pmap_alloc_pt_page(NULL, 0,
4404 			    VM_ALLOC_WIRED | VM_ALLOC_WAITOK);
4405 			pmap_pt_page_count_pinit(pmap, 1);
4406 			pmap->pm_pmltopu = (pml4_entry_t *)PHYS_TO_DMAP(
4407 			    VM_PAGE_TO_PHYS(pmltop_pgu));
4408 			if (pmap_is_la57(pmap))
4409 				pmap_pinit_pml5_pti(pmltop_pgu);
4410 			else
4411 				pmap_pinit_pml4_pti(pmltop_pgu);
4412 			pmap->pm_ucr3 = VM_PAGE_TO_PHYS(pmltop_pgu);
4413 		}
4414 		if ((cpu_stdext_feature2 & CPUID_STDEXT2_PKU) != 0) {
4415 			rangeset_init(&pmap->pm_pkru, pkru_dup_range,
4416 			    pkru_free_range, pmap, M_NOWAIT);
4417 		}
4418 		break;
4419 	case PT_EPT:
4420 	case PT_RVI:
4421 		pmap->pm_eptsmr = smr_create("pmap", 0, 0);
4422 		break;
4423 	}
4424 
4425 	vm_radix_init(&pmap->pm_root);
4426 	CPU_ZERO(&pmap->pm_active);
4427 	TAILQ_INIT(&pmap->pm_pvchunk);
4428 	pmap->pm_flags = flags;
4429 	pmap->pm_eptgen = 0;
4430 
4431 	return (1);
4432 }
4433 
4434 int
4435 pmap_pinit(pmap_t pmap)
4436 {
4437 
4438 	return (pmap_pinit_type(pmap, PT_X86, pmap_flags));
4439 }
4440 
4441 static void
4442 pmap_allocpte_free_unref(pmap_t pmap, vm_offset_t va, pt_entry_t *pte)
4443 {
4444 	vm_page_t mpg;
4445 	struct spglist free;
4446 
4447 	mpg = PHYS_TO_VM_PAGE(*pte & PG_FRAME);
4448 	if (mpg->ref_count != 0)
4449 		return;
4450 	SLIST_INIT(&free);
4451 	_pmap_unwire_ptp(pmap, va, mpg, &free);
4452 	pmap_invalidate_page(pmap, va);
4453 	vm_page_free_pages_toq(&free, true);
4454 }
4455 
4456 static pml4_entry_t *
4457 pmap_allocpte_getpml4(pmap_t pmap, struct rwlock **lockp, vm_offset_t va,
4458     bool addref)
4459 {
4460 	vm_pindex_t pml5index;
4461 	pml5_entry_t *pml5;
4462 	pml4_entry_t *pml4;
4463 	vm_page_t pml4pg;
4464 	pt_entry_t PG_V;
4465 	bool allocated;
4466 
4467 	if (!pmap_is_la57(pmap))
4468 		return (&pmap->pm_pmltop[pmap_pml4e_index(va)]);
4469 
4470 	PG_V = pmap_valid_bit(pmap);
4471 	pml5index = pmap_pml5e_index(va);
4472 	pml5 = &pmap->pm_pmltop[pml5index];
4473 	if ((*pml5 & PG_V) == 0) {
4474 		if (pmap_allocpte_nosleep(pmap, pmap_pml5e_pindex(va), lockp,
4475 		    va) == NULL)
4476 			return (NULL);
4477 		allocated = true;
4478 	} else {
4479 		allocated = false;
4480 	}
4481 	pml4 = (pml4_entry_t *)PHYS_TO_DMAP(*pml5 & PG_FRAME);
4482 	pml4 = &pml4[pmap_pml4e_index(va)];
4483 	if ((*pml4 & PG_V) == 0) {
4484 		pml4pg = PHYS_TO_VM_PAGE(*pml5 & PG_FRAME);
4485 		if (allocated && !addref)
4486 			pml4pg->ref_count--;
4487 		else if (!allocated && addref)
4488 			pml4pg->ref_count++;
4489 	}
4490 	return (pml4);
4491 }
4492 
4493 static pdp_entry_t *
4494 pmap_allocpte_getpdp(pmap_t pmap, struct rwlock **lockp, vm_offset_t va,
4495     bool addref)
4496 {
4497 	vm_page_t pdppg;
4498 	pml4_entry_t *pml4;
4499 	pdp_entry_t *pdp;
4500 	pt_entry_t PG_V;
4501 	bool allocated;
4502 
4503 	PG_V = pmap_valid_bit(pmap);
4504 
4505 	pml4 = pmap_allocpte_getpml4(pmap, lockp, va, false);
4506 	if (pml4 == NULL)
4507 		return (NULL);
4508 
4509 	if ((*pml4 & PG_V) == 0) {
4510 		/* Have to allocate a new pdp, recurse */
4511 		if (pmap_allocpte_nosleep(pmap, pmap_pml4e_pindex(va), lockp,
4512 		    va) == NULL) {
4513 			if (pmap_is_la57(pmap))
4514 				pmap_allocpte_free_unref(pmap, va,
4515 				    pmap_pml5e(pmap, va));
4516 			return (NULL);
4517 		}
4518 		allocated = true;
4519 	} else {
4520 		allocated = false;
4521 	}
4522 	pdp = (pdp_entry_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME);
4523 	pdp = &pdp[pmap_pdpe_index(va)];
4524 	if ((*pdp & PG_V) == 0) {
4525 		pdppg = PHYS_TO_VM_PAGE(*pml4 & PG_FRAME);
4526 		if (allocated && !addref)
4527 			pdppg->ref_count--;
4528 		else if (!allocated && addref)
4529 			pdppg->ref_count++;
4530 	}
4531 	return (pdp);
4532 }
4533 
4534 /*
4535  * The ptepindexes, i.e. page indices, of the page table pages encountered
4536  * while translating virtual address va are defined as follows:
4537  * - for the page table page (last level),
4538  *      ptepindex = pmap_pde_pindex(va) = va >> PDRSHIFT,
4539  *   in other words, it is just the index of the PDE that maps the page
4540  *   table page.
4541  * - for the page directory page,
4542  *      ptepindex = NUPDE (number of userland PD entries) +
4543  *          (pmap_pde_pindex(va) >> NPDEPGSHIFT),
4544  *   i.e. index of PDPE is put after the last index of PDE,
4545  * - for the page directory pointer page,
4546  *      ptepindex = NUPDE + NUPDPE + (pmap_pde_pindex(va) >> (NPDEPGSHIFT +
4547  *          NPML4EPGSHIFT)),
4548  *   i.e. index of pml4e is put after the last index of PDPE,
4549  * - for the PML4 page (if LA57 mode is enabled),
4550  *      ptepindex = NUPDE + NUPDPE + NUPML4E + (pmap_pde_pindex(va) >>
4551  *          (NPDEPGSHIFT + NPML4EPGSHIFT + NPML5EPGSHIFT)),
4552  *   i.e. index of pml5e is put after the last index of PML4E.
4553  *
4554  * Define an order on the paging entries, where all entries of the
4555  * same height are put together, then heights are put from deepest to
4556  * root.  Then ptepindex is the sequential number of the
4557  * corresponding paging entry in this order.
4558  *
4559  * The values of NUPDE, NUPDPE, and NUPML4E are determined by the size of
4560  * LA57 paging structures even in LA48 paging mode. Moreover, the
4561  * ptepindexes are calculated as if the paging structures were 5-level
4562  * regardless of the actual mode of operation.
4563  *
4564  * The root page at PML4/PML5 does not participate in this indexing scheme,
4565  * since it is statically allocated by pmap_pinit() and not by pmap_allocpte().
4566  */
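/*
 * For illustration, the resulting pindex ranges are disjoint:
 *
 *	[0, NUPDE)					PT pages
 *	[NUPDE, NUPDE + NUPDPE)				PD pages
 *	[NUPDE + NUPDPE, NUPDE + NUPDPE + NUPML4E)	PDP pages
 *	NUPDE + NUPDPE + NUPML4E and up			PML4 pages (LA57 only)
 *
 * e.g. for va = 0 the PT page gets pindex 0, its PD page gets pindex
 * NUPDE, and its PDP page gets pindex NUPDE + NUPDPE.
 * _pmap_unwire_ptp() above and pmap_allocpte_nosleep() below classify
 * a page table page's level by testing which of these ranges
 * m->pindex falls into.
 */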
4567 static vm_page_t
4568 pmap_allocpte_nosleep(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp,
4569     vm_offset_t va)
4570 {
4571 	vm_pindex_t pml5index, pml4index;
4572 	pml5_entry_t *pml5, *pml5u;
4573 	pml4_entry_t *pml4, *pml4u;
4574 	pdp_entry_t *pdp;
4575 	pd_entry_t *pd;
4576 	vm_page_t m, pdpg;
4577 	pt_entry_t PG_A, PG_M, PG_RW, PG_V;
4578 
4579 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4580 
4581 	PG_A = pmap_accessed_bit(pmap);
4582 	PG_M = pmap_modified_bit(pmap);
4583 	PG_V = pmap_valid_bit(pmap);
4584 	PG_RW = pmap_rw_bit(pmap);
4585 
4586 	/*
4587 	 * Allocate a page table page.
4588 	 */
4589 	m = pmap_alloc_pt_page(pmap, ptepindex,
4590 	    VM_ALLOC_WIRED | VM_ALLOC_ZERO);
4591 	if (m == NULL)
4592 		return (NULL);
4593 
4594 	/*
4595 	 * Map the pagetable page into the process address space, if
4596 	 * it isn't already there.
4597 	 */
4598 	if (ptepindex >= NUPDE + NUPDPE + NUPML4E) {
4599 		MPASS(pmap_is_la57(pmap));
4600 
4601 		pml5index = pmap_pml5e_index(va);
4602 		pml5 = &pmap->pm_pmltop[pml5index];
4603 		KASSERT((*pml5 & PG_V) == 0,
4604 		    ("pmap %p va %#lx pml5 %#lx", pmap, va, *pml5));
4605 		*pml5 = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M;
4606 
4607 		if (pmap->pm_pmltopu != NULL && pml5index < NUPML5E) {
4608 			if (pmap->pm_ucr3 != PMAP_NO_CR3)
4609 				*pml5 |= pg_nx;
4610 
4611 			pml5u = &pmap->pm_pmltopu[pml5index];
4612 			*pml5u = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V |
4613 			    PG_A | PG_M;
4614 		}
4615 	} else if (ptepindex >= NUPDE + NUPDPE) {
4616 		pml4index = pmap_pml4e_index(va);
4617 		/* Wire up a new PDPE page */
4618 		pml4 = pmap_allocpte_getpml4(pmap, lockp, va, true);
4619 		if (pml4 == NULL) {
4620 			pmap_free_pt_page(pmap, m, true);
4621 			return (NULL);
4622 		}
4623 		KASSERT((*pml4 & PG_V) == 0,
4624 		    ("pmap %p va %#lx pml4 %#lx", pmap, va, *pml4));
4625 		*pml4 = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M;
4626 
4627 		if (!pmap_is_la57(pmap) && pmap->pm_pmltopu != NULL &&
4628 		    pml4index < NUPML4E) {
4629 			/*
4630 			 * PTI: Make all user-space mappings in the
4631 			 * kernel-mode page table no-execute so that
4632 			 * we detect any programming errors that leave
4633 			 * the kernel-mode page table active on return
4634 			 * to user space.
4635 			 */
4636 			if (pmap->pm_ucr3 != PMAP_NO_CR3)
4637 				*pml4 |= pg_nx;
4638 
4639 			pml4u = &pmap->pm_pmltopu[pml4index];
4640 			*pml4u = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V |
4641 			    PG_A | PG_M;
4642 		}
4643 	} else if (ptepindex >= NUPDE) {
4644 		/* Wire up a new PDE page */
4645 		pdp = pmap_allocpte_getpdp(pmap, lockp, va, true);
4646 		if (pdp == NULL) {
4647 			pmap_free_pt_page(pmap, m, true);
4648 			return (NULL);
4649 		}
4650 		KASSERT((*pdp & PG_V) == 0,
4651 		    ("pmap %p va %#lx pdp %#lx", pmap, va, *pdp));
4652 		*pdp = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M;
4653 	} else {
4654 		/* Wire up a new PTE page */
4655 		pdp = pmap_allocpte_getpdp(pmap, lockp, va, false);
4656 		if (pdp == NULL) {
4657 			pmap_free_pt_page(pmap, m, true);
4658 			return (NULL);
4659 		}
4660 		if ((*pdp & PG_V) == 0) {
4661 			/* Have to allocate a new pd, recurse */
4662 			if (pmap_allocpte_nosleep(pmap, pmap_pdpe_pindex(va),
4663 			    lockp, va) == NULL) {
4664 				pmap_allocpte_free_unref(pmap, va,
4665 				    pmap_pml4e(pmap, va));
4666 				pmap_free_pt_page(pmap, m, true);
4667 				return (NULL);
4668 			}
4669 		} else {
4670 			/* Add reference to the pd page */
4671 			pdpg = PHYS_TO_VM_PAGE(*pdp & PG_FRAME);
4672 			pdpg->ref_count++;
4673 		}
4674 		pd = (pd_entry_t *)PHYS_TO_DMAP(*pdp & PG_FRAME);
4675 
4676 		/* Now we know where the page directory page is */
4677 		pd = &pd[pmap_pde_index(va)];
4678 		KASSERT((*pd & PG_V) == 0,
4679 		    ("pmap %p va %#lx pd %#lx", pmap, va, *pd));
4680 		*pd = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M;
4681 	}
4682 
4683 	return (m);
4684 }
4685 
4686 /*
4687  * This routine is called if the desired page table page does not exist.
4688  *
4689  * If page table page allocation fails, this routine may sleep before
4690  * returning NULL.  It sleeps only if a lock pointer was given.  Sleep
4691  * occurs right before returning to the caller. This way, we never
4692  * drop pmap lock to sleep while a page table page has ref_count == 0,
4693  * which prevents the page from being freed under us.
4694  */
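/*
 * A sketch of the resulting caller pattern (see pmap_alloc_pde() and
 * pmap_allocpte() below): when a lock pointer is supplied, a NULL
 * return means the pmap lock was dropped and reacquired, so the caller
 * revalidates its state and retries:
 *
 *	retry:
 *		...
 *		m = pmap_allocpte_alloc(pmap, ptepindex, lockp, va);
 *		if (m == NULL && lockp != NULL)
 *			goto retry;
 */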
4695 static vm_page_t
4696 pmap_allocpte_alloc(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp,
4697     vm_offset_t va)
4698 {
4699 	vm_page_t m;
4700 
4701 	m = pmap_allocpte_nosleep(pmap, ptepindex, lockp, va);
4702 	if (m == NULL && lockp != NULL) {
4703 		RELEASE_PV_LIST_LOCK(lockp);
4704 		PMAP_UNLOCK(pmap);
4705 		PMAP_ASSERT_NOT_IN_DI();
4706 		vm_wait(NULL);
4707 		PMAP_LOCK(pmap);
4708 	}
4709 	return (m);
4710 }
4711 
4712 static pd_entry_t *
4713 pmap_alloc_pde(pmap_t pmap, vm_offset_t va, vm_page_t *pdpgp,
4714     struct rwlock **lockp)
4715 {
4716 	pdp_entry_t *pdpe, PG_V;
4717 	pd_entry_t *pde;
4718 	vm_page_t pdpg;
4719 	vm_pindex_t pdpindex;
4720 
4721 	PG_V = pmap_valid_bit(pmap);
4722 
4723 retry:
4724 	pdpe = pmap_pdpe(pmap, va);
4725 	if (pdpe != NULL && (*pdpe & PG_V) != 0) {
4726 		pde = pmap_pdpe_to_pde(pdpe, va);
4727 		if (va < VM_MAXUSER_ADDRESS) {
4728 			/* Add a reference to the pd page. */
4729 			pdpg = PHYS_TO_VM_PAGE(*pdpe & PG_FRAME);
4730 			pdpg->ref_count++;
4731 		} else
4732 			pdpg = NULL;
4733 	} else if (va < VM_MAXUSER_ADDRESS) {
4734 		/* Allocate a pd page. */
4735 		pdpindex = pmap_pde_pindex(va) >> NPDPEPGSHIFT;
4736 		pdpg = pmap_allocpte_alloc(pmap, NUPDE + pdpindex, lockp, va);
4737 		if (pdpg == NULL) {
4738 			if (lockp != NULL)
4739 				goto retry;
4740 			else
4741 				return (NULL);
4742 		}
4743 		pde = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pdpg));
4744 		pde = &pde[pmap_pde_index(va)];
4745 	} else
4746 		panic("pmap_alloc_pde: missing page table page for va %#lx",
4747 		    va);
4748 	*pdpgp = pdpg;
4749 	return (pde);
4750 }
4751 
4752 static vm_page_t
4753 pmap_allocpte(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
4754 {
4755 	vm_pindex_t ptepindex;
4756 	pd_entry_t *pd, PG_V;
4757 	vm_page_t m;
4758 
4759 	PG_V = pmap_valid_bit(pmap);
4760 
4761 	/*
4762 	 * Calculate pagetable page index
4763 	 */
4764 	ptepindex = pmap_pde_pindex(va);
4765 retry:
4766 	/*
4767 	 * Get the page directory entry
4768 	 */
4769 	pd = pmap_pde(pmap, va);
4770 
4771 	/*
4772 	 * This supports switching from a 2MB page to a
4773 	 * normal 4K page.
4774 	 */
4775 	if (pd != NULL && (*pd & (PG_PS | PG_V)) == (PG_PS | PG_V)) {
4776 		if (!pmap_demote_pde_locked(pmap, pd, va, lockp)) {
4777 			/*
4778 			 * Invalidation of the 2MB page mapping may have caused
4779 			 * the deallocation of the underlying PD page.
4780 			 */
4781 			pd = NULL;
4782 		}
4783 	}
4784 
4785 	/*
4786 	 * If the page table page is mapped, we just increment the
4787 	 * hold count, and activate it.
4788 	 */
4789 	if (pd != NULL && (*pd & PG_V) != 0) {
4790 		m = PHYS_TO_VM_PAGE(*pd & PG_FRAME);
4791 		m->ref_count++;
4792 	} else {
4793 		/*
4794 		 * Here if the pte page isn't mapped, or if it has been
4795 		 * deallocated.
4796 		 */
4797 		m = pmap_allocpte_alloc(pmap, ptepindex, lockp, va);
4798 		if (m == NULL && lockp != NULL)
4799 			goto retry;
4800 	}
4801 	return (m);
4802 }
4803 
4804 /***************************************************
4805  * Pmap allocation/deallocation routines.
4806  ***************************************************/
4807 
4808 /*
4809  * Release any resources held by the given physical map.
4810  * Called when a pmap initialized by pmap_pinit is being released.
4811  * Should only be called if the map contains no valid mappings.
4812  */
4813 void
4814 pmap_release(pmap_t pmap)
4815 {
4816 	vm_page_t m;
4817 	int i;
4818 
4819 	KASSERT(vm_radix_is_empty(&pmap->pm_root),
4820 	    ("pmap_release: pmap %p has reserved page table page(s)",
4821 	    pmap));
4822 	KASSERT(CPU_EMPTY(&pmap->pm_active),
4823 	    ("releasing active pmap %p", pmap));
4824 
4825 	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pmap->pm_pmltop));
4826 
4827 	if (pmap_is_la57(pmap)) {
4828 		pmap->pm_pmltop[pmap_pml5e_index(UPT_MAX_ADDRESS)] = 0;
4829 		pmap->pm_pmltop[PML5PML5I] = 0;
4830 	} else {
4831 		for (i = 0; i < NKPML4E; i++)	/* KVA */
4832 			pmap->pm_pmltop[KPML4BASE + i] = 0;
4833 #ifdef KASAN
4834 		for (i = 0; i < NKASANPML4E; i++) /* KASAN shadow map */
4835 			pmap->pm_pmltop[KASANPML4I + i] = 0;
4836 #endif
4837 #ifdef KMSAN
4838 		for (i = 0; i < NKMSANSHADPML4E; i++) /* KMSAN shadow map */
4839 			pmap->pm_pmltop[KMSANSHADPML4I + i] = 0;
4840 		for (i = 0; i < NKMSANORIGPML4E; i++) /* KMSAN origin map */
4841 			pmap->pm_pmltop[KMSANORIGPML4I + i] = 0;
4842 #endif
4843 		for (i = 0; i < ndmpdpphys; i++)/* Direct Map */
4844 			pmap->pm_pmltop[DMPML4I + i] = 0;
4845 		pmap->pm_pmltop[PML4PML4I] = 0;	/* Recursive Mapping */
4846 		for (i = 0; i < lm_ents; i++)	/* Large Map */
4847 			pmap->pm_pmltop[LMSPML4I + i] = 0;
4848 	}
4849 
4850 	pmap_free_pt_page(NULL, m, true);
4851 	pmap_pt_page_count_pinit(pmap, -1);
4852 
4853 	if (pmap->pm_pmltopu != NULL) {
4854 		m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pmap->
4855 		    pm_pmltopu));
4856 		pmap_free_pt_page(NULL, m, false);
4857 		pmap_pt_page_count_pinit(pmap, -1);
4858 	}
4859 	if (pmap->pm_type == PT_X86 &&
4860 	    (cpu_stdext_feature2 & CPUID_STDEXT2_PKU) != 0)
4861 		rangeset_fini(&pmap->pm_pkru);
4862 
4863 	KASSERT(pmap->pm_stats.resident_count == 0,
4864 	    ("pmap_release: pmap %p resident count %ld != 0",
4865 	    pmap, pmap->pm_stats.resident_count));
4866 }
4867 
4868 static int
4869 kvm_size(SYSCTL_HANDLER_ARGS)
4870 {
4871 	unsigned long ksize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
4872 
4873 	return sysctl_handle_long(oidp, &ksize, 0, req);
4874 }
4875 SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG | CTLFLAG_RD | CTLFLAG_MPSAFE,
4876     0, 0, kvm_size, "LU",
4877     "Size of KVM");
4878 
4879 static int
4880 kvm_free(SYSCTL_HANDLER_ARGS)
4881 {
4882 	unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end;
4883 
4884 	return sysctl_handle_long(oidp, &kfree, 0, req);
4885 }
4886 SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG | CTLFLAG_RD | CTLFLAG_MPSAFE,
4887     0, 0, kvm_free, "LU",
4888     "Amount of KVM free");
4889 
4890 #ifdef KMSAN
4891 static void
4892 pmap_kmsan_shadow_map_page_array(vm_paddr_t pdppa, vm_size_t size)
4893 {
4894 	pdp_entry_t *pdpe;
4895 	pd_entry_t *pde;
4896 	pt_entry_t *pte;
4897 	vm_paddr_t dummypa, dummypd, dummypt;
4898 	int i, npde, npdpg;
4899 
4900 	npdpg = howmany(size, NBPDP);
4901 	npde = size / NBPDR;
4902 
4903 	dummypa = vm_phys_early_alloc(-1, PAGE_SIZE);
4904 	pagezero((void *)PHYS_TO_DMAP(dummypa));
4905 
4906 	dummypt = vm_phys_early_alloc(-1, PAGE_SIZE);
4907 	pagezero((void *)PHYS_TO_DMAP(dummypt));
4908 	dummypd = vm_phys_early_alloc(-1, PAGE_SIZE * npdpg);
4909 	for (i = 0; i < npdpg; i++)
4910 		pagezero((void *)PHYS_TO_DMAP(dummypd + ptoa(i)));
4911 
4912 	pte = (pt_entry_t *)PHYS_TO_DMAP(dummypt);
4913 	for (i = 0; i < NPTEPG; i++)
4914 		pte[i] = (pt_entry_t)(dummypa | X86_PG_V | X86_PG_RW |
4915 		    X86_PG_A | X86_PG_M | pg_nx);
4916 
4917 	pde = (pd_entry_t *)PHYS_TO_DMAP(dummypd);
4918 	for (i = 0; i < npde; i++)
4919 		pde[i] = (pd_entry_t)(dummypt | X86_PG_V | X86_PG_RW | pg_nx);
4920 
4921 	pdpe = (pdp_entry_t *)PHYS_TO_DMAP(pdppa);
4922 	for (i = 0; i < npdpg; i++)
4923 		pdpe[i] = (pdp_entry_t)(dummypd + ptoa(i) | X86_PG_V |
4924 		    X86_PG_RW | pg_nx);
4925 }
4926 
4927 static void
4928 pmap_kmsan_page_array_startup(vm_offset_t start, vm_offset_t end)
4929 {
4930 	vm_size_t size;
4931 
4932 	KASSERT(start % NBPDP == 0, ("unaligned page array start address"));
4933 
4934 	/*
4935 	 * The end of the page array's KVA region is 2MB aligned, see
4936 	 * kmem_init().
4937 	 */
4938 	size = round_2mpage(end) - start;
4939 	pmap_kmsan_shadow_map_page_array(KMSANSHADPDPphys, size);
4940 	pmap_kmsan_shadow_map_page_array(KMSANORIGPDPphys, size);
4941 }
4942 #endif
4943 
4944 /*
4945  * Allocate physical memory for the vm_page array and map it into KVA,
4946  * attempting to back the vm_pages with domain-local memory.
4947  */
4948 void
4949 pmap_page_array_startup(long pages)
4950 {
4951 	pdp_entry_t *pdpe;
4952 	pd_entry_t *pde, newpdir;
4953 	vm_offset_t va, start, end;
4954 	vm_paddr_t pa;
4955 	long pfn;
4956 	int domain, i;
4957 
4958 	vm_page_array_size = pages;
4959 
4960 	start = VM_MIN_KERNEL_ADDRESS;
4961 	end = start + pages * sizeof(struct vm_page);
4962 	for (va = start; va < end; va += NBPDR) {
4963 		pfn = first_page + (va - start) / sizeof(struct vm_page);
4964 		domain = vm_phys_domain(ptoa(pfn));
4965 		pdpe = pmap_pdpe(kernel_pmap, va);
4966 		if ((*pdpe & X86_PG_V) == 0) {
4967 			pa = vm_phys_early_alloc(domain, PAGE_SIZE);
4968 			dump_add_page(pa);
4969 			pagezero((void *)PHYS_TO_DMAP(pa));
4970 			*pdpe = (pdp_entry_t)(pa | X86_PG_V | X86_PG_RW |
4971 			    X86_PG_A | X86_PG_M);
4972 		}
4973 		pde = pmap_pdpe_to_pde(pdpe, va);
4974 		if ((*pde & X86_PG_V) != 0)
4975 			panic("Unexpected pde");
4976 		pa = vm_phys_early_alloc(domain, NBPDR);
4977 		for (i = 0; i < NPDEPG; i++)
4978 			dump_add_page(pa + i * PAGE_SIZE);
4979 		newpdir = (pd_entry_t)(pa | X86_PG_V | X86_PG_RW | X86_PG_A |
4980 		    X86_PG_M | PG_PS | pg_g | pg_nx);
4981 		pde_store(pde, newpdir);
4982 	}
4983 	vm_page_array = (vm_page_t)start;
4984 
4985 #ifdef KMSAN
4986 	pmap_kmsan_page_array_startup(start, end);
4987 #endif
4988 }
4989 
4990 /*
4991  * grow the number of kernel page table entries, if needed
4992  */
4993 void
4994 pmap_growkernel(vm_offset_t addr)
4995 {
4996 	vm_paddr_t paddr;
4997 	vm_page_t nkpg;
4998 	pd_entry_t *pde, newpdir;
4999 	pdp_entry_t *pdpe;
5000 
5001 	mtx_assert(&kernel_map->system_mtx, MA_OWNED);
5002 
5003 	/*
5004 	 * Return if "addr" is within the range of kernel page table pages
5005 	 * that were preallocated during pmap bootstrap.  Moreover, leave
5006 	 * "kernel_vm_end" and the kernel page table as they were.
5007 	 *
5008 	 * The correctness of this action is based on the following
5009 	 * argument: vm_map_insert() allocates contiguous ranges of the
5010 	 * kernel virtual address space.  It calls this function if a range
5011 	 * ends after "kernel_vm_end".  If the kernel is mapped between
5012 	 * "kernel_vm_end" and "addr", then the range cannot begin at
5013 	 * "kernel_vm_end".  In fact, its beginning address cannot be less
5014 	 * than the kernel.  Thus, there is no immediate need to allocate
5015 	 * any new kernel page table pages between "kernel_vm_end" and
5016 	 * "KERNBASE".
5017 	 */
5018 	if (KERNBASE < addr && addr <= KERNBASE + nkpt * NBPDR)
5019 		return;
5020 
5021 	addr = roundup2(addr, NBPDR);
5022 	if (addr - 1 >= vm_map_max(kernel_map))
5023 		addr = vm_map_max(kernel_map);
5024 	if (kernel_vm_end < addr)
5025 		kasan_shadow_map(kernel_vm_end, addr - kernel_vm_end);
5026 	if (kernel_vm_end < addr)
5027 		kmsan_shadow_map(kernel_vm_end, addr - kernel_vm_end);
5028 	while (kernel_vm_end < addr) {
5029 		pdpe = pmap_pdpe(kernel_pmap, kernel_vm_end);
5030 		if ((*pdpe & X86_PG_V) == 0) {
5031 			/* We need a new PDP entry */
5032 			nkpg = pmap_alloc_pt_page(kernel_pmap,
5033 			    kernel_vm_end >> PDPSHIFT, VM_ALLOC_WIRED |
5034 			    VM_ALLOC_INTERRUPT | VM_ALLOC_ZERO);
5035 			if (nkpg == NULL)
5036 				panic("pmap_growkernel: no memory to grow kernel");
5037 			paddr = VM_PAGE_TO_PHYS(nkpg);
5038 			*pdpe = (pdp_entry_t)(paddr | X86_PG_V | X86_PG_RW |
5039 			    X86_PG_A | X86_PG_M);
5040 			continue; /* try again */
5041 		}
5042 		pde = pmap_pdpe_to_pde(pdpe, kernel_vm_end);
5043 		if ((*pde & X86_PG_V) != 0) {
5044 			kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
5045 			if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
5046 				kernel_vm_end = vm_map_max(kernel_map);
5047 				break;
5048 			}
5049 			continue;
5050 		}
5051 
5052 		nkpg = pmap_alloc_pt_page(kernel_pmap,
5053 		    pmap_pde_pindex(kernel_vm_end), VM_ALLOC_WIRED |
5054 		    VM_ALLOC_INTERRUPT | VM_ALLOC_ZERO);
5055 		if (nkpg == NULL)
5056 			panic("pmap_growkernel: no memory to grow kernel");
5057 		paddr = VM_PAGE_TO_PHYS(nkpg);
5058 		newpdir = paddr | X86_PG_V | X86_PG_RW | X86_PG_A | X86_PG_M;
5059 		pde_store(pde, newpdir);
5060 
5061 		kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
5062 		if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
5063 			kernel_vm_end = vm_map_max(kernel_map);
5064 			break;
5065 		}
5066 	}
5067 }
5068 
5069 /***************************************************
5070  * page management routines.
5071  ***************************************************/
5072 
5073 CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE);
5074 CTASSERT(_NPCM == 3);
5075 CTASSERT(_NPCPV == 168);
5076 
5077 static __inline struct pv_chunk *
5078 pv_to_chunk(pv_entry_t pv)
5079 {
5080 
5081 	return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK));
5082 }
5083 
5084 #define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap)
5085 
5086 #define	PC_FREE0	0xfffffffffffffffful
5087 #define	PC_FREE1	0xfffffffffffffffful
5088 #define	PC_FREE2	0x000000fffffffffful
5089 
5090 static const uint64_t pc_freemask[_NPCM] = { PC_FREE0, PC_FREE1, PC_FREE2 };
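
/*
 * For illustration: each pv_chunk tracks _NPCPV (168) pv entries using
 * _NPCM (3) 64-bit freemask words; PC_FREE0 and PC_FREE1 contribute 64
 * set bits each and PC_FREE2 contributes 40, for 64 + 64 + 40 = 168.
 */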
5091 
5092 #ifdef PV_STATS
5093 
5094 static COUNTER_U64_DEFINE_EARLY(pc_chunk_count);
5095 SYSCTL_COUNTER_U64(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD,
5096     &pc_chunk_count, "Current number of pv entry chunks");
5097 
5098 static COUNTER_U64_DEFINE_EARLY(pc_chunk_allocs);
5099 SYSCTL_COUNTER_U64(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD,
5100     &pc_chunk_allocs, "Total number of pv entry chunks allocated");
5101 
5102 static COUNTER_U64_DEFINE_EARLY(pc_chunk_frees);
5103 SYSCTL_COUNTER_U64(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD,
5104     &pc_chunk_frees, "Total number of pv entry chunks freed");
5105 
5106 static COUNTER_U64_DEFINE_EARLY(pc_chunk_tryfail);
5107 SYSCTL_COUNTER_U64(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD,
5108     &pc_chunk_tryfail,
5109     "Number of failed attempts to get a pv entry chunk page");
5110 
5111 static COUNTER_U64_DEFINE_EARLY(pv_entry_frees);
5112 SYSCTL_COUNTER_U64(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD,
5113     &pv_entry_frees, "Total number of pv entries freed");
5114 
5115 static COUNTER_U64_DEFINE_EARLY(pv_entry_allocs);
5116 SYSCTL_COUNTER_U64(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD,
5117     &pv_entry_allocs, "Total number of pv entries allocated");
5118 
5119 static COUNTER_U64_DEFINE_EARLY(pv_entry_count);
5120 SYSCTL_COUNTER_U64(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD,
5121     &pv_entry_count, "Current number of pv entries");
5122 
5123 static COUNTER_U64_DEFINE_EARLY(pv_entry_spare);
5124 SYSCTL_COUNTER_U64(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD,
5125     &pv_entry_spare, "Current number of spare pv entries");
5126 #endif
5127 
5128 static void
5129 reclaim_pv_chunk_leave_pmap(pmap_t pmap, pmap_t locked_pmap, bool start_di)
5130 {
5131 
5132 	if (pmap == NULL)
5133 		return;
5134 	pmap_invalidate_all(pmap);
5135 	if (pmap != locked_pmap)
5136 		PMAP_UNLOCK(pmap);
5137 	if (start_di)
5138 		pmap_delayed_invl_finish();
5139 }
5140 
5141 /*
5142  * We are in a serious low memory condition.  Resort to
5143  * drastic measures to free some pages so we can allocate
5144  * another pv entry chunk.
5145  *
5146  * Returns NULL if PV entries were reclaimed from the specified pmap.
5147  *
5148  * We do not, however, unmap 2mpages because subsequent accesses will
5149  * allocate per-page pv entries until repromotion occurs, thereby
5150  * exacerbating the shortage of free pv entries.
5151  */
5152 static vm_page_t
5153 reclaim_pv_chunk_domain(pmap_t locked_pmap, struct rwlock **lockp, int domain)
5154 {
5155 	struct pv_chunks_list *pvc;
5156 	struct pv_chunk *pc, *pc_marker, *pc_marker_end;
5157 	struct pv_chunk_header pc_marker_b, pc_marker_end_b;
5158 	struct md_page *pvh;
5159 	pd_entry_t *pde;
5160 	pmap_t next_pmap, pmap;
5161 	pt_entry_t *pte, tpte;
5162 	pt_entry_t PG_G, PG_A, PG_M, PG_RW;
5163 	pv_entry_t pv;
5164 	vm_offset_t va;
5165 	vm_page_t m, m_pc;
5166 	struct spglist free;
5167 	uint64_t inuse;
5168 	int bit, field, freed;
5169 	bool start_di, restart;
5170 
5171 	PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
5172 	KASSERT(lockp != NULL, ("reclaim_pv_chunk: lockp is NULL"));
5173 	pmap = NULL;
5174 	m_pc = NULL;
5175 	PG_G = PG_A = PG_M = PG_RW = 0;
5176 	SLIST_INIT(&free);
5177 	bzero(&pc_marker_b, sizeof(pc_marker_b));
5178 	bzero(&pc_marker_end_b, sizeof(pc_marker_end_b));
5179 	pc_marker = (struct pv_chunk *)&pc_marker_b;
5180 	pc_marker_end = (struct pv_chunk *)&pc_marker_end_b;
5181 
5182 	/*
5183 	 * A delayed invalidation block should already be active if
5184 	 * pmap_advise() or pmap_remove() called this function by way
5185 	 * of pmap_demote_pde_locked().
5186 	 */
5187 	start_di = pmap_not_in_di();
5188 
5189 	pvc = &pv_chunks[domain];
5190 	mtx_lock(&pvc->pvc_lock);
5191 	pvc->active_reclaims++;
5192 	TAILQ_INSERT_HEAD(&pvc->pvc_list, pc_marker, pc_lru);
5193 	TAILQ_INSERT_TAIL(&pvc->pvc_list, pc_marker_end, pc_lru);
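	/*
	 * The two markers bracket the portion of the LRU list that this call
	 * scans: the loop below stops upon reaching pc_marker_end, so chunks
	 * that other threads append to the tail after this point are not
	 * visited by this invocation.
	 */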
5194 	while ((pc = TAILQ_NEXT(pc_marker, pc_lru)) != pc_marker_end &&
5195 	    SLIST_EMPTY(&free)) {
5196 		next_pmap = pc->pc_pmap;
5197 		if (next_pmap == NULL) {
5198 			/*
5199 			 * The next chunk is a marker.  However, it is
5200 			 * not our marker, so active_reclaims must be
5201 			 * > 1.  Consequently, the next_chunk code
5202 			 * will not rotate the pv_chunks list.
5203 			 */
5204 			goto next_chunk;
5205 		}
5206 		mtx_unlock(&pvc->pvc_lock);
5207 
5208 		/*
5209 		 * A pv_chunk can only be removed from the pc_lru list
5210 		 * when both the per-domain chunk list lock (pvc->pvc_lock)
5211 		 * is owned and the corresponding pmap is locked.
5212 		 */
5213 		if (pmap != next_pmap) {
5214 			restart = false;
5215 			reclaim_pv_chunk_leave_pmap(pmap, locked_pmap,
5216 			    start_di);
5217 			pmap = next_pmap;
5218 			/* Avoid deadlock and lock recursion. */
5219 			if (pmap > locked_pmap) {
5220 				RELEASE_PV_LIST_LOCK(lockp);
5221 				PMAP_LOCK(pmap);
5222 				if (start_di)
5223 					pmap_delayed_invl_start();
5224 				mtx_lock(&pvc->pvc_lock);
5225 				restart = true;
5226 			} else if (pmap != locked_pmap) {
5227 				if (PMAP_TRYLOCK(pmap)) {
5228 					if (start_di)
5229 						pmap_delayed_invl_start();
5230 					mtx_lock(&pvc->pvc_lock);
5231 					restart = true;
5232 				} else {
5233 					pmap = NULL; /* pmap is not locked */
5234 					mtx_lock(&pvc->pvc_lock);
5235 					pc = TAILQ_NEXT(pc_marker, pc_lru);
5236 					if (pc == NULL ||
5237 					    pc->pc_pmap != next_pmap)
5238 						continue;
5239 					goto next_chunk;
5240 				}
5241 			} else if (start_di)
5242 				pmap_delayed_invl_start();
5243 			PG_G = pmap_global_bit(pmap);
5244 			PG_A = pmap_accessed_bit(pmap);
5245 			PG_M = pmap_modified_bit(pmap);
5246 			PG_RW = pmap_rw_bit(pmap);
5247 			if (restart)
5248 				continue;
5249 		}
5250 
5251 		/*
5252 		 * Destroy every non-wired, 4 KB page mapping in the chunk.
5253 		 */
5254 		freed = 0;
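		/*
		 * A set bit in pc_map marks a free pv entry, so its
		 * complement, masked by pc_freemask, yields the entries that
		 * are currently in use; bsfq() walks those bits below.
		 */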
5255 		for (field = 0; field < _NPCM; field++) {
5256 			for (inuse = ~pc->pc_map[field] & pc_freemask[field];
5257 			    inuse != 0; inuse &= ~(1UL << bit)) {
5258 				bit = bsfq(inuse);
5259 				pv = &pc->pc_pventry[field * 64 + bit];
5260 				va = pv->pv_va;
5261 				pde = pmap_pde(pmap, va);
5262 				if ((*pde & PG_PS) != 0)
5263 					continue;
5264 				pte = pmap_pde_to_pte(pde, va);
5265 				if ((*pte & PG_W) != 0)
5266 					continue;
5267 				tpte = pte_load_clear(pte);
5268 				if ((tpte & PG_G) != 0)
5269 					pmap_invalidate_page(pmap, va);
5270 				m = PHYS_TO_VM_PAGE(tpte & PG_FRAME);
5271 				if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
5272 					vm_page_dirty(m);
5273 				if ((tpte & PG_A) != 0)
5274 					vm_page_aflag_set(m, PGA_REFERENCED);
5275 				CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
5276 				TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
5277 				m->md.pv_gen++;
5278 				if (TAILQ_EMPTY(&m->md.pv_list) &&
5279 				    (m->flags & PG_FICTITIOUS) == 0) {
5280 					pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
5281 					if (TAILQ_EMPTY(&pvh->pv_list)) {
5282 						vm_page_aflag_clear(m,
5283 						    PGA_WRITEABLE);
5284 					}
5285 				}
5286 				pmap_delayed_invl_page(m);
5287 				pc->pc_map[field] |= 1UL << bit;
5288 				pmap_unuse_pt(pmap, va, *pde, &free);
5289 				freed++;
5290 			}
5291 		}
5292 		if (freed == 0) {
5293 			mtx_lock(&pvc->pvc_lock);
5294 			goto next_chunk;
5295 		}
5296 		/* Every freed mapping is for a 4 KB page. */
5297 		pmap_resident_count_adj(pmap, -freed);
5298 		PV_STAT(counter_u64_add(pv_entry_frees, freed));
5299 		PV_STAT(counter_u64_add(pv_entry_spare, freed));
5300 		PV_STAT(counter_u64_add(pv_entry_count, -freed));
5301 		TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
5302 		if (pc->pc_map[0] == PC_FREE0 && pc->pc_map[1] == PC_FREE1 &&
5303 		    pc->pc_map[2] == PC_FREE2) {
5304 			PV_STAT(counter_u64_add(pv_entry_spare, -_NPCPV));
5305 			PV_STAT(counter_u64_add(pc_chunk_count, -1));
5306 			PV_STAT(counter_u64_add(pc_chunk_frees, 1));
5307 			/* Entire chunk is free; return it. */
5308 			m_pc = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
5309 			dump_drop_page(m_pc->phys_addr);
5310 			mtx_lock(&pvc->pvc_lock);
5311 			TAILQ_REMOVE(&pvc->pvc_list, pc, pc_lru);
5312 			break;
5313 		}
5314 		TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
5315 		mtx_lock(&pvc->pvc_lock);
5316 		/* One freed pv entry in locked_pmap is sufficient. */
5317 		if (pmap == locked_pmap)
5318 			break;
5319 next_chunk:
5320 		TAILQ_REMOVE(&pvc->pvc_list, pc_marker, pc_lru);
5321 		TAILQ_INSERT_AFTER(&pvc->pvc_list, pc, pc_marker, pc_lru);
5322 		if (pvc->active_reclaims == 1 && pmap != NULL) {
5323 			/*
5324 			 * Rotate the pv chunks list so that we do not
5325 			 * scan the same pv chunks that could not be
5326 			 * freed (because they contained a wired
5327 			 * and/or superpage mapping) on every
5328 			 * invocation of reclaim_pv_chunk().
5329 			 */
5330 			while ((pc = TAILQ_FIRST(&pvc->pvc_list)) != pc_marker) {
5331 				MPASS(pc->pc_pmap != NULL);
5332 				TAILQ_REMOVE(&pvc->pvc_list, pc, pc_lru);
5333 				TAILQ_INSERT_TAIL(&pvc->pvc_list, pc, pc_lru);
5334 			}
5335 		}
5336 	}
5337 	TAILQ_REMOVE(&pvc->pvc_list, pc_marker, pc_lru);
5338 	TAILQ_REMOVE(&pvc->pvc_list, pc_marker_end, pc_lru);
5339 	pvc->active_reclaims--;
5340 	mtx_unlock(&pvc->pvc_lock);
5341 	reclaim_pv_chunk_leave_pmap(pmap, locked_pmap, start_di);
5342 	if (m_pc == NULL && !SLIST_EMPTY(&free)) {
5343 		m_pc = SLIST_FIRST(&free);
5344 		SLIST_REMOVE_HEAD(&free, plinks.s.ss);
5345 		/* Recycle a freed page table page. */
5346 		m_pc->ref_count = 1;
5347 	}
5348 	vm_page_free_pages_toq(&free, true);
5349 	return (m_pc);
5350 }
5351 
5352 static vm_page_t
5353 reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp)
5354 {
5355 	vm_page_t m;
5356 	int i, domain;
5357 
5358 	domain = PCPU_GET(domain);
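	/*
	 * Start with the current CPU's NUMA domain, presumably so that a
	 * reclaimed chunk page is local, and fall back to the remaining
	 * domains round-robin.
	 */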
5359 	for (i = 0; i < vm_ndomains; i++) {
5360 		m = reclaim_pv_chunk_domain(locked_pmap, lockp, domain);
5361 		if (m != NULL)
5362 			break;
5363 		domain = (domain + 1) % vm_ndomains;
5364 	}
5365 
5366 	return (m);
5367 }
5368 
5369 /*
5370  * free the pv_entry back to the free list
5371  */
5372 static void
5373 free_pv_entry(pmap_t pmap, pv_entry_t pv)
5374 {
5375 	struct pv_chunk *pc;
5376 	int idx, field, bit;
5377 
5378 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5379 	PV_STAT(counter_u64_add(pv_entry_frees, 1));
5380 	PV_STAT(counter_u64_add(pv_entry_spare, 1));
5381 	PV_STAT(counter_u64_add(pv_entry_count, -1));
5382 	pc = pv_to_chunk(pv);
5383 	idx = pv - &pc->pc_pventry[0];
5384 	field = idx / 64;
5385 	bit = idx % 64;
5386 	pc->pc_map[field] |= 1ul << bit;
5387 	if (pc->pc_map[0] != PC_FREE0 || pc->pc_map[1] != PC_FREE1 ||
5388 	    pc->pc_map[2] != PC_FREE2) {
5389 		/* 98% of the time, pc is already at the head of the list. */
5390 		if (__predict_false(pc != TAILQ_FIRST(&pmap->pm_pvchunk))) {
5391 			TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
5392 			TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
5393 		}
5394 		return;
5395 	}
5396 	TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
5397 	free_pv_chunk(pc);
5398 }
5399 
5400 static void
5401 free_pv_chunk_dequeued(struct pv_chunk *pc)
5402 {
5403 	vm_page_t m;
5404 
5405 	PV_STAT(counter_u64_add(pv_entry_spare, -_NPCPV));
5406 	PV_STAT(counter_u64_add(pc_chunk_count, -1));
5407 	PV_STAT(counter_u64_add(pc_chunk_frees, 1));
5408 	counter_u64_add(pv_page_count, -1);
5409 	/* Entire chunk is free; return it. */
5410 	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
5411 	dump_drop_page(m->phys_addr);
5412 	vm_page_unwire_noq(m);
5413 	vm_page_free(m);
5414 }
5415 
5416 static void
5417 free_pv_chunk(struct pv_chunk *pc)
5418 {
5419 	struct pv_chunks_list *pvc;
5420 
5421 	pvc = &pv_chunks[pc_to_domain(pc)];
5422 	mtx_lock(&pvc->pvc_lock);
5423 	TAILQ_REMOVE(&pvc->pvc_list, pc, pc_lru);
5424 	mtx_unlock(&pvc->pvc_lock);
5425 	free_pv_chunk_dequeued(pc);
5426 }
5427 
5428 static void
5429 free_pv_chunk_batch(struct pv_chunklist *batch)
5430 {
5431 	struct pv_chunks_list *pvc;
5432 	struct pv_chunk *pc, *npc;
5433 	int i;
5434 
5435 	for (i = 0; i < vm_ndomains; i++) {
5436 		if (TAILQ_EMPTY(&batch[i]))
5437 			continue;
5438 		pvc = &pv_chunks[i];
5439 		mtx_lock(&pvc->pvc_lock);
5440 		TAILQ_FOREACH(pc, &batch[i], pc_list) {
5441 			TAILQ_REMOVE(&pvc->pvc_list, pc, pc_lru);
5442 		}
5443 		mtx_unlock(&pvc->pvc_lock);
5444 	}
5445 
5446 	for (i = 0; i < vm_ndomains; i++) {
5447 		TAILQ_FOREACH_SAFE(pc, &batch[i], pc_list, npc) {
5448 			free_pv_chunk_dequeued(pc);
5449 		}
5450 	}
5451 }
5452 
5453 /*
5454  * Returns a new PV entry, allocating a new PV chunk from the system when
5455  * needed.  If this PV chunk allocation fails and a PV list lock pointer was
5456  * given, a PV chunk is reclaimed from an arbitrary pmap.  Otherwise, NULL is
5457  * returned.
5458  *
5459  * The given PV list lock may be released.
5460  */
5461 static pv_entry_t
5462 get_pv_entry(pmap_t pmap, struct rwlock **lockp)
5463 {
5464 	struct pv_chunks_list *pvc;
5465 	int bit, field;
5466 	pv_entry_t pv;
5467 	struct pv_chunk *pc;
5468 	vm_page_t m;
5469 
5470 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5471 	PV_STAT(counter_u64_add(pv_entry_allocs, 1));
5472 retry:
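	/*
	 * First try to take a free entry from a chunk already assigned to
	 * this pmap.  The pm_pvchunk list is maintained so that the chunk at
	 * its head is the one expected to have free entries.
	 */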
5473 	pc = TAILQ_FIRST(&pmap->pm_pvchunk);
5474 	if (pc != NULL) {
5475 		for (field = 0; field < _NPCM; field++) {
5476 			if (pc->pc_map[field]) {
5477 				bit = bsfq(pc->pc_map[field]);
5478 				break;
5479 			}
5480 		}
5481 		if (field < _NPCM) {
5482 			pv = &pc->pc_pventry[field * 64 + bit];
5483 			pc->pc_map[field] &= ~(1ul << bit);
5484 			/* If this was the last item, move it to tail */
5485 			if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0 &&
5486 			    pc->pc_map[2] == 0) {
5487 				TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
5488 				TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc,
5489 				    pc_list);
5490 			}
5491 			PV_STAT(counter_u64_add(pv_entry_count, 1));
5492 			PV_STAT(counter_u64_add(pv_entry_spare, -1));
5493 			return (pv);
5494 		}
5495 	}
5496 	/* No free items, allocate another chunk */
5497 	m = vm_page_alloc_noobj(VM_ALLOC_WIRED);
5498 	if (m == NULL) {
5499 		if (lockp == NULL) {
5500 			PV_STAT(counter_u64_add(pc_chunk_tryfail, 1));
5501 			return (NULL);
5502 		}
5503 		m = reclaim_pv_chunk(pmap, lockp);
5504 		if (m == NULL)
5505 			goto retry;
5506 	} else
5507 		counter_u64_add(pv_page_count, 1);
5508 	PV_STAT(counter_u64_add(pc_chunk_count, 1));
5509 	PV_STAT(counter_u64_add(pc_chunk_allocs, 1));
5510 	dump_add_page(m->phys_addr);
5511 	pc = (void *)PHYS_TO_DMAP(m->phys_addr);
5512 	pc->pc_pmap = pmap;
5513 	pc->pc_map[0] = PC_FREE0 & ~1ul;	/* preallocated bit 0 */
5514 	pc->pc_map[1] = PC_FREE1;
5515 	pc->pc_map[2] = PC_FREE2;
5516 	pvc = &pv_chunks[vm_page_domain(m)];
5517 	mtx_lock(&pvc->pvc_lock);
5518 	TAILQ_INSERT_TAIL(&pvc->pvc_list, pc, pc_lru);
5519 	mtx_unlock(&pvc->pvc_lock);
5520 	pv = &pc->pc_pventry[0];
5521 	TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
5522 	PV_STAT(counter_u64_add(pv_entry_count, 1));
5523 	PV_STAT(counter_u64_add(pv_entry_spare, _NPCPV - 1));
5524 	return (pv);
5525 }
5526 
5527 /*
5528  * Returns the number of one bits within the given PV chunk map.
5529  *
5530  * The errata for Intel processors state that "POPCNT Instruction May
5531  * Take Longer to Execute Than Expected".  It is believed that the
5532  * issue is the spurious dependency on the destination register.
5533  * Provide a hint to the register rename logic that the destination
5534  * value is overwritten, by clearing it, as suggested in the
5535  * optimization manual.  It should be cheap for unaffected processors
5536  * as well.
5537  *
5538  * Reference numbers for the errata are
5539  * 4th Gen Core: HSD146
5540  * 5th Gen Core: BDM85
5541  * 6th Gen Core: SKL029
5542  */
5543 static int
5544 popcnt_pc_map_pq(uint64_t *map)
5545 {
5546 	u_long result, tmp;
5547 
5548 	__asm __volatile("xorl %k0,%k0;popcntq %2,%0;"
5549 	    "xorl %k1,%k1;popcntq %3,%1;addl %k1,%k0;"
5550 	    "xorl %k1,%k1;popcntq %4,%1;addl %k1,%k0"
5551 	    : "=&r" (result), "=&r" (tmp)
5552 	    : "m" (map[0]), "m" (map[1]), "m" (map[2]));
5553 	return (result);
5554 }
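/*
 * The sequence above is logically equivalent to summing a 64-bit popcount
 * over the three map words; the explicit xor of each destination register
 * only serves to break the false dependency described above.
 */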
5555 
5556 /*
5557  * Ensure that the number of spare PV entries in the specified pmap meets or
5558  * exceeds the given count, "needed".
5559  *
5560  * The given PV list lock may be released.
5561  */
5562 static void
5563 reserve_pv_entries(pmap_t pmap, int needed, struct rwlock **lockp)
5564 {
5565 	struct pv_chunks_list *pvc;
5566 	struct pch new_tail[PMAP_MEMDOM];
5567 	struct pv_chunk *pc;
5568 	vm_page_t m;
5569 	int avail, free, i;
5570 	bool reclaimed;
5571 
5572 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5573 	KASSERT(lockp != NULL, ("reserve_pv_entries: lockp is NULL"));
5574 
5575 	/*
5576 	 * Newly allocated PV chunks must be stored in a private list until
5577 	 * the required number of PV chunks have been allocated.  Otherwise,
5578 	 * reclaim_pv_chunk() could recycle one of these chunks.  In
5579 	 * contrast, these chunks must be added to the pmap upon allocation.
5580 	 */
5581 	for (i = 0; i < PMAP_MEMDOM; i++)
5582 		TAILQ_INIT(&new_tail[i]);
5583 retry:
5584 	avail = 0;
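	/*
	 * Count the free entries in the pmap's existing chunks, using the
	 * POPCNT-based helper when the CPU provides POPCNT and bit_count()
	 * otherwise.
	 */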
5585 	TAILQ_FOREACH(pc, &pmap->pm_pvchunk, pc_list) {
5586 #ifndef __POPCNT__
5587 		if ((cpu_feature2 & CPUID2_POPCNT) == 0)
5588 			bit_count((bitstr_t *)pc->pc_map, 0,
5589 			    sizeof(pc->pc_map) * NBBY, &free);
5590 		else
5591 #endif
5592 		free = popcnt_pc_map_pq(pc->pc_map);
5593 		if (free == 0)
5594 			break;
5595 		avail += free;
5596 		if (avail >= needed)
5597 			break;
5598 	}
5599 	for (reclaimed = false; avail < needed; avail += _NPCPV) {
5600 		m = vm_page_alloc_noobj(VM_ALLOC_WIRED);
5601 		if (m == NULL) {
5602 			m = reclaim_pv_chunk(pmap, lockp);
5603 			if (m == NULL)
5604 				goto retry;
5605 			reclaimed = true;
5606 		} else
5607 			counter_u64_add(pv_page_count, 1);
5608 		PV_STAT(counter_u64_add(pc_chunk_count, 1));
5609 		PV_STAT(counter_u64_add(pc_chunk_allocs, 1));
5610 		dump_add_page(m->phys_addr);
5611 		pc = (void *)PHYS_TO_DMAP(m->phys_addr);
5612 		pc->pc_pmap = pmap;
5613 		pc->pc_map[0] = PC_FREE0;
5614 		pc->pc_map[1] = PC_FREE1;
5615 		pc->pc_map[2] = PC_FREE2;
5616 		TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
5617 		TAILQ_INSERT_TAIL(&new_tail[vm_page_domain(m)], pc, pc_lru);
5618 		PV_STAT(counter_u64_add(pv_entry_spare, _NPCPV));
5619 
5620 		/*
5621 		 * The reclaim might have freed a chunk from the current pmap.
5622 		 * If that chunk contained available entries, we need to
5623 		 * re-count the number of available entries.
5624 		 */
5625 		if (reclaimed)
5626 			goto retry;
5627 	}
5628 	for (i = 0; i < vm_ndomains; i++) {
5629 		if (TAILQ_EMPTY(&new_tail[i]))
5630 			continue;
5631 		pvc = &pv_chunks[i];
5632 		mtx_lock(&pvc->pvc_lock);
5633 		TAILQ_CONCAT(&pvc->pvc_list, &new_tail[i], pc_lru);
5634 		mtx_unlock(&pvc->pvc_lock);
5635 	}
5636 }
5637 
5638 /*
5639  * First find and then remove the pv entry for the specified pmap and virtual
5640  * address from the specified pv list.  Returns the pv entry if found and NULL
5641  * otherwise.  This operation can be performed on pv lists for either 4KB or
5642  * 2MB page mappings.
5643  */
5644 static __inline pv_entry_t
5645 pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
5646 {
5647 	pv_entry_t pv;
5648 
5649 	TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
5650 		if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
5651 			TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
5652 			pvh->pv_gen++;
5653 			break;
5654 		}
5655 	}
5656 	return (pv);
5657 }
5658 
5659 /*
5660  * After demotion from a 2MB page mapping to 512 4KB page mappings,
5661  * destroy the pv entry for the 2MB page mapping and reinstantiate the pv
5662  * entries for each of the 4KB page mappings.
5663  */
5664 static void
5665 pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
5666     struct rwlock **lockp)
5667 {
5668 	struct md_page *pvh;
5669 	struct pv_chunk *pc;
5670 	pv_entry_t pv;
5671 	vm_offset_t va_last;
5672 	vm_page_t m;
5673 	int bit, field;
5674 
5675 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5676 	KASSERT((pa & PDRMASK) == 0,
5677 	    ("pmap_pv_demote_pde: pa is not 2mpage aligned"));
5678 	CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
5679 
5680 	/*
5681 	 * Transfer the 2mpage's pv entry for this mapping to the first
5682 	 * page's pv list.  Once this transfer begins, the pv list lock
5683 	 * must not be released until the last pv entry is reinstantiated.
5684 	 */
5685 	pvh = pa_to_pvh(pa);
5686 	va = trunc_2mpage(va);
5687 	pv = pmap_pvh_remove(pvh, pmap, va);
5688 	KASSERT(pv != NULL, ("pmap_pv_demote_pde: pv not found"));
5689 	m = PHYS_TO_VM_PAGE(pa);
5690 	TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
5691 	m->md.pv_gen++;
5692 	/* Instantiate the remaining NPTEPG - 1 pv entries. */
5693 	PV_STAT(counter_u64_add(pv_entry_allocs, NPTEPG - 1));
5694 	va_last = va + NBPDR - PAGE_SIZE;
5695 	for (;;) {
5696 		pc = TAILQ_FIRST(&pmap->pm_pvchunk);
5697 		KASSERT(pc->pc_map[0] != 0 || pc->pc_map[1] != 0 ||
5698 		    pc->pc_map[2] != 0, ("pmap_pv_demote_pde: missing spare"));
5699 		for (field = 0; field < _NPCM; field++) {
5700 			while (pc->pc_map[field]) {
5701 				bit = bsfq(pc->pc_map[field]);
5702 				pc->pc_map[field] &= ~(1ul << bit);
5703 				pv = &pc->pc_pventry[field * 64 + bit];
5704 				va += PAGE_SIZE;
5705 				pv->pv_va = va;
5706 				m++;
5707 				KASSERT((m->oflags & VPO_UNMANAGED) == 0,
5708 			    ("pmap_pv_demote_pde: page %p is not managed", m));
5709 				TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
5710 				m->md.pv_gen++;
5711 				if (va == va_last)
5712 					goto out;
5713 			}
5714 		}
5715 		TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
5716 		TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
5717 	}
5718 out:
5719 	if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0 && pc->pc_map[2] == 0) {
5720 		TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
5721 		TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
5722 	}
5723 	PV_STAT(counter_u64_add(pv_entry_count, NPTEPG - 1));
5724 	PV_STAT(counter_u64_add(pv_entry_spare, -(NPTEPG - 1)));
5725 }
5726 
5727 #if VM_NRESERVLEVEL > 0
5728 /*
5729  * After promotion from 512 4KB page mappings to a single 2MB page mapping,
5730  * replace the many pv entries for the 4KB page mappings by a single pv entry
5731  * for the 2MB page mapping.
5732  */
5733 static void
5734 pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
5735     struct rwlock **lockp)
5736 {
5737 	struct md_page *pvh;
5738 	pv_entry_t pv;
5739 	vm_offset_t va_last;
5740 	vm_page_t m;
5741 
5742 	KASSERT((pa & PDRMASK) == 0,
5743 	    ("pmap_pv_promote_pde: pa is not 2mpage aligned"));
5744 	CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
5745 
5746 	/*
5747 	 * Transfer the first page's pv entry for this mapping to the 2mpage's
5748 	 * pv list.  Aside from avoiding the cost of a call to get_pv_entry(),
5749 	 * a transfer avoids the possibility that get_pv_entry() calls
5750 	 * reclaim_pv_chunk() and that reclaim_pv_chunk() removes one of the
5751 	 * mappings that is being promoted.
5752 	 */
5753 	m = PHYS_TO_VM_PAGE(pa);
5754 	va = trunc_2mpage(va);
5755 	pv = pmap_pvh_remove(&m->md, pmap, va);
5756 	KASSERT(pv != NULL, ("pmap_pv_promote_pde: pv not found"));
5757 	pvh = pa_to_pvh(pa);
5758 	TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
5759 	pvh->pv_gen++;
5760 	/* Free the remaining NPTEPG - 1 pv entries. */
5761 	va_last = va + NBPDR - PAGE_SIZE;
5762 	do {
5763 		m++;
5764 		va += PAGE_SIZE;
5765 		pmap_pvh_free(&m->md, pmap, va);
5766 	} while (va < va_last);
5767 }
5768 #endif /* VM_NRESERVLEVEL > 0 */
5769 
5770 /*
5771  * First find and then destroy the pv entry for the specified pmap and virtual
5772  * address.  This operation can be performed on pv lists for either 4KB or 2MB
5773  * page mappings.
5774  */
5775 static void
5776 pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
5777 {
5778 	pv_entry_t pv;
5779 
5780 	pv = pmap_pvh_remove(pvh, pmap, va);
5781 	KASSERT(pv != NULL, ("pmap_pvh_free: pv not found"));
5782 	free_pv_entry(pmap, pv);
5783 }
5784 
5785 /*
5786  * Conditionally create the PV entry for a 4KB page mapping if the required
5787  * memory can be allocated without resorting to reclamation.
5788  */
5789 static boolean_t
5790 pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m,
5791     struct rwlock **lockp)
5792 {
5793 	pv_entry_t pv;
5794 
5795 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5796 	/* Pass NULL instead of the lock pointer to disable reclamation. */
5797 	if ((pv = get_pv_entry(pmap, NULL)) != NULL) {
5798 		pv->pv_va = va;
5799 		CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
5800 		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
5801 		m->md.pv_gen++;
5802 		return (TRUE);
5803 	} else
5804 		return (FALSE);
5805 }
5806 
5807 /*
5808  * Create the PV entry for a 2MB page mapping.  Always returns true unless the
5809  * flag PMAP_ENTER_NORECLAIM is specified.  If that flag is specified, returns
5810  * false if the PV entry cannot be allocated without resorting to reclamation.
5811  */
5812 static bool
5813 pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, pd_entry_t pde, u_int flags,
5814     struct rwlock **lockp)
5815 {
5816 	struct md_page *pvh;
5817 	pv_entry_t pv;
5818 	vm_paddr_t pa;
5819 
5820 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5821 	/* Pass NULL instead of the lock pointer to disable reclamation. */
5822 	if ((pv = get_pv_entry(pmap, (flags & PMAP_ENTER_NORECLAIM) != 0 ?
5823 	    NULL : lockp)) == NULL)
5824 		return (false);
5825 	pv->pv_va = va;
5826 	pa = pde & PG_PS_FRAME;
5827 	CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
5828 	pvh = pa_to_pvh(pa);
5829 	TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
5830 	pvh->pv_gen++;
5831 	return (true);
5832 }
5833 
5834 /*
5835  * Fills a page table page with mappings to consecutive physical pages.
5836  */
5837 static void
5838 pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte)
5839 {
5840 	pt_entry_t *pte;
5841 
5842 	for (pte = firstpte; pte < firstpte + NPTEPG; pte++) {
5843 		*pte = newpte;
5844 		newpte += PAGE_SIZE;
5845 	}
5846 }
5847 
5848 /*
5849  * Tries to demote a 2MB page mapping.  If demotion fails, the 2MB page
5850  * mapping is invalidated.
5851  */
5852 static boolean_t
5853 pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
5854 {
5855 	struct rwlock *lock;
5856 	boolean_t rv;
5857 
5858 	lock = NULL;
5859 	rv = pmap_demote_pde_locked(pmap, pde, va, &lock);
5860 	if (lock != NULL)
5861 		rw_wunlock(lock);
5862 	return (rv);
5863 }
5864 
5865 static void
5866 pmap_demote_pde_check(pt_entry_t *firstpte __unused, pt_entry_t newpte __unused)
5867 {
5868 #ifdef INVARIANTS
5869 #ifdef DIAGNOSTIC
5870 	pt_entry_t *xpte, *ypte;
5871 
5872 	for (xpte = firstpte; xpte < firstpte + NPTEPG;
5873 	    xpte++, newpte += PAGE_SIZE) {
5874 		if ((*xpte & PG_FRAME) != (newpte & PG_FRAME)) {
5875 			printf("pmap_demote_pde: xpte %zd and newpte map "
5876 			    "different pages: found %#lx, expected %#lx\n",
5877 			    xpte - firstpte, *xpte, newpte);
5878 			printf("page table dump\n");
5879 			for (ypte = firstpte; ypte < firstpte + NPTEPG; ypte++)
5880 				printf("%zd %#lx\n", ypte - firstpte, *ypte);
5881 			panic("firstpte");
5882 		}
5883 	}
5884 #else
5885 	KASSERT((*firstpte & PG_FRAME) == (newpte & PG_FRAME),
5886 	    ("pmap_demote_pde: firstpte and newpte map different physical"
5887 	    " addresses"));
5888 #endif
5889 #endif
5890 }
5891 
5892 static void
5893 pmap_demote_pde_abort(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
5894     pd_entry_t oldpde, struct rwlock **lockp)
5895 {
5896 	struct spglist free;
5897 	vm_offset_t sva;
5898 
5899 	SLIST_INIT(&free);
5900 	sva = trunc_2mpage(va);
5901 	pmap_remove_pde(pmap, pde, sva, &free, lockp);
5902 	if ((oldpde & pmap_global_bit(pmap)) == 0)
5903 		pmap_invalidate_pde_page(pmap, sva, oldpde);
5904 	vm_page_free_pages_toq(&free, true);
5905 	CTR2(KTR_PMAP, "pmap_demote_pde: failure for va %#lx in pmap %p",
5906 	    va, pmap);
5907 }
5908 
5909 static boolean_t
5910 pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
5911     struct rwlock **lockp)
5912 {
5913 	pd_entry_t newpde, oldpde;
5914 	pt_entry_t *firstpte, newpte;
5915 	pt_entry_t PG_A, PG_G, PG_M, PG_PKU_MASK, PG_RW, PG_V;
5916 	vm_paddr_t mptepa;
5917 	vm_page_t mpte;
5918 	int PG_PTE_CACHE;
5919 	bool in_kernel;
5920 
5921 	PG_A = pmap_accessed_bit(pmap);
5922 	PG_G = pmap_global_bit(pmap);
5923 	PG_M = pmap_modified_bit(pmap);
5924 	PG_RW = pmap_rw_bit(pmap);
5925 	PG_V = pmap_valid_bit(pmap);
5926 	PG_PTE_CACHE = pmap_cache_mask(pmap, 0);
5927 	PG_PKU_MASK = pmap_pku_mask_bit(pmap);
5928 
5929 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5930 	in_kernel = va >= VM_MAXUSER_ADDRESS;
5931 	oldpde = *pde;
5932 	KASSERT((oldpde & (PG_PS | PG_V)) == (PG_PS | PG_V),
5933 	    ("pmap_demote_pde: oldpde is missing PG_PS and/or PG_V"));
5934 
5935 	/*
5936 	 * Invalidate the 2MB page mapping and return "failure" if the
5937 	 * mapping was never accessed.
5938 	 */
5939 	if ((oldpde & PG_A) == 0) {
5940 		KASSERT((oldpde & PG_W) == 0,
5941 		    ("pmap_demote_pde: a wired mapping is missing PG_A"));
5942 		pmap_demote_pde_abort(pmap, va, pde, oldpde, lockp);
5943 		return (FALSE);
5944 	}
5945 
5946 	mpte = pmap_remove_pt_page(pmap, va);
5947 	if (mpte == NULL) {
5948 		KASSERT((oldpde & PG_W) == 0,
5949 		    ("pmap_demote_pde: page table page for a wired mapping"
5950 		    " is missing"));
5951 
5952 		/*
5953 		 * If the page table page is missing and the mapping
5954 		 * is for a kernel address, the mapping must belong to
5955 		 * the direct map.  Page table pages are preallocated
5956 		 * for every other part of the kernel address space,
5957 		 * so the direct map region is the only part of the
5958 		 * kernel address space that must be handled here.
5959 		 */
5960 		KASSERT(!in_kernel || (va >= DMAP_MIN_ADDRESS &&
5961 		    va < DMAP_MAX_ADDRESS),
5962 		    ("pmap_demote_pde: No saved mpte for va %#lx", va));
5963 
5964 		/*
5965 		 * If the 2MB page mapping belongs to the direct map
5966 		 * region of the kernel's address space, then the page
5967 		 * allocation request specifies the highest possible
5968 		 * priority (VM_ALLOC_INTERRUPT).  Otherwise, the
5969 		 * priority is normal.
5970 		 */
5971 		mpte = pmap_alloc_pt_page(pmap, pmap_pde_pindex(va),
5972 		    (in_kernel ? VM_ALLOC_INTERRUPT : 0) | VM_ALLOC_WIRED);
5973 
5974 		/*
5975 		 * If the allocation of the new page table page fails,
5976 		 * invalidate the 2MB page mapping and return "failure".
5977 		 */
5978 		if (mpte == NULL) {
5979 			pmap_demote_pde_abort(pmap, va, pde, oldpde, lockp);
5980 			return (FALSE);
5981 		}
5982 
5983 		if (!in_kernel)
5984 			mpte->ref_count = NPTEPG;
5985 	}
5986 	mptepa = VM_PAGE_TO_PHYS(mpte);
5987 	firstpte = (pt_entry_t *)PHYS_TO_DMAP(mptepa);
5988 	newpde = mptepa | PG_M | PG_A | (oldpde & PG_U) | PG_RW | PG_V;
5989 	KASSERT((oldpde & (PG_M | PG_RW)) != PG_RW,
5990 	    ("pmap_demote_pde: oldpde is missing PG_M"));
5991 	newpte = oldpde & ~PG_PS;
5992 	newpte = pmap_swap_pat(pmap, newpte);
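	/*
	 * Clearing PG_PS turns the 2MB PDE template into a 4KB PTE template;
	 * pmap_swap_pat() then relocates the PAT bit for page table formats
	 * in which a PDE and a PTE encode it at different positions.
	 */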
5993 
5994 	/*
5995 	 * If the page table page is not leftover from an earlier promotion,
5996 	 * initialize it.
5997 	 */
5998 	if (mpte->valid == 0)
5999 		pmap_fill_ptp(firstpte, newpte);
6000 
6001 	pmap_demote_pde_check(firstpte, newpte);
6002 
6003 	/*
6004 	 * If the mapping has changed attributes, update the page table
6005 	 * entries.
6006 	 */
6007 	if ((*firstpte & PG_PTE_PROMOTE) != (newpte & PG_PTE_PROMOTE))
6008 		pmap_fill_ptp(firstpte, newpte);
6009 
6010 	/*
6011 	 * The spare PV entries must be reserved prior to demoting the
6012 	 * mapping, that is, prior to changing the PDE.  Otherwise, the state
6013 	 * of the PDE and the PV lists will be inconsistent, which can result
6014 	 * in reclaim_pv_chunk() attempting to remove a PV entry from the
6015 	 * wrong PV list and pmap_pv_demote_pde() failing to find the expected
6016 	 * PV entry for the 2MB page mapping that is being demoted.
6017 	 */
6018 	if ((oldpde & PG_MANAGED) != 0)
6019 		reserve_pv_entries(pmap, NPTEPG - 1, lockp);
6020 
6021 	/*
6022 	 * Demote the mapping.  This pmap is locked.  The old PDE has
6023 	 * PG_A set.  If the old PDE has PG_RW set, it also has PG_M
6024 	 * set.  Thus, there is no danger of a race with another
6025 	 * processor changing the setting of PG_A and/or PG_M between
6026 	 * the read above and the store below.
6027 	 */
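	/*
	 * pmap_update_pde() performs the update with the extra invalidation
	 * steps required on CPUs affected by AMD erratum 383; otherwise a
	 * plain pde_store() suffices.
	 */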
6028 	if (workaround_erratum383)
6029 		pmap_update_pde(pmap, va, pde, newpde);
6030 	else
6031 		pde_store(pde, newpde);
6032 
6033 	/*
6034 	 * Invalidate a stale recursive mapping of the page table page.
6035 	 */
6036 	if (in_kernel)
6037 		pmap_invalidate_page(pmap, (vm_offset_t)vtopte(va));
6038 
6039 	/*
6040 	 * Demote the PV entry.
6041 	 */
6042 	if ((oldpde & PG_MANAGED) != 0)
6043 		pmap_pv_demote_pde(pmap, va, oldpde & PG_PS_FRAME, lockp);
6044 
6045 	counter_u64_add(pmap_pde_demotions, 1);
6046 	CTR2(KTR_PMAP, "pmap_demote_pde: success for va %#lx in pmap %p",
6047 	    va, pmap);
6048 	return (TRUE);
6049 }
6050 
6051 /*
6052  * pmap_remove_kernel_pde: Remove a kernel superpage mapping.
6053  */
6054 static void
6055 pmap_remove_kernel_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
6056 {
6057 	pd_entry_t newpde;
6058 	vm_paddr_t mptepa;
6059 	vm_page_t mpte;
6060 
6061 	KASSERT(pmap == kernel_pmap, ("pmap %p is not kernel_pmap", pmap));
6062 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
6063 	mpte = pmap_remove_pt_page(pmap, va);
6064 	if (mpte == NULL)
6065 		panic("pmap_remove_kernel_pde: Missing pt page.");
6066 
6067 	mptepa = VM_PAGE_TO_PHYS(mpte);
6068 	newpde = mptepa | X86_PG_M | X86_PG_A | X86_PG_RW | X86_PG_V;
6069 
6070 	/*
6071 	 * If this page table page was unmapped by a promotion, then it
6072 	 * contains valid mappings.  Zero it to invalidate those mappings.
6073 	 */
6074 	if (mpte->valid != 0)
6075 		pagezero((void *)PHYS_TO_DMAP(mptepa));
6076 
6077 	/*
6078 	 * Demote the mapping.
6079 	 */
6080 	if (workaround_erratum383)
6081 		pmap_update_pde(pmap, va, pde, newpde);
6082 	else
6083 		pde_store(pde, newpde);
6084 
6085 	/*
6086 	 * Invalidate a stale recursive mapping of the page table page.
6087 	 */
6088 	pmap_invalidate_page(pmap, (vm_offset_t)vtopte(va));
6089 }
6090 
6091 /*
6092  * pmap_remove_pde: Unmap a 2MB superpage mapping in a process.
6093  */
6094 static int
6095 pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
6096     struct spglist *free, struct rwlock **lockp)
6097 {
6098 	struct md_page *pvh;
6099 	pd_entry_t oldpde;
6100 	vm_offset_t eva, va;
6101 	vm_page_t m, mpte;
6102 	pt_entry_t PG_G, PG_A, PG_M, PG_RW;
6103 
6104 	PG_G = pmap_global_bit(pmap);
6105 	PG_A = pmap_accessed_bit(pmap);
6106 	PG_M = pmap_modified_bit(pmap);
6107 	PG_RW = pmap_rw_bit(pmap);
6108 
6109 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
6110 	KASSERT((sva & PDRMASK) == 0,
6111 	    ("pmap_remove_pde: sva is not 2mpage aligned"));
6112 	oldpde = pte_load_clear(pdq);
6113 	if (oldpde & PG_W)
6114 		pmap->pm_stats.wired_count -= NBPDR / PAGE_SIZE;
6115 	if ((oldpde & PG_G) != 0)
6116 		pmap_invalidate_pde_page(kernel_pmap, sva, oldpde);
6117 	pmap_resident_count_adj(pmap, -NBPDR / PAGE_SIZE);
6118 	if (oldpde & PG_MANAGED) {
6119 		CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, oldpde & PG_PS_FRAME);
6120 		pvh = pa_to_pvh(oldpde & PG_PS_FRAME);
6121 		pmap_pvh_free(pvh, pmap, sva);
6122 		eva = sva + NBPDR;
6123 		for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
6124 		    va < eva; va += PAGE_SIZE, m++) {
6125 			if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW))
6126 				vm_page_dirty(m);
6127 			if (oldpde & PG_A)
6128 				vm_page_aflag_set(m, PGA_REFERENCED);
6129 			if (TAILQ_EMPTY(&m->md.pv_list) &&
6130 			    TAILQ_EMPTY(&pvh->pv_list))
6131 				vm_page_aflag_clear(m, PGA_WRITEABLE);
6132 			pmap_delayed_invl_page(m);
6133 		}
6134 	}
6135 	if (pmap == kernel_pmap) {
6136 		pmap_remove_kernel_pde(pmap, pdq, sva);
6137 	} else {
6138 		mpte = pmap_remove_pt_page(pmap, sva);
6139 		if (mpte != NULL) {
6140 			KASSERT(mpte->valid == VM_PAGE_BITS_ALL,
6141 			    ("pmap_remove_pde: pte page not promoted"));
6142 			pmap_pt_page_count_adj(pmap, -1);
6143 			KASSERT(mpte->ref_count == NPTEPG,
6144 			    ("pmap_remove_pde: pte page ref count error"));
6145 			mpte->ref_count = 0;
6146 			pmap_add_delayed_free_list(mpte, free, FALSE);
6147 		}
6148 	}
6149 	return (pmap_unuse_pt(pmap, sva, *pmap_pdpe(pmap, sva), free));
6150 }
6151 
6152 /*
6153  * pmap_remove_pte: Unmap a single 4KB page mapping in a process.
6154  */
6155 static int
6156 pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va,
6157     pd_entry_t ptepde, struct spglist *free, struct rwlock **lockp)
6158 {
6159 	struct md_page *pvh;
6160 	pt_entry_t oldpte, PG_A, PG_M, PG_RW;
6161 	vm_page_t m;
6162 
6163 	PG_A = pmap_accessed_bit(pmap);
6164 	PG_M = pmap_modified_bit(pmap);
6165 	PG_RW = pmap_rw_bit(pmap);
6166 
6167 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
6168 	oldpte = pte_load_clear(ptq);
6169 	if (oldpte & PG_W)
6170 		pmap->pm_stats.wired_count -= 1;
6171 	pmap_resident_count_adj(pmap, -1);
6172 	if (oldpte & PG_MANAGED) {
6173 		m = PHYS_TO_VM_PAGE(oldpte & PG_FRAME);
6174 		if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
6175 			vm_page_dirty(m);
6176 		if (oldpte & PG_A)
6177 			vm_page_aflag_set(m, PGA_REFERENCED);
6178 		CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
6179 		pmap_pvh_free(&m->md, pmap, va);
6180 		if (TAILQ_EMPTY(&m->md.pv_list) &&
6181 		    (m->flags & PG_FICTITIOUS) == 0) {
6182 			pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
6183 			if (TAILQ_EMPTY(&pvh->pv_list))
6184 				vm_page_aflag_clear(m, PGA_WRITEABLE);
6185 		}
6186 		pmap_delayed_invl_page(m);
6187 	}
6188 	return (pmap_unuse_pt(pmap, va, ptepde, free));
6189 }
6190 
6191 /*
6192  * Remove a single page from a process address space
6193  */
6194 static void
6195 pmap_remove_page(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
6196     struct spglist *free)
6197 {
6198 	struct rwlock *lock;
6199 	pt_entry_t *pte, PG_V;
6200 
6201 	PG_V = pmap_valid_bit(pmap);
6202 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
6203 	if ((*pde & PG_V) == 0)
6204 		return;
6205 	pte = pmap_pde_to_pte(pde, va);
6206 	if ((*pte & PG_V) == 0)
6207 		return;
6208 	lock = NULL;
6209 	pmap_remove_pte(pmap, pte, va, *pde, free, &lock);
6210 	if (lock != NULL)
6211 		rw_wunlock(lock);
6212 	pmap_invalidate_page(pmap, va);
6213 }
6214 
6215 /*
6216  * Removes the specified range of addresses from the page table page.
6217  */
6218 static bool
6219 pmap_remove_ptes(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
6220     pd_entry_t *pde, struct spglist *free, struct rwlock **lockp)
6221 {
6222 	pt_entry_t PG_G, *pte;
6223 	vm_offset_t va;
6224 	bool anyvalid;
6225 
6226 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
6227 	PG_G = pmap_global_bit(pmap);
6228 	anyvalid = false;
6229 	va = eva;
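	/*
	 * "va" tracks the start of a pending run of global (PG_G) mappings,
	 * which are invalidated here with pmap_invalidate_range(); va == eva
	 * means no run is pending.  The removal of non-global mappings is
	 * instead reported to the caller via the return value so that it can
	 * perform a single pmap_invalidate_all().
	 */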
6230 	for (pte = pmap_pde_to_pte(pde, sva); sva != eva; pte++,
6231 	    sva += PAGE_SIZE) {
6232 		if (*pte == 0) {
6233 			if (va != eva) {
6234 				pmap_invalidate_range(pmap, va, sva);
6235 				va = eva;
6236 			}
6237 			continue;
6238 		}
6239 		if ((*pte & PG_G) == 0)
6240 			anyvalid = true;
6241 		else if (va == eva)
6242 			va = sva;
6243 		if (pmap_remove_pte(pmap, pte, sva, *pde, free, lockp)) {
6244 			sva += PAGE_SIZE;
6245 			break;
6246 		}
6247 	}
6248 	if (va != eva)
6249 		pmap_invalidate_range(pmap, va, sva);
6250 	return (anyvalid);
6251 }
6252 
6253 /*
6254  *	Remove the given range of addresses from the specified map.
6255  *
6256  *	It is assumed that the start and end are properly
6257  *	rounded to the page size.
6258  */
6259 void
6260 pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
6261 {
6262 	struct rwlock *lock;
6263 	vm_page_t mt;
6264 	vm_offset_t va_next;
6265 	pml5_entry_t *pml5e;
6266 	pml4_entry_t *pml4e;
6267 	pdp_entry_t *pdpe;
6268 	pd_entry_t ptpaddr, *pde;
6269 	pt_entry_t PG_G, PG_V;
6270 	struct spglist free;
6271 	int anyvalid;
6272 
6273 	PG_G = pmap_global_bit(pmap);
6274 	PG_V = pmap_valid_bit(pmap);
6275 
6276 	/*
6277 	 * If there are no resident pages besides the top level page
6278 	 * table page(s), there is nothing to do.  The kernel pmap always
6279 	 * accounts for the whole preloaded area as resident, which makes its
6280 	 * resident count > 2.
6281 	 * Perform an unsynchronized read.  This is, however, safe.
6282 	 */
6283 	if (pmap->pm_stats.resident_count <= 1 + (pmap->pm_pmltopu != NULL ?
6284 	    1 : 0))
6285 		return;
6286 
6287 	anyvalid = 0;
6288 	SLIST_INIT(&free);
6289 
6290 	pmap_delayed_invl_start();
6291 	PMAP_LOCK(pmap);
6292 	pmap_pkru_on_remove(pmap, sva, eva);
6293 
6294 	/*
6295 	 * Special handling for removing a single page: this is a very
6296 	 * common operation, so it is worth short-circuiting the general
6297 	 * loop below.
6298 	 */
6299 	if (sva + PAGE_SIZE == eva) {
6300 		pde = pmap_pde(pmap, sva);
6301 		if (pde && (*pde & PG_PS) == 0) {
6302 			pmap_remove_page(pmap, sva, pde, &free);
6303 			goto out;
6304 		}
6305 	}
6306 
6307 	lock = NULL;
6308 	for (; sva < eva; sva = va_next) {
6309 		if (pmap->pm_stats.resident_count == 0)
6310 			break;
6311 
6312 		if (pmap_is_la57(pmap)) {
6313 			pml5e = pmap_pml5e(pmap, sva);
6314 			if ((*pml5e & PG_V) == 0) {
6315 				va_next = (sva + NBPML5) & ~PML5MASK;
6316 				if (va_next < sva)
6317 					va_next = eva;
6318 				continue;
6319 			}
6320 			pml4e = pmap_pml5e_to_pml4e(pml5e, sva);
6321 		} else {
6322 			pml4e = pmap_pml4e(pmap, sva);
6323 		}
6324 		if ((*pml4e & PG_V) == 0) {
6325 			va_next = (sva + NBPML4) & ~PML4MASK;
6326 			if (va_next < sva)
6327 				va_next = eva;
6328 			continue;
6329 		}
6330 
6331 		va_next = (sva + NBPDP) & ~PDPMASK;
6332 		if (va_next < sva)
6333 			va_next = eva;
6334 		pdpe = pmap_pml4e_to_pdpe(pml4e, sva);
6335 		if ((*pdpe & PG_V) == 0)
6336 			continue;
6337 		if ((*pdpe & PG_PS) != 0) {
6338 			KASSERT(va_next <= eva,
6339 			    ("partial update of non-transparent 1G mapping "
6340 			    "pdpe %#lx sva %#lx eva %#lx va_next %#lx",
6341 			    *pdpe, sva, eva, va_next));
6342 			MPASS(pmap != kernel_pmap); /* XXXKIB */
6343 			MPASS((*pdpe & (PG_MANAGED | PG_G)) == 0);
6344 			anyvalid = 1;
6345 			*pdpe = 0;
6346 			pmap_resident_count_adj(pmap, -NBPDP / PAGE_SIZE);
6347 			mt = PHYS_TO_VM_PAGE(*pmap_pml4e(pmap, sva) & PG_FRAME);
6348 			pmap_unwire_ptp(pmap, sva, mt, &free);
6349 			continue;
6350 		}
6351 
6352 		/*
6353 		 * Calculate index for next page table.
6354 		 */
6355 		va_next = (sva + NBPDR) & ~PDRMASK;
6356 		if (va_next < sva)
6357 			va_next = eva;
6358 
6359 		pde = pmap_pdpe_to_pde(pdpe, sva);
6360 		ptpaddr = *pde;
6361 
6362 		/*
6363 		 * Weed out invalid mappings.
6364 		 */
6365 		if (ptpaddr == 0)
6366 			continue;
6367 
6368 		/*
6369 		 * Check for large page.
6370 		 */
6371 		if ((ptpaddr & PG_PS) != 0) {
6372 			/*
6373 			 * Are we removing the entire large page?  If not,
6374 			 * demote the mapping and fall through.
6375 			 */
6376 			if (sva + NBPDR == va_next && eva >= va_next) {
6377 				/*
6378 				 * The TLB entry for a PG_G mapping is
6379 				 * invalidated by pmap_remove_pde().
6380 				 */
6381 				if ((ptpaddr & PG_G) == 0)
6382 					anyvalid = 1;
6383 				pmap_remove_pde(pmap, pde, sva, &free, &lock);
6384 				continue;
6385 			} else if (!pmap_demote_pde_locked(pmap, pde, sva,
6386 			    &lock)) {
6387 				/* The large page mapping was destroyed. */
6388 				continue;
6389 			} else
6390 				ptpaddr = *pde;
6391 		}
6392 
6393 		/*
6394 		 * Limit our scan to either the end of the va represented
6395 		 * by the current page table page, or to the end of the
6396 		 * range being removed.
6397 		 */
6398 		if (va_next > eva)
6399 			va_next = eva;
6400 
6401 		if (pmap_remove_ptes(pmap, sva, va_next, pde, &free, &lock))
6402 			anyvalid = 1;
6403 	}
6404 	if (lock != NULL)
6405 		rw_wunlock(lock);
6406 out:
6407 	if (anyvalid)
6408 		pmap_invalidate_all(pmap);
6409 	PMAP_UNLOCK(pmap);
6410 	pmap_delayed_invl_finish();
6411 	vm_page_free_pages_toq(&free, true);
6412 }
6413 
6414 /*
6415  *	Routine:	pmap_remove_all
6416  *	Function:
6417  *		Removes this physical page from
6418  *		all physical maps in which it resides.
6419  *		Reflects back modify bits to the pager.
6420  *
6421  *	Notes:
6422  *		Original versions of this routine were very
6423  *		inefficient because they iteratively called
6424  *		pmap_remove (slow...)
6425  */
6426 
6427 void
6428 pmap_remove_all(vm_page_t m)
6429 {
6430 	struct md_page *pvh;
6431 	pv_entry_t pv;
6432 	pmap_t pmap;
6433 	struct rwlock *lock;
6434 	pt_entry_t *pte, tpte, PG_A, PG_M, PG_RW;
6435 	pd_entry_t *pde;
6436 	vm_offset_t va;
6437 	struct spglist free;
6438 	int pvh_gen, md_gen;
6439 
6440 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
6441 	    ("pmap_remove_all: page %p is not managed", m));
6442 	SLIST_INIT(&free);
6443 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
6444 	pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
6445 	    pa_to_pvh(VM_PAGE_TO_PHYS(m));
6446 	rw_wlock(lock);
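	/*
	 * First demote every 2MB mapping of the page, which is tracked on
	 * the pa_to_pvh() list, so that the loop over m->md.pv_list below
	 * only has to remove 4KB mappings.
	 */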
6447 retry:
6448 	while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
6449 		pmap = PV_PMAP(pv);
6450 		if (!PMAP_TRYLOCK(pmap)) {
6451 			pvh_gen = pvh->pv_gen;
6452 			rw_wunlock(lock);
6453 			PMAP_LOCK(pmap);
6454 			rw_wlock(lock);
6455 			if (pvh_gen != pvh->pv_gen) {
6456 				PMAP_UNLOCK(pmap);
6457 				goto retry;
6458 			}
6459 		}
6460 		va = pv->pv_va;
6461 		pde = pmap_pde(pmap, va);
6462 		(void)pmap_demote_pde_locked(pmap, pde, va, &lock);
6463 		PMAP_UNLOCK(pmap);
6464 	}
6465 	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
6466 		pmap = PV_PMAP(pv);
6467 		if (!PMAP_TRYLOCK(pmap)) {
6468 			pvh_gen = pvh->pv_gen;
6469 			md_gen = m->md.pv_gen;
6470 			rw_wunlock(lock);
6471 			PMAP_LOCK(pmap);
6472 			rw_wlock(lock);
6473 			if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
6474 				PMAP_UNLOCK(pmap);
6475 				goto retry;
6476 			}
6477 		}
6478 		PG_A = pmap_accessed_bit(pmap);
6479 		PG_M = pmap_modified_bit(pmap);
6480 		PG_RW = pmap_rw_bit(pmap);
6481 		pmap_resident_count_adj(pmap, -1);
6482 		pde = pmap_pde(pmap, pv->pv_va);
6483 		KASSERT((*pde & PG_PS) == 0, ("pmap_remove_all: found"
6484 		    " a 2mpage in page %p's pv list", m));
6485 		pte = pmap_pde_to_pte(pde, pv->pv_va);
6486 		tpte = pte_load_clear(pte);
6487 		if (tpte & PG_W)
6488 			pmap->pm_stats.wired_count--;
6489 		if (tpte & PG_A)
6490 			vm_page_aflag_set(m, PGA_REFERENCED);
6491 
6492 		/*
6493 		 * Update the vm_page_t clean and reference bits.
6494 		 */
6495 		if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
6496 			vm_page_dirty(m);
6497 		pmap_unuse_pt(pmap, pv->pv_va, *pde, &free);
6498 		pmap_invalidate_page(pmap, pv->pv_va);
6499 		TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
6500 		m->md.pv_gen++;
6501 		free_pv_entry(pmap, pv);
6502 		PMAP_UNLOCK(pmap);
6503 	}
6504 	vm_page_aflag_clear(m, PGA_WRITEABLE);
6505 	rw_wunlock(lock);
6506 	pmap_delayed_invl_wait(m);
6507 	vm_page_free_pages_toq(&free, true);
6508 }
6509 
6510 /*
6511  * pmap_protect_pde: Apply the given protection to a 2mpage mapping in a process.
6512  */
6513 static boolean_t
6514 pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot)
6515 {
6516 	pd_entry_t newpde, oldpde;
6517 	vm_page_t m, mt;
6518 	boolean_t anychanged;
6519 	pt_entry_t PG_G, PG_M, PG_RW;
6520 
6521 	PG_G = pmap_global_bit(pmap);
6522 	PG_M = pmap_modified_bit(pmap);
6523 	PG_RW = pmap_rw_bit(pmap);
6524 
6525 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
6526 	KASSERT((sva & PDRMASK) == 0,
6527 	    ("pmap_protect_pde: sva is not 2mpage aligned"));
6528 	anychanged = FALSE;
6529 retry:
6530 	oldpde = newpde = *pde;
6531 	if ((prot & VM_PROT_WRITE) == 0) {
6532 		if ((oldpde & (PG_MANAGED | PG_M | PG_RW)) ==
6533 		    (PG_MANAGED | PG_M | PG_RW)) {
6534 			m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
6535 			for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
6536 				vm_page_dirty(mt);
6537 		}
6538 		newpde &= ~(PG_RW | PG_M);
6539 	}
6540 	if ((prot & VM_PROT_EXECUTE) == 0)
6541 		newpde |= pg_nx;
6542 	if (newpde != oldpde) {
6543 		/*
6544 		 * As an optimization to future operations on this PDE, clear
6545 		 * PG_PROMOTED.  The impending invalidation will remove any
6546 		 * lingering 4KB page mappings from the TLB.
6547 		 */
6548 		if (!atomic_cmpset_long(pde, oldpde, newpde & ~PG_PROMOTED))
6549 			goto retry;
6550 		if ((oldpde & PG_G) != 0)
6551 			pmap_invalidate_pde_page(kernel_pmap, sva, oldpde);
6552 		else
6553 			anychanged = TRUE;
6554 	}
6555 	return (anychanged);
6556 }
6557 
6558 /*
6559  *	Set the physical protection on the
6560  *	specified range of this map as requested.
6561  */
6562 void
6563 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
6564 {
6565 	vm_page_t m;
6566 	vm_offset_t va_next;
6567 	pml4_entry_t *pml4e;
6568 	pdp_entry_t *pdpe;
6569 	pd_entry_t ptpaddr, *pde;
6570 	pt_entry_t *pte, PG_G, PG_M, PG_RW, PG_V;
6571 	pt_entry_t obits, pbits;
6572 	boolean_t anychanged;
6573 
6574 	KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot));
6575 	if (prot == VM_PROT_NONE) {
6576 		pmap_remove(pmap, sva, eva);
6577 		return;
6578 	}
6579 
6580 	if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) ==
6581 	    (VM_PROT_WRITE|VM_PROT_EXECUTE))
6582 		return;
6583 
6584 	PG_G = pmap_global_bit(pmap);
6585 	PG_M = pmap_modified_bit(pmap);
6586 	PG_V = pmap_valid_bit(pmap);
6587 	PG_RW = pmap_rw_bit(pmap);
6588 	anychanged = FALSE;
6589 
6590 	/*
6591 	 * Although this function delays and batches the invalidation
6592 	 * of stale TLB entries, it does not need to call
6593 	 * pmap_delayed_invl_start() and
6594 	 * pmap_delayed_invl_finish(), because it does not
6595 	 * ordinarily destroy mappings.  Stale TLB entries from
6596 	 * protection-only changes need only be invalidated before the
6597 	 * pmap lock is released, because protection-only changes do
6598 	 * not destroy PV entries.  Even operations that iterate over
6599 	 * a physical page's PV list of mappings, like
6600 	 * pmap_remove_write(), acquire the pmap lock for each
6601 	 * mapping.  Consequently, for protection-only changes, the
6602 	 * pmap lock suffices to synchronize both page table and TLB
6603 	 * updates.
6604 	 *
6605 	 * This function only destroys a mapping if pmap_demote_pde()
6606 	 * fails.  In that case, stale TLB entries are immediately
6607 	 * invalidated.
6608 	 */
6609 
6610 	PMAP_LOCK(pmap);
6611 	for (; sva < eva; sva = va_next) {
6612 		pml4e = pmap_pml4e(pmap, sva);
6613 		if (pml4e == NULL || (*pml4e & PG_V) == 0) {
6614 			va_next = (sva + NBPML4) & ~PML4MASK;
6615 			if (va_next < sva)
6616 				va_next = eva;
6617 			continue;
6618 		}
6619 
6620 		va_next = (sva + NBPDP) & ~PDPMASK;
6621 		if (va_next < sva)
6622 			va_next = eva;
6623 		pdpe = pmap_pml4e_to_pdpe(pml4e, sva);
6624 		if ((*pdpe & PG_V) == 0)
6625 			continue;
6626 		if ((*pdpe & PG_PS) != 0) {
6627 			KASSERT(va_next <= eva,
6628 			    ("partial update of non-transparent 1G mapping "
6629 			    "pdpe %#lx sva %#lx eva %#lx va_next %#lx",
6630 			    *pdpe, sva, eva, va_next));
6631 retry_pdpe:
6632 			obits = pbits = *pdpe;
6633 			MPASS((pbits & (PG_MANAGED | PG_G)) == 0);
6634 			MPASS(pmap != kernel_pmap); /* XXXKIB */
6635 			if ((prot & VM_PROT_WRITE) == 0)
6636 				pbits &= ~(PG_RW | PG_M);
6637 			if ((prot & VM_PROT_EXECUTE) == 0)
6638 				pbits |= pg_nx;
6639 
6640 			if (pbits != obits) {
6641 				if (!atomic_cmpset_long(pdpe, obits, pbits))
6642 					/* PG_PS cannot be cleared under us. */
6643 					goto retry_pdpe;
6644 				anychanged = TRUE;
6645 			}
6646 			continue;
6647 		}
6648 
6649 		va_next = (sva + NBPDR) & ~PDRMASK;
6650 		if (va_next < sva)
6651 			va_next = eva;
6652 
6653 		pde = pmap_pdpe_to_pde(pdpe, sva);
6654 		ptpaddr = *pde;
6655 
6656 		/*
6657 		 * Weed out invalid mappings.
6658 		 */
6659 		if (ptpaddr == 0)
6660 			continue;
6661 
6662 		/*
6663 		 * Check for large page.
6664 		 */
6665 		if ((ptpaddr & PG_PS) != 0) {
6666 			/*
6667 			 * Are we protecting the entire large page?  If not,
6668 			 * demote the mapping and fall through.
6669 			 */
6670 			if (sva + NBPDR == va_next && eva >= va_next) {
6671 				/*
6672 				 * The TLB entry for a PG_G mapping is
6673 				 * invalidated by pmap_protect_pde().
6674 				 */
6675 				if (pmap_protect_pde(pmap, pde, sva, prot))
6676 					anychanged = TRUE;
6677 				continue;
6678 			} else if (!pmap_demote_pde(pmap, pde, sva)) {
6679 				/*
6680 				 * The large page mapping was destroyed.
6681 				 */
6682 				continue;
6683 			}
6684 		}
6685 
6686 		if (va_next > eva)
6687 			va_next = eva;
6688 
6689 		for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
6690 		    sva += PAGE_SIZE) {
6691 retry:
6692 			obits = pbits = *pte;
6693 			if ((pbits & PG_V) == 0)
6694 				continue;
6695 
6696 			if ((prot & VM_PROT_WRITE) == 0) {
6697 				if ((pbits & (PG_MANAGED | PG_M | PG_RW)) ==
6698 				    (PG_MANAGED | PG_M | PG_RW)) {
6699 					m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
6700 					vm_page_dirty(m);
6701 				}
6702 				pbits &= ~(PG_RW | PG_M);
6703 			}
6704 			if ((prot & VM_PROT_EXECUTE) == 0)
6705 				pbits |= pg_nx;
6706 
6707 			if (pbits != obits) {
6708 				if (!atomic_cmpset_long(pte, obits, pbits))
6709 					goto retry;
6710 				if (obits & PG_G)
6711 					pmap_invalidate_page(pmap, sva);
6712 				else
6713 					anychanged = TRUE;
6714 			}
6715 		}
6716 	}
6717 	if (anychanged)
6718 		pmap_invalidate_all(pmap);
6719 	PMAP_UNLOCK(pmap);
6720 }
6721 
6722 #if VM_NRESERVLEVEL > 0
6723 static bool
6724 pmap_pde_ept_executable(pmap_t pmap, pd_entry_t pde)
6725 {
6726 
6727 	if (pmap->pm_type != PT_EPT)
6728 		return (false);
6729 	return ((pde & EPT_PG_EXECUTE) != 0);
6730 }
6731 
6732 /*
6733  * Tries to promote the 512, contiguous 4KB page mappings that are within a
6734  * single page table page (PTP) to a single 2MB page mapping.  For promotion
6735  * to occur, two conditions must be met: (1) the 4KB page mappings must map
6736  * aligned, contiguous physical memory and (2) the 4KB page mappings must have
6737  * identical characteristics.
6738  */
6739 static void
6740 pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
6741     struct rwlock **lockp)
6742 {
6743 	pd_entry_t newpde;
6744 	pt_entry_t *firstpte, oldpte, pa, *pte;
6745 	pt_entry_t PG_G, PG_A, PG_M, PG_RW, PG_V, PG_PKU_MASK;
6746 	vm_page_t mpte;
6747 	int PG_PTE_CACHE;
6748 
6749 	PG_A = pmap_accessed_bit(pmap);
6750 	PG_G = pmap_global_bit(pmap);
6751 	PG_M = pmap_modified_bit(pmap);
6752 	PG_V = pmap_valid_bit(pmap);
6753 	PG_RW = pmap_rw_bit(pmap);
6754 	PG_PKU_MASK = pmap_pku_mask_bit(pmap);
6755 	PG_PTE_CACHE = pmap_cache_mask(pmap, 0);
6756 
6757 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
6758 
6759 	/*
6760 	 * Examine the first PTE in the specified PTP.  Abort if this PTE is
6761 	 * either invalid, unused, or does not map the first 4KB physical page
6762 	 * within a 2MB page.
6763 	 */
6764 	firstpte = (pt_entry_t *)PHYS_TO_DMAP(*pde & PG_FRAME);
6765 	newpde = *firstpte;
6766 	if ((newpde & ((PG_FRAME & PDRMASK) | PG_A | PG_V)) != (PG_A | PG_V) ||
6767 	    !pmap_allow_2m_x_page(pmap, pmap_pde_ept_executable(pmap,
6768 	    newpde))) {
6769 		counter_u64_add(pmap_pde_p_failures, 1);
6770 		CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#lx"
6771 		    " in pmap %p", va, pmap);
6772 		return;
6773 	}
6774 setpde:
6775 	if ((newpde & (PG_M | PG_RW)) == PG_RW) {
6776 		/*
6777 		 * When PG_M is already clear, PG_RW can be cleared without
6778 		 * a TLB invalidation.
6779 		 */
6780 		if (!atomic_fcmpset_long(firstpte, &newpde, newpde & ~PG_RW))
6781 			goto setpde;
6782 		newpde &= ~PG_RW;
6783 	}
6784 
6785 	/*
6786 	 * Examine each of the other PTEs in the specified PTP.  Abort if this
6787 	 * PTE maps an unexpected 4KB physical page or does not have identical
6788 	 * characteristics to the first PTE.
6789 	 */
6790 	pa = (newpde & (PG_PS_FRAME | PG_A | PG_V)) + NBPDR - PAGE_SIZE;
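	/*
	 * "pa" combines the expected physical frame of the last 4KB page in
	 * the run with the required PG_A and PG_V bits, so a single
	 * comparison per PTE checks both physical contiguity and the
	 * presence of those bits.  The scan proceeds from the last PTE
	 * backwards, decrementing pa by PAGE_SIZE at each step.
	 */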
6791 	for (pte = firstpte + NPTEPG - 1; pte > firstpte; pte--) {
6792 		oldpte = *pte;
6793 		if ((oldpte & (PG_FRAME | PG_A | PG_V)) != pa) {
6794 			counter_u64_add(pmap_pde_p_failures, 1);
6795 			CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#lx"
6796 			    " in pmap %p", va, pmap);
6797 			return;
6798 		}
6799 setpte:
6800 		if ((oldpte & (PG_M | PG_RW)) == PG_RW) {
6801 			/*
6802 			 * When PG_M is already clear, PG_RW can be cleared
6803 			 * without a TLB invalidation.
6804 			 */
6805 			if (!atomic_fcmpset_long(pte, &oldpte, oldpte & ~PG_RW))
6806 				goto setpte;
6807 			oldpte &= ~PG_RW;
6808 			CTR2(KTR_PMAP, "pmap_promote_pde: protect for va %#lx"
6809 			    " in pmap %p", (oldpte & PG_FRAME & PDRMASK) |
6810 			    (va & ~PDRMASK), pmap);
6811 		}
6812 		if ((oldpte & PG_PTE_PROMOTE) != (newpde & PG_PTE_PROMOTE)) {
6813 			counter_u64_add(pmap_pde_p_failures, 1);
6814 			CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#lx"
6815 			    " in pmap %p", va, pmap);
6816 			return;
6817 		}
6818 		pa -= PAGE_SIZE;
6819 	}
6820 
6821 	/*
6822 	 * Save the page table page in its current state until the PDE
6823 	 * mapping the superpage is demoted by pmap_demote_pde() or
6824 	 * destroyed by pmap_remove_pde().
6825 	 */
6826 	mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
6827 	KASSERT(mpte >= vm_page_array &&
6828 	    mpte < &vm_page_array[vm_page_array_size],
6829 	    ("pmap_promote_pde: page table page is out of range"));
6830 	KASSERT(mpte->pindex == pmap_pde_pindex(va),
6831 	    ("pmap_promote_pde: page table page's pindex is wrong "
6832 	    "mpte %p pidx %#lx va %#lx va pde pidx %#lx",
6833 	    mpte, mpte->pindex, va, pmap_pde_pindex(va)));
6834 	if (pmap_insert_pt_page(pmap, mpte, true)) {
6835 		counter_u64_add(pmap_pde_p_failures, 1);
6836 		CTR2(KTR_PMAP,
6837 		    "pmap_promote_pde: failure for va %#lx in pmap %p", va,
6838 		    pmap);
6839 		return;
6840 	}
6841 
6842 	/*
6843 	 * Promote the pv entries.
6844 	 */
6845 	if ((newpde & PG_MANAGED) != 0)
6846 		pmap_pv_promote_pde(pmap, va, newpde & PG_PS_FRAME, lockp);
6847 
6848 	/*
6849 	 * Propagate the PAT index to its proper position.
6850 	 */
6851 	newpde = pmap_swap_pat(pmap, newpde);
6852 
6853 	/*
6854 	 * Map the superpage.
6855 	 */
6856 	if (workaround_erratum383)
6857 		pmap_update_pde(pmap, va, pde, PG_PS | newpde);
6858 	else
6859 		pde_store(pde, PG_PROMOTED | PG_PS | newpde);
6860 
6861 	counter_u64_add(pmap_pde_promotions, 1);
6862 	CTR2(KTR_PMAP, "pmap_promote_pde: success for va %#lx"
6863 	    " in pmap %p", va, pmap);
6864 }
6865 #endif /* VM_NRESERVLEVEL > 0 */
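
/*
 * Illustrative sketch: the promotion loop above requires that all 512 PTEs
 * map physically consecutive 4KB frames and agree on every attribute bit
 * carried into the 2MB mapping.  The helper below restates that invariant
 * in isolation; it omits the PG_A/PG_V handling and the PG_RW/PG_M fixups
 * performed above.  The names example_ptes_promotable, frame_mask, and
 * attr_mask are hypothetical; attr_mask plays the role of PG_PTE_PROMOTE.
 */
#if 0	/* example only, not compiled */
static bool
example_ptes_promotable(const pt_entry_t pte[NPTEPG], pt_entry_t frame_mask,
    pt_entry_t attr_mask)
{
	pt_entry_t base;
	int i;

	base = pte[0] & frame_mask;
	for (i = 1; i < NPTEPG; i++) {
		/* Frames must be physically consecutive 4KB pages. */
		if ((pte[i] & frame_mask) != base + (pt_entry_t)i * PAGE_SIZE)
			return (false);
		/* Attributes must match the first PTE exactly. */
		if ((pte[i] & attr_mask) != (pte[0] & attr_mask))
			return (false);
	}
	return (true);
}
#endif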
6866 
6867 static int
6868 pmap_enter_largepage(pmap_t pmap, vm_offset_t va, pt_entry_t newpte, int flags,
6869     int psind)
6870 {
6871 	vm_page_t mp;
6872 	pt_entry_t origpte, *pml4e, *pdpe, *pde, pten, PG_V;
6873 
6874 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
6875 	KASSERT(psind > 0 && psind < MAXPAGESIZES && pagesizes[psind] != 0,
6876 	    ("psind %d unexpected", psind));
6877 	KASSERT(((newpte & PG_FRAME) & (pagesizes[psind] - 1)) == 0,
6878 	    ("unaligned phys address %#lx newpte %#lx psind %d",
6879 	    newpte & PG_FRAME, newpte, psind));
6880 	KASSERT((va & (pagesizes[psind] - 1)) == 0,
6881 	    ("unaligned va %#lx psind %d", va, psind));
6882 	KASSERT(va < VM_MAXUSER_ADDRESS,
6883 	    ("kernel mode non-transparent superpage")); /* XXXKIB */
6884 	KASSERT(va + pagesizes[psind] < VM_MAXUSER_ADDRESS,
6885 	    ("overflowing user map va %#lx psind %d", va, psind)); /* XXXKIB */
6886 
6887 	PG_V = pmap_valid_bit(pmap);
6888 
6889 restart:
6890 	if (!pmap_pkru_same(pmap, va, va + pagesizes[psind]))
6891 		return (KERN_PROTECTION_FAILURE);
6892 	pten = newpte;
6893 	if (va < VM_MAXUSER_ADDRESS && pmap->pm_type == PT_X86)
6894 		pten |= pmap_pkru_get(pmap, va);
6895 
6896 	if (psind == 2) {	/* 1G */
6897 		pml4e = pmap_pml4e(pmap, va);
6898 		if (pml4e == NULL || (*pml4e & PG_V) == 0) {
6899 			mp = pmap_allocpte_alloc(pmap, pmap_pml4e_pindex(va),
6900 			    NULL, va);
6901 			if (mp == NULL)
6902 				goto allocf;
6903 			pdpe = (pdp_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mp));
6904 			pdpe = &pdpe[pmap_pdpe_index(va)];
6905 			origpte = *pdpe;
6906 			MPASS(origpte == 0);
6907 		} else {
6908 			pdpe = pmap_pml4e_to_pdpe(pml4e, va);
6909 			KASSERT(pdpe != NULL, ("va %#lx lost pdpe", va));
6910 			origpte = *pdpe;
6911 			if ((origpte & PG_V) == 0) {
6912 				mp = PHYS_TO_VM_PAGE(*pml4e & PG_FRAME);
6913 				mp->ref_count++;
6914 			}
6915 		}
6916 		*pdpe = pten;
6917 	} else /* (psind == 1) */ {	/* 2M */
6918 		pde = pmap_pde(pmap, va);
6919 		if (pde == NULL) {
6920 			mp = pmap_allocpte_alloc(pmap, pmap_pdpe_pindex(va),
6921 			    NULL, va);
6922 			if (mp == NULL)
6923 				goto allocf;
6924 			pde = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mp));
6925 			pde = &pde[pmap_pde_index(va)];
6926 			origpte = *pde;
6927 			MPASS(origpte == 0);
6928 		} else {
6929 			origpte = *pde;
6930 			if ((origpte & PG_V) == 0) {
6931 				pdpe = pmap_pdpe(pmap, va);
6932 				MPASS(pdpe != NULL && (*pdpe & PG_V) != 0);
6933 				mp = PHYS_TO_VM_PAGE(*pdpe & PG_FRAME);
6934 				mp->ref_count++;
6935 			}
6936 		}
6937 		*pde = pten;
6938 	}
6939 	KASSERT((origpte & PG_V) == 0 || ((origpte & PG_PS) != 0 &&
6940 	    (origpte & PG_PS_FRAME) == (pten & PG_PS_FRAME)),
6941 	    ("va %#lx changing %s phys page origpte %#lx pten %#lx",
6942 	    va, psind == 2 ? "1G" : "2M", origpte, pten));
6943 	if ((pten & PG_W) != 0 && (origpte & PG_W) == 0)
6944 		pmap->pm_stats.wired_count += pagesizes[psind] / PAGE_SIZE;
6945 	else if ((pten & PG_W) == 0 && (origpte & PG_W) != 0)
6946 		pmap->pm_stats.wired_count -= pagesizes[psind] / PAGE_SIZE;
6947 	if ((origpte & PG_V) == 0)
6948 		pmap_resident_count_adj(pmap, pagesizes[psind] / PAGE_SIZE);
6949 
6950 	return (KERN_SUCCESS);
6951 
6952 allocf:
6953 	if ((flags & PMAP_ENTER_NOSLEEP) != 0)
6954 		return (KERN_RESOURCE_SHORTAGE);
6955 	PMAP_UNLOCK(pmap);
6956 	vm_wait(NULL);
6957 	PMAP_LOCK(pmap);
6958 	goto restart;
6959 }
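
/*
 * Worked example of the accounting above: the wired and resident counters
 * are kept in 4KB page units, so
 *	psind == 1 (2MB): pagesizes[1] / PAGE_SIZE == NBPDR / PAGE_SIZE == 512
 *	psind == 2 (1GB): pagesizes[2] / PAGE_SIZE == NBPDP / PAGE_SIZE == 262144
 * pages are added or subtracted per mapping.
 */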
6960 
6961 /*
6962  *	Insert the given physical page (m) at
6963  *	the specified virtual address (va) in the
6964  *	target physical map with the protection requested.
6965  *
6966  *	If specified, the page will be wired down, meaning
6967  *	that the related pte can not be reclaimed.
6968  *
6969  *	NB:  This is the only routine which MAY NOT lazy-evaluate
6970  *	or lose information.  That is, this routine must actually
6971  *	insert this page into the given map NOW.
6972  *
6973  *	When destroying both a page table and PV entry, this function
6974  *	performs the TLB invalidation before releasing the PV list
6975  *	lock, so we do not need pmap_delayed_invl_page() calls here.
6976  */
6977 int
6978 pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
6979     u_int flags, int8_t psind)
6980 {
6981 	struct rwlock *lock;
6982 	pd_entry_t *pde;
6983 	pt_entry_t *pte, PG_G, PG_A, PG_M, PG_RW, PG_V;
6984 	pt_entry_t newpte, origpte;
6985 	pv_entry_t pv;
6986 	vm_paddr_t opa, pa;
6987 	vm_page_t mpte, om;
6988 	int rv;
6989 	boolean_t nosleep;
6990 
6991 	PG_A = pmap_accessed_bit(pmap);
6992 	PG_G = pmap_global_bit(pmap);
6993 	PG_M = pmap_modified_bit(pmap);
6994 	PG_V = pmap_valid_bit(pmap);
6995 	PG_RW = pmap_rw_bit(pmap);
6996 
6997 	va = trunc_page(va);
6998 	KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
6999 	KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
7000 	    ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%lx)",
7001 	    va));
7002 	KASSERT((m->oflags & VPO_UNMANAGED) != 0 || !VA_IS_CLEANMAP(va),
7003 	    ("pmap_enter: managed mapping within the clean submap"));
7004 	if ((m->oflags & VPO_UNMANAGED) == 0)
7005 		VM_PAGE_OBJECT_BUSY_ASSERT(m);
7006 	KASSERT((flags & PMAP_ENTER_RESERVED) == 0,
7007 	    ("pmap_enter: flags %u has reserved bits set", flags));
7008 	pa = VM_PAGE_TO_PHYS(m);
7009 	newpte = (pt_entry_t)(pa | PG_A | PG_V);
7010 	if ((flags & VM_PROT_WRITE) != 0)
7011 		newpte |= PG_M;
7012 	if ((prot & VM_PROT_WRITE) != 0)
7013 		newpte |= PG_RW;
7014 	KASSERT((newpte & (PG_M | PG_RW)) != PG_M,
7015 	    ("pmap_enter: flags includes VM_PROT_WRITE but prot doesn't"));
7016 	if ((prot & VM_PROT_EXECUTE) == 0)
7017 		newpte |= pg_nx;
7018 	if ((flags & PMAP_ENTER_WIRED) != 0)
7019 		newpte |= PG_W;
7020 	if (va < VM_MAXUSER_ADDRESS)
7021 		newpte |= PG_U;
7022 	if (pmap == kernel_pmap)
7023 		newpte |= PG_G;
7024 	newpte |= pmap_cache_bits(pmap, m->md.pat_mode, psind > 0);
7025 
7026 	/*
7027 	 * Set modified bit gratuitously for writeable mappings if
7028 	 * the page is unmanaged. We do not want to take a fault
7029 	 * to do the dirty bit accounting for these mappings.
7030 	 */
7031 	if ((m->oflags & VPO_UNMANAGED) != 0) {
7032 		if ((newpte & PG_RW) != 0)
7033 			newpte |= PG_M;
7034 	} else
7035 		newpte |= PG_MANAGED;
7036 
7037 	lock = NULL;
7038 	PMAP_LOCK(pmap);
7039 	if ((flags & PMAP_ENTER_LARGEPAGE) != 0) {
7040 		KASSERT((m->oflags & VPO_UNMANAGED) != 0,
7041 		    ("managed largepage va %#lx flags %#x", va, flags));
7042 		rv = pmap_enter_largepage(pmap, va, newpte | PG_PS, flags,
7043 		    psind);
7044 		goto out;
7045 	}
7046 	if (psind == 1) {
7047 		/* Assert the required virtual and physical alignment. */
7048 		KASSERT((va & PDRMASK) == 0, ("pmap_enter: va unaligned"));
7049 		KASSERT(m->psind > 0, ("pmap_enter: m->psind < psind"));
7050 		rv = pmap_enter_pde(pmap, va, newpte | PG_PS, flags, m, &lock);
7051 		goto out;
7052 	}
7053 	mpte = NULL;
7054 
7055 	/*
7056 	 * If a page table page is not resident,
7057 	 * we create it here.
7058 	 */
7059 retry:
7060 	pde = pmap_pde(pmap, va);
7061 	if (pde != NULL && (*pde & PG_V) != 0 && ((*pde & PG_PS) == 0 ||
7062 	    pmap_demote_pde_locked(pmap, pde, va, &lock))) {
7063 		pte = pmap_pde_to_pte(pde, va);
7064 		if (va < VM_MAXUSER_ADDRESS && mpte == NULL) {
7065 			mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
7066 			mpte->ref_count++;
7067 		}
7068 	} else if (va < VM_MAXUSER_ADDRESS) {
7069 		/*
7070 		 * Here if the pte page isn't mapped, or if it has been
7071 		 * deallocated.
7072 		 */
7073 		nosleep = (flags & PMAP_ENTER_NOSLEEP) != 0;
7074 		mpte = pmap_allocpte_alloc(pmap, pmap_pde_pindex(va),
7075 		    nosleep ? NULL : &lock, va);
7076 		if (mpte == NULL && nosleep) {
7077 			rv = KERN_RESOURCE_SHORTAGE;
7078 			goto out;
7079 		}
7080 		goto retry;
7081 	} else
7082 		panic("pmap_enter: invalid page directory va=%#lx", va);
7083 
7084 	origpte = *pte;
7085 	pv = NULL;
7086 	if (va < VM_MAXUSER_ADDRESS && pmap->pm_type == PT_X86)
7087 		newpte |= pmap_pkru_get(pmap, va);
7088 
7089 	/*
7090 	 * Is the specified virtual address already mapped?
7091 	 */
7092 	if ((origpte & PG_V) != 0) {
7093 		/*
7094 		 * Wiring change, just update stats. We don't worry about
7095 		 * wiring PT pages as they remain resident as long as there
7096 		 * are valid mappings in them. Hence, if a user page is wired,
7097 		 * the PT page will be also.
7098 		 */
7099 		if ((newpte & PG_W) != 0 && (origpte & PG_W) == 0)
7100 			pmap->pm_stats.wired_count++;
7101 		else if ((newpte & PG_W) == 0 && (origpte & PG_W) != 0)
7102 			pmap->pm_stats.wired_count--;
7103 
7104 		/*
7105 		 * Remove the extra PT page reference.
7106 		 */
7107 		if (mpte != NULL) {
7108 			mpte->ref_count--;
7109 			KASSERT(mpte->ref_count > 0,
7110 			    ("pmap_enter: missing reference to page table page,"
7111 			     " va: 0x%lx", va));
7112 		}
7113 
7114 		/*
7115 		 * Has the physical page changed?
7116 		 */
7117 		opa = origpte & PG_FRAME;
7118 		if (opa == pa) {
7119 			/*
7120 			 * No, might be a protection or wiring change.
7121 			 */
7122 			if ((origpte & PG_MANAGED) != 0 &&
7123 			    (newpte & PG_RW) != 0)
7124 				vm_page_aflag_set(m, PGA_WRITEABLE);
7125 			if (((origpte ^ newpte) & ~(PG_M | PG_A)) == 0)
7126 				goto unchanged;
7127 			goto validate;
7128 		}
7129 
7130 		/*
7131 		 * The physical page has changed.  Temporarily invalidate
7132 		 * the mapping.  This ensures that all threads sharing the
7133 		 * pmap keep a consistent view of the mapping, which is
7134 		 * necessary for the correct handling of COW faults.  It
7135 		 * also permits reuse of the old mapping's PV entry,
7136 		 * avoiding an allocation.
7137 		 *
7138 		 * For consistency, handle unmanaged mappings the same way.
7139 		 */
7140 		origpte = pte_load_clear(pte);
7141 		KASSERT((origpte & PG_FRAME) == opa,
7142 		    ("pmap_enter: unexpected pa update for %#lx", va));
7143 		if ((origpte & PG_MANAGED) != 0) {
7144 			om = PHYS_TO_VM_PAGE(opa);
7145 
7146 			/*
7147 			 * The pmap lock is sufficient to synchronize with
7148 			 * concurrent calls to pmap_page_test_mappings() and
7149 			 * pmap_ts_referenced().
7150 			 */
7151 			if ((origpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
7152 				vm_page_dirty(om);
7153 			if ((origpte & PG_A) != 0) {
7154 				pmap_invalidate_page(pmap, va);
7155 				vm_page_aflag_set(om, PGA_REFERENCED);
7156 			}
7157 			CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa);
7158 			pv = pmap_pvh_remove(&om->md, pmap, va);
7159 			KASSERT(pv != NULL,
7160 			    ("pmap_enter: no PV entry for %#lx", va));
7161 			if ((newpte & PG_MANAGED) == 0)
7162 				free_pv_entry(pmap, pv);
7163 			if ((om->a.flags & PGA_WRITEABLE) != 0 &&
7164 			    TAILQ_EMPTY(&om->md.pv_list) &&
7165 			    ((om->flags & PG_FICTITIOUS) != 0 ||
7166 			    TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
7167 				vm_page_aflag_clear(om, PGA_WRITEABLE);
7168 		} else {
7169 			/*
7170 			 * Since this mapping is unmanaged, assume that PG_A
7171 			 * is set.
7172 			 */
7173 			pmap_invalidate_page(pmap, va);
7174 		}
7175 		origpte = 0;
7176 	} else {
7177 		/*
7178 		 * Increment the counters.
7179 		 */
7180 		if ((newpte & PG_W) != 0)
7181 			pmap->pm_stats.wired_count++;
7182 		pmap_resident_count_adj(pmap, 1);
7183 	}
7184 
7185 	/*
7186 	 * Enter on the PV list if part of our managed memory.
7187 	 */
7188 	if ((newpte & PG_MANAGED) != 0) {
7189 		if (pv == NULL) {
7190 			pv = get_pv_entry(pmap, &lock);
7191 			pv->pv_va = va;
7192 		}
7193 		CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, pa);
7194 		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
7195 		m->md.pv_gen++;
7196 		if ((newpte & PG_RW) != 0)
7197 			vm_page_aflag_set(m, PGA_WRITEABLE);
7198 	}
7199 
7200 	/*
7201 	 * Update the PTE.
7202 	 */
7203 	if ((origpte & PG_V) != 0) {
7204 validate:
7205 		origpte = pte_load_store(pte, newpte);
7206 		KASSERT((origpte & PG_FRAME) == pa,
7207 		    ("pmap_enter: unexpected pa update for %#lx", va));
7208 		if ((newpte & PG_M) == 0 && (origpte & (PG_M | PG_RW)) ==
7209 		    (PG_M | PG_RW)) {
7210 			if ((origpte & PG_MANAGED) != 0)
7211 				vm_page_dirty(m);
7212 
7213 			/*
7214 			 * Although the PTE may still have PG_RW set, TLB
7215 			 * invalidation may nonetheless be required because
7216 			 * the PTE no longer has PG_M set.
7217 			 */
7218 		} else if ((origpte & PG_NX) != 0 || (newpte & PG_NX) == 0) {
7219 			/*
7220 			 * This PTE change does not require TLB invalidation.
7221 			 */
7222 			goto unchanged;
7223 		}
7224 		if ((origpte & PG_A) != 0)
7225 			pmap_invalidate_page(pmap, va);
7226 	} else
7227 		pte_store(pte, newpte);
7228 
7229 unchanged:
7230 
7231 #if VM_NRESERVLEVEL > 0
7232 	/*
7233 	 * If both the page table page and the reservation are fully
7234 	 * populated, then attempt promotion.
7235 	 */
7236 	if ((mpte == NULL || mpte->ref_count == NPTEPG) &&
7237 	    pmap_ps_enabled(pmap) &&
7238 	    (m->flags & PG_FICTITIOUS) == 0 &&
7239 	    vm_reserv_level_iffullpop(m) == 0)
7240 		pmap_promote_pde(pmap, pde, va, &lock);
7241 #endif
7242 
7243 	rv = KERN_SUCCESS;
7244 out:
7245 	if (lock != NULL)
7246 		rw_wunlock(lock);
7247 	PMAP_UNLOCK(pmap);
7248 	return (rv);
7249 }
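
/*
 * Illustrative sketch of a typical 4KB call into pmap_enter(): map the
 * managed page "m" at "va" read/write and wired.  The flags argument
 * carries both the access-type bits and the PMAP_ENTER_* modifiers, as the
 * checks near the top of pmap_enter() expect.  The function name
 * example_enter_wired is hypothetical.
 */
#if 0	/* example only, not compiled */
static int
example_enter_wired(pmap_t pmap, vm_offset_t va, vm_page_t m)
{

	return (pmap_enter(pmap, va, m, VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE | PMAP_ENTER_WIRED, 0));
}
#endif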
7250 
7251 /*
7252  * Tries to create a read- and/or execute-only 2MB page mapping.  Returns true
7253  * if successful.  Returns false if (1) a page table page cannot be allocated
7254  * without sleeping, (2) a mapping already exists at the specified virtual
7255  * address, or (3) a PV entry cannot be allocated without reclaiming another
7256  * PV entry.
7257  */
7258 static bool
7259 pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
7260     struct rwlock **lockp)
7261 {
7262 	pd_entry_t newpde;
7263 	pt_entry_t PG_V;
7264 
7265 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
7266 	PG_V = pmap_valid_bit(pmap);
7267 	newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(pmap, m->md.pat_mode, 1) |
7268 	    PG_PS | PG_V;
7269 	if ((m->oflags & VPO_UNMANAGED) == 0)
7270 		newpde |= PG_MANAGED;
7271 	if ((prot & VM_PROT_EXECUTE) == 0)
7272 		newpde |= pg_nx;
7273 	if (va < VM_MAXUSER_ADDRESS)
7274 		newpde |= PG_U;
7275 	return (pmap_enter_pde(pmap, va, newpde, PMAP_ENTER_NOSLEEP |
7276 	    PMAP_ENTER_NOREPLACE | PMAP_ENTER_NORECLAIM, NULL, lockp) ==
7277 	    KERN_SUCCESS);
7278 }
7279 
7280 /*
7281  * Returns true if every page table entry in the specified page table page is
7282  * zero.
7283  */
7284 static bool
7285 pmap_every_pte_zero(vm_paddr_t pa)
7286 {
7287 	pt_entry_t *pt_end, *pte;
7288 
7289 	KASSERT((pa & PAGE_MASK) == 0, ("pa is misaligned"));
7290 	pte = (pt_entry_t *)PHYS_TO_DMAP(pa);
7291 	for (pt_end = pte + NPTEPG; pte < pt_end; pte++) {
7292 		if (*pte != 0)
7293 			return (false);
7294 	}
7295 	return (true);
7296 }
7297 
7298 /*
7299  * Tries to create the specified 2MB page mapping.  Returns KERN_SUCCESS if
7300  * the mapping was created, and either KERN_FAILURE or KERN_RESOURCE_SHORTAGE
7301  * otherwise.  Returns KERN_FAILURE if PMAP_ENTER_NOREPLACE was specified and
7302  * a mapping already exists at the specified virtual address.  Returns
7303  * KERN_RESOURCE_SHORTAGE if PMAP_ENTER_NOSLEEP was specified and a page table
7304  * page allocation failed.  Returns KERN_RESOURCE_SHORTAGE if
7305  * PMAP_ENTER_NORECLAIM was specified and a PV entry allocation failed.
7306  *
7307  * The parameter "m" is only used when creating a managed, writeable mapping.
7308  */
7309 static int
7310 pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde, u_int flags,
7311     vm_page_t m, struct rwlock **lockp)
7312 {
7313 	struct spglist free;
7314 	pd_entry_t oldpde, *pde;
7315 	pt_entry_t PG_G, PG_RW, PG_V;
7316 	vm_page_t mt, pdpg;
7317 
7318 	KASSERT(pmap == kernel_pmap || (newpde & PG_W) == 0,
7319 	    ("pmap_enter_pde: cannot create wired user mapping"));
7320 	PG_G = pmap_global_bit(pmap);
7321 	PG_RW = pmap_rw_bit(pmap);
7322 	KASSERT((newpde & (pmap_modified_bit(pmap) | PG_RW)) != PG_RW,
7323 	    ("pmap_enter_pde: newpde is missing PG_M"));
7324 	PG_V = pmap_valid_bit(pmap);
7325 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
7326 
7327 	if (!pmap_allow_2m_x_page(pmap, pmap_pde_ept_executable(pmap,
7328 	    newpde))) {
7329 		CTR2(KTR_PMAP, "pmap_enter_pde: 2m x blocked for va %#lx"
7330 		    " in pmap %p", va, pmap);
7331 		return (KERN_FAILURE);
7332 	}
7333 	if ((pde = pmap_alloc_pde(pmap, va, &pdpg, (flags &
7334 	    PMAP_ENTER_NOSLEEP) != 0 ? NULL : lockp)) == NULL) {
7335 		CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
7336 		    " in pmap %p", va, pmap);
7337 		return (KERN_RESOURCE_SHORTAGE);
7338 	}
7339 
7340 	/*
7341 	 * If the PKRU state is not the same for the whole pde range, return
7342 	 * failure and let vm_fault() cope.  Check after the pde allocation,
7343 	 * since it could sleep.
7344 	 */
7345 	if (!pmap_pkru_same(pmap, va, va + NBPDR)) {
7346 		pmap_abort_ptp(pmap, va, pdpg);
7347 		return (KERN_PROTECTION_FAILURE);
7348 	}
7349 	if (va < VM_MAXUSER_ADDRESS && pmap->pm_type == PT_X86) {
7350 		newpde &= ~X86_PG_PKU_MASK;
7351 		newpde |= pmap_pkru_get(pmap, va);
7352 	}
7353 
7354 	/*
7355 	 * If there are existing mappings, either abort or remove them.
7356 	 */
7357 	oldpde = *pde;
7358 	if ((oldpde & PG_V) != 0) {
7359 		KASSERT(pdpg == NULL || pdpg->ref_count > 1,
7360 		    ("pmap_enter_pde: pdpg's reference count is too low"));
7361 		if ((flags & PMAP_ENTER_NOREPLACE) != 0 && (va <
7362 		    VM_MAXUSER_ADDRESS || (oldpde & PG_PS) != 0 ||
7363 		    !pmap_every_pte_zero(oldpde & PG_FRAME))) {
7364 			if (pdpg != NULL)
7365 				pdpg->ref_count--;
7366 			CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
7367 			    " in pmap %p", va, pmap);
7368 			return (KERN_FAILURE);
7369 		}
7370 		/* Break the existing mapping(s). */
7371 		SLIST_INIT(&free);
7372 		if ((oldpde & PG_PS) != 0) {
7373 			/*
7374 			 * The reference to the PD page that was acquired by
7375 			 * pmap_alloc_pde() ensures that it won't be freed.
7376 			 * However, if the PDE resulted from a promotion, then
7377 			 * a reserved PT page could be freed.
7378 			 */
7379 			(void)pmap_remove_pde(pmap, pde, va, &free, lockp);
7380 			if ((oldpde & PG_G) == 0)
7381 				pmap_invalidate_pde_page(pmap, va, oldpde);
7382 		} else {
7383 			pmap_delayed_invl_start();
7384 			if (pmap_remove_ptes(pmap, va, va + NBPDR, pde, &free,
7385 			    lockp))
7386 				pmap_invalidate_all(pmap);
7387 			pmap_delayed_invl_finish();
7388 		}
7389 		if (va < VM_MAXUSER_ADDRESS) {
7390 			vm_page_free_pages_toq(&free, true);
7391 			KASSERT(*pde == 0, ("pmap_enter_pde: non-zero pde %p",
7392 			    pde));
7393 		} else {
7394 			KASSERT(SLIST_EMPTY(&free),
7395 			    ("pmap_enter_pde: freed kernel page table page"));
7396 
7397 			/*
7398 			 * Both pmap_remove_pde() and pmap_remove_ptes() will
7399 			 * leave the kernel page table page zero filled.
7400 			 */
7401 			mt = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
7402 			if (pmap_insert_pt_page(pmap, mt, false))
7403 				panic("pmap_enter_pde: trie insert failed");
7404 		}
7405 	}
7406 
7407 	if ((newpde & PG_MANAGED) != 0) {
7408 		/*
7409 		 * Abort this mapping if its PV entry could not be created.
7410 		 */
7411 		if (!pmap_pv_insert_pde(pmap, va, newpde, flags, lockp)) {
7412 			if (pdpg != NULL)
7413 				pmap_abort_ptp(pmap, va, pdpg);
7414 			CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
7415 			    " in pmap %p", va, pmap);
7416 			return (KERN_RESOURCE_SHORTAGE);
7417 		}
7418 		if ((newpde & PG_RW) != 0) {
7419 			for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
7420 				vm_page_aflag_set(mt, PGA_WRITEABLE);
7421 		}
7422 	}
7423 
7424 	/*
7425 	 * Increment counters.
7426 	 */
7427 	if ((newpde & PG_W) != 0)
7428 		pmap->pm_stats.wired_count += NBPDR / PAGE_SIZE;
7429 	pmap_resident_count_adj(pmap, NBPDR / PAGE_SIZE);
7430 
7431 	/*
7432 	 * Map the superpage.  (This is not a promoted mapping; there will not
7433 	 * be any lingering 4KB page mappings in the TLB.)
7434 	 */
7435 	pde_store(pde, newpde);
7436 
7437 	counter_u64_add(pmap_pde_mappings, 1);
7438 	CTR2(KTR_PMAP, "pmap_enter_pde: success for va %#lx in pmap %p",
7439 	    va, pmap);
7440 	return (KERN_SUCCESS);
7441 }
7442 
7443 /*
7444  * Maps a sequence of resident pages belonging to the same object.
7445  * The sequence begins with the given page m_start.  This page is
7446  * mapped at the given virtual address start.  Each subsequent page is
7447  * mapped at a virtual address that is offset from start by the same
7448  * amount as the page is offset from m_start within the object.  The
7449  * last page in the sequence is the page with the largest offset from
7450  * m_start that can be mapped at a virtual address less than the given
7451  * virtual address end.  Not every virtual page between start and end
7452  * is mapped; only those for which a resident page exists with the
7453  * corresponding offset from m_start are mapped.
7454  */
7455 void
7456 pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
7457     vm_page_t m_start, vm_prot_t prot)
7458 {
7459 	struct rwlock *lock;
7460 	vm_offset_t va;
7461 	vm_page_t m, mpte;
7462 	vm_pindex_t diff, psize;
7463 
7464 	VM_OBJECT_ASSERT_LOCKED(m_start->object);
7465 
7466 	psize = atop(end - start);
7467 	mpte = NULL;
7468 	m = m_start;
7469 	lock = NULL;
7470 	PMAP_LOCK(pmap);
7471 	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
7472 		va = start + ptoa(diff);
7473 		if ((va & PDRMASK) == 0 && va + NBPDR <= end &&
7474 		    m->psind == 1 && pmap_ps_enabled(pmap) &&
7475 		    pmap_enter_2mpage(pmap, va, m, prot, &lock))
7476 			m = &m[NBPDR / PAGE_SIZE - 1];
7477 		else
7478 			mpte = pmap_enter_quick_locked(pmap, va, m, prot,
7479 			    mpte, &lock);
7480 		m = TAILQ_NEXT(m, listq);
7481 	}
7482 	if (lock != NULL)
7483 		rw_wunlock(lock);
7484 	PMAP_UNLOCK(pmap);
7485 }
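
/*
 * Illustrative sketch of the 2MB eligibility test used in the loop above:
 * the candidate virtual address must be superpage aligned, the whole 2MB
 * range must fit below "end", and the physical page must start a fully
 * populated reservation (m->psind == 1).  The pmap_ps_enabled() check and
 * the call to pmap_enter_2mpage() are omitted; the name example_can_map_2m
 * is hypothetical.
 */
#if 0	/* example only, not compiled */
static bool
example_can_map_2m(vm_offset_t va, vm_offset_t end, vm_page_t m)
{

	return ((va & PDRMASK) == 0 && va + NBPDR <= end && m->psind == 1);
}
#endif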
7486 
7487 /*
7488  * This code makes some *MAJOR* assumptions:
7489  * 1. The pmap exists and is the current pmap.
7490  * 2. Not wired.
7491  * 3. Read access.
7492  * 4. No page table pages.
7493  * but is *MUCH* faster than pmap_enter...
7494  */
7495 
7496 void
7497 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
7498 {
7499 	struct rwlock *lock;
7500 
7501 	lock = NULL;
7502 	PMAP_LOCK(pmap);
7503 	(void)pmap_enter_quick_locked(pmap, va, m, prot, NULL, &lock);
7504 	if (lock != NULL)
7505 		rw_wunlock(lock);
7506 	PMAP_UNLOCK(pmap);
7507 }
7508 
7509 static vm_page_t
7510 pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
7511     vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp)
7512 {
7513 	pt_entry_t newpte, *pte, PG_V;
7514 
7515 	KASSERT(!VA_IS_CLEANMAP(va) ||
7516 	    (m->oflags & VPO_UNMANAGED) != 0,
7517 	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
7518 	PG_V = pmap_valid_bit(pmap);
7519 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
7520 
7521 	/*
7522 	 * If a page table page is not resident,
7523 	 * we create it here.
7524 	 */
7525 	if (va < VM_MAXUSER_ADDRESS) {
7526 		vm_pindex_t ptepindex;
7527 		pd_entry_t *ptepa;
7528 
7529 		/*
7530 		 * Calculate pagetable page index
7531 		 */
7532 		ptepindex = pmap_pde_pindex(va);
7533 		if (mpte && (mpte->pindex == ptepindex)) {
7534 			mpte->ref_count++;
7535 		} else {
7536 			/*
7537 			 * Get the page directory entry
7538 			 */
7539 			ptepa = pmap_pde(pmap, va);
7540 
7541 			/*
7542 			 * If the page table page is mapped, we just increment
7543 			 * its reference count.  Otherwise, we attempt to
7544 			 * allocate a page table page, and if that attempt
7545 			 * fails, we don't retry.  Instead, we give up.
7546 			 */
7547 			if (ptepa && (*ptepa & PG_V) != 0) {
7548 				if (*ptepa & PG_PS)
7549 					return (NULL);
7550 				mpte = PHYS_TO_VM_PAGE(*ptepa & PG_FRAME);
7551 				mpte->ref_count++;
7552 			} else {
7553 				/*
7554 				 * Pass NULL instead of the PV list lock
7555 				 * pointer, because we don't intend to sleep.
7556 				 */
7557 				mpte = pmap_allocpte_alloc(pmap, ptepindex,
7558 				    NULL, va);
7559 				if (mpte == NULL)
7560 					return (mpte);
7561 			}
7562 		}
7563 		pte = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mpte));
7564 		pte = &pte[pmap_pte_index(va)];
7565 	} else {
7566 		mpte = NULL;
7567 		pte = vtopte(va);
7568 	}
7569 	if (*pte) {
7570 		if (mpte != NULL)
7571 			mpte->ref_count--;
7572 		return (NULL);
7573 	}
7574 
7575 	/*
7576 	 * Enter on the PV list if part of our managed memory.
7577 	 */
7578 	if ((m->oflags & VPO_UNMANAGED) == 0 &&
7579 	    !pmap_try_insert_pv_entry(pmap, va, m, lockp)) {
7580 		if (mpte != NULL)
7581 			pmap_abort_ptp(pmap, va, mpte);
7582 		return (NULL);
7583 	}
7584 
7585 	/*
7586 	 * Increment counters
7587 	 */
7588 	pmap_resident_count_adj(pmap, 1);
7589 
7590 	newpte = VM_PAGE_TO_PHYS(m) | PG_V |
7591 	    pmap_cache_bits(pmap, m->md.pat_mode, 0);
7592 	if ((m->oflags & VPO_UNMANAGED) == 0)
7593 		newpte |= PG_MANAGED;
7594 	if ((prot & VM_PROT_EXECUTE) == 0)
7595 		newpte |= pg_nx;
7596 	if (va < VM_MAXUSER_ADDRESS)
7597 		newpte |= PG_U | pmap_pkru_get(pmap, va);
7598 	pte_store(pte, newpte);
7599 	return (mpte);
7600 }
7601 
7602 /*
7603  * Make a temporary mapping for a physical address.  This is only intended
7604  * to be used for panic dumps.
7605  */
7606 void *
7607 pmap_kenter_temporary(vm_paddr_t pa, int i)
7608 {
7609 	vm_offset_t va;
7610 
7611 	va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE);
7612 	pmap_kenter(va, pa);
7613 	invlpg(va);
7614 	return ((void *)crashdumpmap);
7615 }
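
/*
 * Illustrative sketch of a hypothetical dump-time caller: map the frame at
 * "pa" into slot 0 of crashdumpmap and copy one page out.  The function
 * name example_capture_frame and the destination buffer are assumptions;
 * a real dump routine may map several consecutive slots by varying the
 * second argument.
 */
#if 0	/* example only, not compiled */
static void
example_capture_frame(vm_paddr_t pa, void *buf)
{
	void *p;

	p = pmap_kenter_temporary(pa, 0);
	bcopy(p, buf, PAGE_SIZE);
}
#endif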
7616 
7617 /*
7618  * This code maps large physical mmap regions into the
7619  * processor address space.  Note that some shortcuts
7620  * are taken, but the code works.
7621  */
7622 void
7623 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
7624     vm_pindex_t pindex, vm_size_t size)
7625 {
7626 	pd_entry_t *pde;
7627 	pt_entry_t PG_A, PG_M, PG_RW, PG_V;
7628 	vm_paddr_t pa, ptepa;
7629 	vm_page_t p, pdpg;
7630 	int pat_mode;
7631 
7632 	PG_A = pmap_accessed_bit(pmap);
7633 	PG_M = pmap_modified_bit(pmap);
7634 	PG_V = pmap_valid_bit(pmap);
7635 	PG_RW = pmap_rw_bit(pmap);
7636 
7637 	VM_OBJECT_ASSERT_WLOCKED(object);
7638 	KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
7639 	    ("pmap_object_init_pt: non-device object"));
7640 	if ((addr & (NBPDR - 1)) == 0 && (size & (NBPDR - 1)) == 0) {
7641 		if (!pmap_ps_enabled(pmap))
7642 			return;
7643 		if (!vm_object_populate(object, pindex, pindex + atop(size)))
7644 			return;
7645 		p = vm_page_lookup(object, pindex);
7646 		KASSERT(p->valid == VM_PAGE_BITS_ALL,
7647 		    ("pmap_object_init_pt: invalid page %p", p));
7648 		pat_mode = p->md.pat_mode;
7649 
7650 		/*
7651 		 * Abort the mapping if the first page is not physically
7652 		 * aligned to a 2MB page boundary.
7653 		 */
7654 		ptepa = VM_PAGE_TO_PHYS(p);
7655 		if (ptepa & (NBPDR - 1))
7656 			return;
7657 
7658 		/*
7659 		 * Skip the first page.  Abort the mapping if the rest of
7660 		 * the pages are not physically contiguous or have differing
7661 		 * memory attributes.
7662 		 */
7663 		p = TAILQ_NEXT(p, listq);
7664 		for (pa = ptepa + PAGE_SIZE; pa < ptepa + size;
7665 		    pa += PAGE_SIZE) {
7666 			KASSERT(p->valid == VM_PAGE_BITS_ALL,
7667 			    ("pmap_object_init_pt: invalid page %p", p));
7668 			if (pa != VM_PAGE_TO_PHYS(p) ||
7669 			    pat_mode != p->md.pat_mode)
7670 				return;
7671 			p = TAILQ_NEXT(p, listq);
7672 		}
7673 
7674 		/*
7675 		 * Map using 2MB pages.  Since "ptepa" is 2M aligned and
7676 		 * "size" is a multiple of 2M, adding the PAT setting to "pa"
7677 		 * will not affect the termination of this loop.
7678 		 */
7679 		PMAP_LOCK(pmap);
7680 		for (pa = ptepa | pmap_cache_bits(pmap, pat_mode, 1);
7681 		    pa < ptepa + size; pa += NBPDR) {
7682 			pde = pmap_alloc_pde(pmap, addr, &pdpg, NULL);
7683 			if (pde == NULL) {
7684 				/*
7685 				 * The creation of mappings below is only an
7686 				 * optimization.  If a page directory page
7687 				 * cannot be allocated without blocking,
7688 				 * continue on to the next mapping rather than
7689 				 * blocking.
7690 				 */
7691 				addr += NBPDR;
7692 				continue;
7693 			}
7694 			if ((*pde & PG_V) == 0) {
7695 				pde_store(pde, pa | PG_PS | PG_M | PG_A |
7696 				    PG_U | PG_RW | PG_V);
7697 				pmap_resident_count_adj(pmap, NBPDR / PAGE_SIZE);
7698 				counter_u64_add(pmap_pde_mappings, 1);
7699 			} else {
7700 				/* Continue on if the PDE is already valid. */
7701 				pdpg->ref_count--;
7702 				KASSERT(pdpg->ref_count > 0,
7703 				    ("pmap_object_init_pt: missing reference "
7704 				    "to page directory page, va: 0x%lx", addr));
7705 			}
7706 			addr += NBPDR;
7707 		}
7708 		PMAP_UNLOCK(pmap);
7709 	}
7710 }
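
/*
 * Worked example for the loop above: mapping a 1GB, 2MB-aligned device
 * object installs 1GB / NBPDR == 512 page directory entries, each covering
 * NBPDR (2MB) of physically contiguous memory with identical PAT
 * attributes.
 */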
7711 
7712 /*
7713  *	Clear the wired attribute from the mappings for the specified range of
7714  *	addresses in the given pmap.  Every valid mapping within that range
7715  *	must have the wired attribute set.  In contrast, invalid mappings
7716  *	cannot have the wired attribute set, so they are ignored.
7717  *
7718  *	The wired attribute of the page table entry is not a hardware
7719  *	feature, so there is no need to invalidate any TLB entries.
7720  *	Since pmap_demote_pde() for the wired entry must never fail,
7721  *	pmap_delayed_invl_start()/finish() calls around the
7722  *	function are not needed.
7723  */
7724 void
7725 pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
7726 {
7727 	vm_offset_t va_next;
7728 	pml4_entry_t *pml4e;
7729 	pdp_entry_t *pdpe;
7730 	pd_entry_t *pde;
7731 	pt_entry_t *pte, PG_V, PG_G __diagused;
7732 
7733 	PG_V = pmap_valid_bit(pmap);
7734 	PG_G = pmap_global_bit(pmap);
7735 	PMAP_LOCK(pmap);
7736 	for (; sva < eva; sva = va_next) {
7737 		pml4e = pmap_pml4e(pmap, sva);
7738 		if (pml4e == NULL || (*pml4e & PG_V) == 0) {
7739 			va_next = (sva + NBPML4) & ~PML4MASK;
7740 			if (va_next < sva)
7741 				va_next = eva;
7742 			continue;
7743 		}
7744 
7745 		va_next = (sva + NBPDP) & ~PDPMASK;
7746 		if (va_next < sva)
7747 			va_next = eva;
7748 		pdpe = pmap_pml4e_to_pdpe(pml4e, sva);
7749 		if ((*pdpe & PG_V) == 0)
7750 			continue;
7751 		if ((*pdpe & PG_PS) != 0) {
7752 			KASSERT(va_next <= eva,
7753 			    ("partial update of non-transparent 1G mapping "
7754 			    "pdpe %#lx sva %#lx eva %#lx va_next %#lx",
7755 			    *pdpe, sva, eva, va_next));
7756 			MPASS(pmap != kernel_pmap); /* XXXKIB */
7757 			MPASS((*pdpe & (PG_MANAGED | PG_G)) == 0);
7758 			atomic_clear_long(pdpe, PG_W);
7759 			pmap->pm_stats.wired_count -= NBPDP / PAGE_SIZE;
7760 			continue;
7761 		}
7762 
7763 		va_next = (sva + NBPDR) & ~PDRMASK;
7764 		if (va_next < sva)
7765 			va_next = eva;
7766 		pde = pmap_pdpe_to_pde(pdpe, sva);
7767 		if ((*pde & PG_V) == 0)
7768 			continue;
7769 		if ((*pde & PG_PS) != 0) {
7770 			if ((*pde & PG_W) == 0)
7771 				panic("pmap_unwire: pde %#jx is missing PG_W",
7772 				    (uintmax_t)*pde);
7773 
7774 			/*
7775 			 * Are we unwiring the entire large page?  If not,
7776 			 * demote the mapping and fall through.
7777 			 */
7778 			if (sva + NBPDR == va_next && eva >= va_next) {
7779 				atomic_clear_long(pde, PG_W);
7780 				pmap->pm_stats.wired_count -= NBPDR /
7781 				    PAGE_SIZE;
7782 				continue;
7783 			} else if (!pmap_demote_pde(pmap, pde, sva))
7784 				panic("pmap_unwire: demotion failed");
7785 		}
7786 		if (va_next > eva)
7787 			va_next = eva;
7788 		for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
7789 		    sva += PAGE_SIZE) {
7790 			if ((*pte & PG_V) == 0)
7791 				continue;
7792 			if ((*pte & PG_W) == 0)
7793 				panic("pmap_unwire: pte %#jx is missing PG_W",
7794 				    (uintmax_t)*pte);
7795 
7796 			/*
7797 			 * PG_W must be cleared atomically.  Although the pmap
7798 			 * lock synchronizes access to PG_W, another processor
7799 			 * could be setting PG_M and/or PG_A concurrently.
7800 			 */
7801 			atomic_clear_long(pte, PG_W);
7802 			pmap->pm_stats.wired_count--;
7803 		}
7804 	}
7805 	PMAP_UNLOCK(pmap);
7806 }
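
/*
 * Note on the atomic_clear_long() calls above: a plain read-modify-write
 * such as "*pte &= ~PG_W" could lose a PG_A or PG_M update made by another
 * CPU between the load and the store, even though the pmap lock serializes
 * updates to PG_W itself.  Clearing the single bit atomically preserves
 * any concurrent hardware updates to the other bits.
 */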
7807 
7808 /*
7809  *	Copy the range specified by src_addr/len
7810  *	from the source map to the range dst_addr/len
7811  *	in the destination map.
7812  *
7813  *	This routine is only advisory and need not do anything.
7814  */
7815 void
7816 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
7817     vm_offset_t src_addr)
7818 {
7819 	struct rwlock *lock;
7820 	pml4_entry_t *pml4e;
7821 	pdp_entry_t *pdpe;
7822 	pd_entry_t *pde, srcptepaddr;
7823 	pt_entry_t *dst_pte, PG_A, PG_M, PG_V, ptetemp, *src_pte;
7824 	vm_offset_t addr, end_addr, va_next;
7825 	vm_page_t dst_pdpg, dstmpte, srcmpte;
7826 
7827 	if (dst_addr != src_addr)
7828 		return;
7829 
7830 	if (dst_pmap->pm_type != src_pmap->pm_type)
7831 		return;
7832 
7833 	/*
7834 	 * EPT page table entries that require emulation of A/D bits are
7835 	 * sensitive to clearing the PG_A bit (aka EPT_PG_READ). Although
7836 	 * we clear PG_M (aka EPT_PG_WRITE) concomitantly, the PG_U bit
7837 	 * (aka EPT_PG_EXECUTE) could still be set. Since some EPT
7838 	 * implementations flag an EPT misconfiguration for exec-only
7839 	 * mappings we skip this function entirely for emulated pmaps.
7840 	 * mappings, we skip this function entirely for emulated pmaps.
7841 	if (pmap_emulate_ad_bits(dst_pmap))
7842 		return;
7843 
7844 	end_addr = src_addr + len;
7845 	lock = NULL;
7846 	if (dst_pmap < src_pmap) {
7847 		PMAP_LOCK(dst_pmap);
7848 		PMAP_LOCK(src_pmap);
7849 	} else {
7850 		PMAP_LOCK(src_pmap);
7851 		PMAP_LOCK(dst_pmap);
7852 	}
7853 
7854 	PG_A = pmap_accessed_bit(dst_pmap);
7855 	PG_M = pmap_modified_bit(dst_pmap);
7856 	PG_V = pmap_valid_bit(dst_pmap);
7857 
7858 	for (addr = src_addr; addr < end_addr; addr = va_next) {
7859 		KASSERT(addr < UPT_MIN_ADDRESS,
7860 		    ("pmap_copy: invalid to pmap_copy page tables"));
7861 
7862 		pml4e = pmap_pml4e(src_pmap, addr);
7863 		if (pml4e == NULL || (*pml4e & PG_V) == 0) {
7864 			va_next = (addr + NBPML4) & ~PML4MASK;
7865 			if (va_next < addr)
7866 				va_next = end_addr;
7867 			continue;
7868 		}
7869 
7870 		va_next = (addr + NBPDP) & ~PDPMASK;
7871 		if (va_next < addr)
7872 			va_next = end_addr;
7873 		pdpe = pmap_pml4e_to_pdpe(pml4e, addr);
7874 		if ((*pdpe & PG_V) == 0)
7875 			continue;
7876 		if ((*pdpe & PG_PS) != 0) {
7877 			KASSERT(va_next <= end_addr,
7878 			    ("partial update of non-transparent 1G mapping "
7879 			    "pdpe %#lx sva %#lx eva %#lx va_next %#lx",
7880 			    *pdpe, addr, end_addr, va_next));
7881 			MPASS((addr & PDPMASK) == 0);
7882 			MPASS((*pdpe & PG_MANAGED) == 0);
7883 			srcptepaddr = *pdpe;
7884 			pdpe = pmap_pdpe(dst_pmap, addr);
7885 			if (pdpe == NULL) {
7886 				if (pmap_allocpte_alloc(dst_pmap,
7887 				    pmap_pml4e_pindex(addr), NULL, addr) ==
7888 				    NULL)
7889 					break;
7890 				pdpe = pmap_pdpe(dst_pmap, addr);
7891 			} else {
7892 				pml4e = pmap_pml4e(dst_pmap, addr);
7893 				dst_pdpg = PHYS_TO_VM_PAGE(*pml4e & PG_FRAME);
7894 				dst_pdpg->ref_count++;
7895 			}
7896 			KASSERT(*pdpe == 0,
7897 			    ("1G mapping present in dst pmap "
7898 			    "pdpe %#lx sva %#lx eva %#lx va_next %#lx",
7899 			    *pdpe, addr, end_addr, va_next));
7900 			*pdpe = srcptepaddr & ~PG_W;
7901 			pmap_resident_count_adj(dst_pmap, NBPDP / PAGE_SIZE);
7902 			continue;
7903 		}
7904 
7905 		va_next = (addr + NBPDR) & ~PDRMASK;
7906 		if (va_next < addr)
7907 			va_next = end_addr;
7908 
7909 		pde = pmap_pdpe_to_pde(pdpe, addr);
7910 		srcptepaddr = *pde;
7911 		if (srcptepaddr == 0)
7912 			continue;
7913 
7914 		if (srcptepaddr & PG_PS) {
7915 			/*
7916 			 * We can only virtual copy whole superpages.
7917 			 */
7918 			if ((addr & PDRMASK) != 0 || addr + NBPDR > end_addr)
7919 				continue;
7920 			pde = pmap_alloc_pde(dst_pmap, addr, &dst_pdpg, NULL);
7921 			if (pde == NULL)
7922 				break;
7923 			if (*pde == 0 && ((srcptepaddr & PG_MANAGED) == 0 ||
7924 			    pmap_pv_insert_pde(dst_pmap, addr, srcptepaddr,
7925 			    PMAP_ENTER_NORECLAIM, &lock))) {
7926 				/*
7927 				 * We leave the dirty bit unchanged because
7928 				 * managed read/write superpage mappings are
7929 				 * required to be dirty.  However, managed
7930 				 * superpage mappings are not required to
7931 				 * have their accessed bit set, so we clear
7932 				 * it because we don't know if this mapping
7933 				 * will be used.
7934 				 */
7935 				srcptepaddr &= ~PG_W;
7936 				if ((srcptepaddr & PG_MANAGED) != 0)
7937 					srcptepaddr &= ~PG_A;
7938 				*pde = srcptepaddr;
7939 				pmap_resident_count_adj(dst_pmap, NBPDR /
7940 				    PAGE_SIZE);
7941 				counter_u64_add(pmap_pde_mappings, 1);
7942 			} else
7943 				pmap_abort_ptp(dst_pmap, addr, dst_pdpg);
7944 			continue;
7945 		}
7946 
7947 		srcptepaddr &= PG_FRAME;
7948 		srcmpte = PHYS_TO_VM_PAGE(srcptepaddr);
7949 		KASSERT(srcmpte->ref_count > 0,
7950 		    ("pmap_copy: source page table page is unused"));
7951 
7952 		if (va_next > end_addr)
7953 			va_next = end_addr;
7954 
7955 		src_pte = (pt_entry_t *)PHYS_TO_DMAP(srcptepaddr);
7956 		src_pte = &src_pte[pmap_pte_index(addr)];
7957 		dstmpte = NULL;
7958 		for (; addr < va_next; addr += PAGE_SIZE, src_pte++) {
7959 			ptetemp = *src_pte;
7960 
7961 			/*
7962 			 * We only virtual copy managed pages.
7963 			 */
7964 			if ((ptetemp & PG_MANAGED) == 0)
7965 				continue;
7966 
7967 			if (dstmpte != NULL) {
7968 				KASSERT(dstmpte->pindex ==
7969 				    pmap_pde_pindex(addr),
7970 				    ("dstmpte pindex/addr mismatch"));
7971 				dstmpte->ref_count++;
7972 			} else if ((dstmpte = pmap_allocpte(dst_pmap, addr,
7973 			    NULL)) == NULL)
7974 				goto out;
7975 			dst_pte = (pt_entry_t *)
7976 			    PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dstmpte));
7977 			dst_pte = &dst_pte[pmap_pte_index(addr)];
7978 			if (*dst_pte == 0 &&
7979 			    pmap_try_insert_pv_entry(dst_pmap, addr,
7980 			    PHYS_TO_VM_PAGE(ptetemp & PG_FRAME), &lock)) {
7981 				/*
7982 				 * Clear the wired, modified, and accessed
7983 				 * (referenced) bits during the copy.
7984 				 */
7985 				*dst_pte = ptetemp & ~(PG_W | PG_M | PG_A);
7986 				pmap_resident_count_adj(dst_pmap, 1);
7987 			} else {
7988 				pmap_abort_ptp(dst_pmap, addr, dstmpte);
7989 				goto out;
7990 			}
7991 			/* Have we copied all of the valid mappings? */
7992 			if (dstmpte->ref_count >= srcmpte->ref_count)
7993 				break;
7994 		}
7995 	}
7996 out:
7997 	if (lock != NULL)
7998 		rw_wunlock(lock);
7999 	PMAP_UNLOCK(src_pmap);
8000 	PMAP_UNLOCK(dst_pmap);
8001 }
8002 
8003 int
8004 pmap_vmspace_copy(pmap_t dst_pmap, pmap_t src_pmap)
8005 {
8006 	int error;
8007 
8008 	if (dst_pmap->pm_type != src_pmap->pm_type ||
8009 	    dst_pmap->pm_type != PT_X86 ||
8010 	    (cpu_stdext_feature2 & CPUID_STDEXT2_PKU) == 0)
8011 		return (0);
8012 	for (;;) {
8013 		if (dst_pmap < src_pmap) {
8014 			PMAP_LOCK(dst_pmap);
8015 			PMAP_LOCK(src_pmap);
8016 		} else {
8017 			PMAP_LOCK(src_pmap);
8018 			PMAP_LOCK(dst_pmap);
8019 		}
8020 		error = pmap_pkru_copy(dst_pmap, src_pmap);
8021 		/* Clean up partial copy on failure due to no memory. */
8022 		if (error == ENOMEM)
8023 			pmap_pkru_deassign_all(dst_pmap);
8024 		PMAP_UNLOCK(src_pmap);
8025 		PMAP_UNLOCK(dst_pmap);
8026 		if (error != ENOMEM)
8027 			break;
8028 		vm_wait(NULL);
8029 	}
8030 	return (error);
8031 }
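
/*
 * Illustrative sketch of the lock-ordering idiom used by pmap_copy() and
 * pmap_vmspace_copy() above: when two pmaps must be locked, the one with
 * the lower address is always locked first, which gives all callers a
 * consistent global order and prevents deadlock.  The function name
 * example_lock_two is hypothetical and assumes a != b.
 */
#if 0	/* example only, not compiled */
static void
example_lock_two(pmap_t a, pmap_t b)
{

	if (a < b) {
		PMAP_LOCK(a);
		PMAP_LOCK(b);
	} else {
		PMAP_LOCK(b);
		PMAP_LOCK(a);
	}
}
#endif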
8032 
8033 /*
8034  * Zero the specified hardware page.
8035  */
8036 void
8037 pmap_zero_page(vm_page_t m)
8038 {
8039 	vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
8040 
8041 	pagezero((void *)va);
8042 }
8043 
8044 /*
8045  * Zero an area within a single hardware page.  off and size must not
8046  * cover an area beyond a single hardware page.
8047  */
8048 void
8049 pmap_zero_page_area(vm_page_t m, int off, int size)
8050 {
8051 	vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
8052 
8053 	if (off == 0 && size == PAGE_SIZE)
8054 		pagezero((void *)va);
8055 	else
8056 		bzero((char *)va + off, size);
8057 }
8058 
8059 /*
8060  * Copy 1 specified hardware page to another.
8061  */
8062 void
8063 pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
8064 {
8065 	vm_offset_t src = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(msrc));
8066 	vm_offset_t dst = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mdst));
8067 
8068 	pagecopy((void *)src, (void *)dst);
8069 }
8070 
8071 int unmapped_buf_allowed = 1;
8072 
8073 void
8074 pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
8075     vm_offset_t b_offset, int xfersize)
8076 {
8077 	void *a_cp, *b_cp;
8078 	vm_page_t pages[2];
8079 	vm_offset_t vaddr[2], a_pg_offset, b_pg_offset;
8080 	int cnt;
8081 	boolean_t mapped;
8082 
8083 	while (xfersize > 0) {
8084 		a_pg_offset = a_offset & PAGE_MASK;
8085 		pages[0] = ma[a_offset >> PAGE_SHIFT];
8086 		b_pg_offset = b_offset & PAGE_MASK;
8087 		pages[1] = mb[b_offset >> PAGE_SHIFT];
8088 		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
8089 		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
8090 		mapped = pmap_map_io_transient(pages, vaddr, 2, FALSE);
8091 		a_cp = (char *)vaddr[0] + a_pg_offset;
8092 		b_cp = (char *)vaddr[1] + b_pg_offset;
8093 		bcopy(a_cp, b_cp, cnt);
8094 		if (__predict_false(mapped))
8095 			pmap_unmap_io_transient(pages, vaddr, 2, FALSE);
8096 		a_offset += cnt;
8097 		b_offset += cnt;
8098 		xfersize -= cnt;
8099 	}
8100 }
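
/*
 * Worked example of the chunk computation above (values hypothetical):
 * with a_offset = 0xf80, b_offset = 0x40, and xfersize = 0x1000, the first
 * iteration copies
 *	cnt = min(0x1000, PAGE_SIZE - 0xf80, PAGE_SIZE - 0x40) = 0x80
 * bytes; both offsets then advance by 0x80 and the next iteration
 * continues in the following source page.
 */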
8101 
8102 /*
8103  * Returns true if the pmap's pv is one of the first
8104  * 16 pvs linked to from this page.  This count may
8105  * be changed upwards or downwards in the future; it
8106  * is only necessary that true be returned for a small
8107  * subset of pmaps for proper page aging.
8108  */
8109 boolean_t
8110 pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
8111 {
8112 	struct md_page *pvh;
8113 	struct rwlock *lock;
8114 	pv_entry_t pv;
8115 	int loops = 0;
8116 	boolean_t rv;
8117 
8118 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
8119 	    ("pmap_page_exists_quick: page %p is not managed", m));
8120 	rv = FALSE;
8121 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
8122 	rw_rlock(lock);
8123 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
8124 		if (PV_PMAP(pv) == pmap) {
8125 			rv = TRUE;
8126 			break;
8127 		}
8128 		loops++;
8129 		if (loops >= 16)
8130 			break;
8131 	}
8132 	if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) {
8133 		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
8134 		TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
8135 			if (PV_PMAP(pv) == pmap) {
8136 				rv = TRUE;
8137 				break;
8138 			}
8139 			loops++;
8140 			if (loops >= 16)
8141 				break;
8142 		}
8143 	}
8144 	rw_runlock(lock);
8145 	return (rv);
8146 }
8147 
8148 /*
8149  *	pmap_page_wired_mappings:
8150  *
8151  *	Return the number of managed mappings to the given physical page
8152  *	that are wired.
8153  */
8154 int
8155 pmap_page_wired_mappings(vm_page_t m)
8156 {
8157 	struct rwlock *lock;
8158 	struct md_page *pvh;
8159 	pmap_t pmap;
8160 	pt_entry_t *pte;
8161 	pv_entry_t pv;
8162 	int count, md_gen, pvh_gen;
8163 
8164 	if ((m->oflags & VPO_UNMANAGED) != 0)
8165 		return (0);
8166 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
8167 	rw_rlock(lock);
8168 restart:
8169 	count = 0;
8170 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
8171 		pmap = PV_PMAP(pv);
8172 		if (!PMAP_TRYLOCK(pmap)) {
8173 			md_gen = m->md.pv_gen;
8174 			rw_runlock(lock);
8175 			PMAP_LOCK(pmap);
8176 			rw_rlock(lock);
8177 			if (md_gen != m->md.pv_gen) {
8178 				PMAP_UNLOCK(pmap);
8179 				goto restart;
8180 			}
8181 		}
8182 		pte = pmap_pte(pmap, pv->pv_va);
8183 		if ((*pte & PG_W) != 0)
8184 			count++;
8185 		PMAP_UNLOCK(pmap);
8186 	}
8187 	if ((m->flags & PG_FICTITIOUS) == 0) {
8188 		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
8189 		TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
8190 			pmap = PV_PMAP(pv);
8191 			if (!PMAP_TRYLOCK(pmap)) {
8192 				md_gen = m->md.pv_gen;
8193 				pvh_gen = pvh->pv_gen;
8194 				rw_runlock(lock);
8195 				PMAP_LOCK(pmap);
8196 				rw_rlock(lock);
8197 				if (md_gen != m->md.pv_gen ||
8198 				    pvh_gen != pvh->pv_gen) {
8199 					PMAP_UNLOCK(pmap);
8200 					goto restart;
8201 				}
8202 			}
8203 			pte = pmap_pde(pmap, pv->pv_va);
8204 			if ((*pte & PG_W) != 0)
8205 				count++;
8206 			PMAP_UNLOCK(pmap);
8207 		}
8208 	}
8209 	rw_runlock(lock);
8210 	return (count);
8211 }
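
/*
 * Note on the PMAP_TRYLOCK() retry pattern above: the lock order is pmap
 * lock before PV list lock, so when the trylock fails the PV list lock is
 * dropped, the pmap lock is taken in the proper order, and the PV list
 * lock is reacquired.  The saved pv_gen generation counts then reveal
 * whether the PV lists changed while no lock was held; if they did, the
 * scan restarts from the beginning.
 */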
8212 
8213 /*
8214  * Returns TRUE if the given page is mapped individually or as part of
8215  * a 2mpage.  Otherwise, returns FALSE.
8216  */
8217 boolean_t
8218 pmap_page_is_mapped(vm_page_t m)
8219 {
8220 	struct rwlock *lock;
8221 	boolean_t rv;
8222 
8223 	if ((m->oflags & VPO_UNMANAGED) != 0)
8224 		return (FALSE);
8225 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
8226 	rw_rlock(lock);
8227 	rv = !TAILQ_EMPTY(&m->md.pv_list) ||
8228 	    ((m->flags & PG_FICTITIOUS) == 0 &&
8229 	    !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list));
8230 	rw_runlock(lock);
8231 	return (rv);
8232 }
8233 
8234 /*
8235  * Destroy all managed, non-wired mappings in the given user-space
8236  * pmap.  This pmap cannot be active on any processor besides the
8237  * caller.
8238  *
8239  * This function cannot be applied to the kernel pmap.  Moreover, it
8240  * is not intended for general use.  It is only to be used during
8241  * process termination.  Consequently, it can be implemented in ways
8242  * that make it faster than pmap_remove().  First, it can more quickly
8243  * destroy mappings by iterating over the pmap's collection of PV
8244  * entries, rather than searching the page table.  Second, it doesn't
8245  * have to test and clear the page table entries atomically, because
8246  * no processor is currently accessing the user address space.  In
8247  * particular, a page table entry's dirty bit won't change state once
8248  * this function starts.
8249  *
8250  * Although this function destroys all of the pmap's managed,
8251  * non-wired mappings, it can delay and batch the invalidation of TLB
8252  * entries without calling pmap_delayed_invl_start() and
8253  * pmap_delayed_invl_finish().  Because the pmap is not active on
8254  * any other processor, none of these TLB entries will ever be used
8255  * before their eventual invalidation.  Consequently, there is no need
8256  * for either pmap_remove_all() or pmap_remove_write() to wait for
8257  * that eventual TLB invalidation.
8258  */
8259 void
8260 pmap_remove_pages(pmap_t pmap)
8261 {
8262 	pd_entry_t ptepde;
8263 	pt_entry_t *pte, tpte;
8264 	pt_entry_t PG_M, PG_RW, PG_V;
8265 	struct spglist free;
8266 	struct pv_chunklist free_chunks[PMAP_MEMDOM];
8267 	vm_page_t m, mpte, mt;
8268 	pv_entry_t pv;
8269 	struct md_page *pvh;
8270 	struct pv_chunk *pc, *npc;
8271 	struct rwlock *lock;
8272 	int64_t bit;
8273 	uint64_t inuse, bitmask;
8274 	int allfree, field, freed, i, idx;
8275 	boolean_t superpage;
8276 	vm_paddr_t pa;
8277 
8278 	/*
8279 	 * Assert that the given pmap is only active on the current
8280 	 * CPU.  Unfortunately, we cannot block another CPU from
8281 	 * activating the pmap while this function is executing.
8282 	 */
8283 	KASSERT(pmap == PCPU_GET(curpmap), ("non-current pmap %p", pmap));
8284 #ifdef INVARIANTS
8285 	{
8286 		cpuset_t other_cpus;
8287 
8288 		other_cpus = all_cpus;
8289 		critical_enter();
8290 		CPU_CLR(PCPU_GET(cpuid), &other_cpus);
8291 		CPU_AND(&other_cpus, &other_cpus, &pmap->pm_active);
8292 		critical_exit();
8293 		KASSERT(CPU_EMPTY(&other_cpus), ("pmap active %p", pmap));
8294 	}
8295 #endif
8296 
8297 	lock = NULL;
8298 	PG_M = pmap_modified_bit(pmap);
8299 	PG_V = pmap_valid_bit(pmap);
8300 	PG_RW = pmap_rw_bit(pmap);
8301 
8302 	for (i = 0; i < PMAP_MEMDOM; i++)
8303 		TAILQ_INIT(&free_chunks[i]);
8304 	SLIST_INIT(&free);
8305 	PMAP_LOCK(pmap);
8306 	TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
8307 		allfree = 1;
8308 		freed = 0;
8309 		for (field = 0; field < _NPCM; field++) {
8310 			inuse = ~pc->pc_map[field] & pc_freemask[field];
8311 			while (inuse != 0) {
8312 				bit = bsfq(inuse);
8313 				bitmask = 1UL << bit;
8314 				idx = field * 64 + bit;
8315 				pv = &pc->pc_pventry[idx];
8316 				inuse &= ~bitmask;
8317 
8318 				pte = pmap_pdpe(pmap, pv->pv_va);
8319 				ptepde = *pte;
8320 				pte = pmap_pdpe_to_pde(pte, pv->pv_va);
8321 				tpte = *pte;
8322 				if ((tpte & (PG_PS | PG_V)) == PG_V) {
8323 					superpage = FALSE;
8324 					ptepde = tpte;
8325 					pte = (pt_entry_t *)PHYS_TO_DMAP(tpte &
8326 					    PG_FRAME);
8327 					pte = &pte[pmap_pte_index(pv->pv_va)];
8328 					tpte = *pte;
8329 				} else {
8330 					/*
8331 					 * Keep track of whether 'tpte' is a
8332 					 * superpage explicitly instead of
8333 					 * relying on PG_PS being set.
8334 					 *
8335 					 * This is because PG_PS is numerically
8336 					 * identical to PG_PTE_PAT and thus a
8337 					 * regular page could be mistaken for
8338 					 * a superpage.
8339 					 */
8340 					superpage = TRUE;
8341 				}
8342 
8343 				if ((tpte & PG_V) == 0) {
8344 					panic("bad pte va %lx pte %lx",
8345 					    pv->pv_va, tpte);
8346 				}
8347 
8348 /*
8349  * We cannot remove wired pages from a process' mapping at this time.
8350  */
8351 				if (tpte & PG_W) {
8352 					allfree = 0;
8353 					continue;
8354 				}
8355 
8356 				/* Mark free */
8357 				pc->pc_map[field] |= bitmask;
8358 
8359 				/*
8360 				 * Because this pmap is not active on other
8361 				 * processors, the dirty bit cannot have
8362 				 * changed state since we last loaded pte.
8363 				 */
8364 				pte_clear(pte);
8365 
8366 				if (superpage)
8367 					pa = tpte & PG_PS_FRAME;
8368 				else
8369 					pa = tpte & PG_FRAME;
8370 
8371 				m = PHYS_TO_VM_PAGE(pa);
8372 				KASSERT(m->phys_addr == pa,
8373 				    ("vm_page_t %p phys_addr mismatch %016jx %016jx",
8374 				    m, (uintmax_t)m->phys_addr,
8375 				    (uintmax_t)tpte));
8376 
8377 				KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
8378 				    m < &vm_page_array[vm_page_array_size],
8379 				    ("pmap_remove_pages: bad tpte %#jx",
8380 				    (uintmax_t)tpte));
8381 
8382 				/*
8383 				 * Update the vm_page_t clean/reference bits.
8384 				 */
8385 				if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
8386 					if (superpage) {
8387 						for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
8388 							vm_page_dirty(mt);
8389 					} else
8390 						vm_page_dirty(m);
8391 				}
8392 
8393 				CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m);
8394 
8395 				if (superpage) {
8396 					pmap_resident_count_adj(pmap, -NBPDR / PAGE_SIZE);
8397 					pvh = pa_to_pvh(tpte & PG_PS_FRAME);
8398 					TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
8399 					pvh->pv_gen++;
8400 					if (TAILQ_EMPTY(&pvh->pv_list)) {
8401 						for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
8402 							if ((mt->a.flags & PGA_WRITEABLE) != 0 &&
8403 							    TAILQ_EMPTY(&mt->md.pv_list))
8404 								vm_page_aflag_clear(mt, PGA_WRITEABLE);
8405 					}
8406 					mpte = pmap_remove_pt_page(pmap, pv->pv_va);
8407 					if (mpte != NULL) {
8408 						KASSERT(mpte->valid == VM_PAGE_BITS_ALL,
8409 						    ("pmap_remove_pages: pte page not promoted"));
8410 						pmap_pt_page_count_adj(pmap, -1);
8411 						KASSERT(mpte->ref_count == NPTEPG,
8412 						    ("pmap_remove_pages: pte page reference count error"));
8413 						mpte->ref_count = 0;
8414 						pmap_add_delayed_free_list(mpte, &free, FALSE);
8415 					}
8416 				} else {
8417 					pmap_resident_count_adj(pmap, -1);
8418 					TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
8419 					m->md.pv_gen++;
8420 					if ((m->a.flags & PGA_WRITEABLE) != 0 &&
8421 					    TAILQ_EMPTY(&m->md.pv_list) &&
8422 					    (m->flags & PG_FICTITIOUS) == 0) {
8423 						pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
8424 						if (TAILQ_EMPTY(&pvh->pv_list))
8425 							vm_page_aflag_clear(m, PGA_WRITEABLE);
8426 					}
8427 				}
8428 				pmap_unuse_pt(pmap, pv->pv_va, ptepde, &free);
8429 				freed++;
8430 			}
8431 		}
8432 		PV_STAT(counter_u64_add(pv_entry_frees, freed));
8433 		PV_STAT(counter_u64_add(pv_entry_spare, freed));
8434 		PV_STAT(counter_u64_add(pv_entry_count, -freed));
8435 		if (allfree) {
8436 			TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
8437 			TAILQ_INSERT_TAIL(&free_chunks[pc_to_domain(pc)], pc, pc_list);
8438 		}
8439 	}
8440 	if (lock != NULL)
8441 		rw_wunlock(lock);
8442 	pmap_invalidate_all(pmap);
8443 	pmap_pkru_deassign_all(pmap);
8444 	free_pv_chunk_batch((struct pv_chunklist *)&free_chunks);
8445 	PMAP_UNLOCK(pmap);
8446 	vm_page_free_pages_toq(&free, true);
8447 }
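
/*
 * Illustrative sketch of the bit-scan idiom used by the chunk walk above:
 * every allocated pv entry corresponds to a set bit in
 * ~pc_map[field] & pc_freemask[field], and bsfq() yields the index of the
 * lowest set bit on each iteration.  The name example_foreach_set_bit is
 * hypothetical.
 */
#if 0	/* example only, not compiled */
static void
example_foreach_set_bit(uint64_t mask)
{
	int bit;

	while (mask != 0) {
		bit = bsfq(mask);	/* index of the lowest set bit */
		mask &= ~(1UL << bit);
		/* ... visit "bit" here ... */
	}
}
#endif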
8448 
8449 static boolean_t
8450 pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified)
8451 {
8452 	struct rwlock *lock;
8453 	pv_entry_t pv;
8454 	struct md_page *pvh;
8455 	pt_entry_t *pte, mask;
8456 	pt_entry_t PG_A, PG_M, PG_RW, PG_V;
8457 	pmap_t pmap;
8458 	int md_gen, pvh_gen;
8459 	boolean_t rv;
8460 
8461 	rv = FALSE;
8462 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
8463 	rw_rlock(lock);
8464 restart:
8465 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
8466 		pmap = PV_PMAP(pv);
8467 		if (!PMAP_TRYLOCK(pmap)) {
8468 			md_gen = m->md.pv_gen;
8469 			rw_runlock(lock);
8470 			PMAP_LOCK(pmap);
8471 			rw_rlock(lock);
8472 			if (md_gen != m->md.pv_gen) {
8473 				PMAP_UNLOCK(pmap);
8474 				goto restart;
8475 			}
8476 		}
8477 		pte = pmap_pte(pmap, pv->pv_va);
8478 		mask = 0;
8479 		if (modified) {
8480 			PG_M = pmap_modified_bit(pmap);
8481 			PG_RW = pmap_rw_bit(pmap);
8482 			mask |= PG_RW | PG_M;
8483 		}
8484 		if (accessed) {
8485 			PG_A = pmap_accessed_bit(pmap);
8486 			PG_V = pmap_valid_bit(pmap);
8487 			mask |= PG_V | PG_A;
8488 		}
8489 		rv = (*pte & mask) == mask;
8490 		PMAP_UNLOCK(pmap);
8491 		if (rv)
8492 			goto out;
8493 	}
8494 	if ((m->flags & PG_FICTITIOUS) == 0) {
8495 		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
8496 		TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
8497 			pmap = PV_PMAP(pv);
8498 			if (!PMAP_TRYLOCK(pmap)) {
8499 				md_gen = m->md.pv_gen;
8500 				pvh_gen = pvh->pv_gen;
8501 				rw_runlock(lock);
8502 				PMAP_LOCK(pmap);
8503 				rw_rlock(lock);
8504 				if (md_gen != m->md.pv_gen ||
8505 				    pvh_gen != pvh->pv_gen) {
8506 					PMAP_UNLOCK(pmap);
8507 					goto restart;
8508 				}
8509 			}
8510 			pte = pmap_pde(pmap, pv->pv_va);
8511 			mask = 0;
8512 			if (modified) {
8513 				PG_M = pmap_modified_bit(pmap);
8514 				PG_RW = pmap_rw_bit(pmap);
8515 				mask |= PG_RW | PG_M;
8516 			}
8517 			if (accessed) {
8518 				PG_A = pmap_accessed_bit(pmap);
8519 				PG_V = pmap_valid_bit(pmap);
8520 				mask |= PG_V | PG_A;
8521 			}
8522 			rv = (*pte & mask) == mask;
8523 			PMAP_UNLOCK(pmap);
8524 			if (rv)
8525 				goto out;
8526 		}
8527 	}
8528 out:
8529 	rw_runlock(lock);
8530 	return (rv);
8531 }
8532 
8533 /*
8534  *	pmap_is_modified:
8535  *
8536  *	Return whether or not the specified physical page was modified
8537  *	in any physical maps.
8538  */
8539 boolean_t
8540 pmap_is_modified(vm_page_t m)
8541 {
8542 
8543 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
8544 	    ("pmap_is_modified: page %p is not managed", m));
8545 
8546 	/*
8547 	 * If the page is not busied then this check is racy.
8548 	 */
8549 	if (!pmap_page_is_write_mapped(m))
8550 		return (FALSE);
8551 	return (pmap_page_test_mappings(m, FALSE, TRUE));
8552 }
8553 
8554 /*
8555  *	pmap_is_prefaultable:
8556  *
8557  *	Return whether or not the specified virtual address is eligible
8558  *	for prefault.
8559  */
8560 boolean_t
8561 pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
8562 {
8563 	pd_entry_t *pde;
8564 	pt_entry_t *pte, PG_V;
8565 	boolean_t rv;
8566 
8567 	PG_V = pmap_valid_bit(pmap);
8568 
8569 	/*
8570 	 * Return TRUE if and only if the PTE for the specified virtual
8571 	 * address is allocated but invalid.
8572 	 */
8573 	rv = FALSE;
8574 	PMAP_LOCK(pmap);
8575 	pde = pmap_pde(pmap, addr);
8576 	if (pde != NULL && (*pde & (PG_PS | PG_V)) == PG_V) {
8577 		pte = pmap_pde_to_pte(pde, addr);
8578 		rv = (*pte & PG_V) == 0;
8579 	}
8580 	PMAP_UNLOCK(pmap);
8581 	return (rv);
8582 }
8583 
8584 /*
8585  *	pmap_is_referenced:
8586  *
8587  *	Return whether or not the specified physical page was referenced
8588  *	in any physical maps.
8589  */
8590 boolean_t
8591 pmap_is_referenced(vm_page_t m)
8592 {
8593 
8594 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
8595 	    ("pmap_is_referenced: page %p is not managed", m));
8596 	return (pmap_page_test_mappings(m, TRUE, FALSE));
8597 }
8598 
8599 /*
8600  * Clear the write and modified bits in each of the given page's mappings.
8601  */
8602 void
8603 pmap_remove_write(vm_page_t m)
8604 {
8605 	struct md_page *pvh;
8606 	pmap_t pmap;
8607 	struct rwlock *lock;
8608 	pv_entry_t next_pv, pv;
8609 	pd_entry_t *pde;
8610 	pt_entry_t oldpte, *pte, PG_M, PG_RW;
8611 	vm_offset_t va;
8612 	int pvh_gen, md_gen;
8613 
8614 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
8615 	    ("pmap_remove_write: page %p is not managed", m));
8616 
8617 	vm_page_assert_busied(m);
8618 	if (!pmap_page_is_write_mapped(m))
8619 		return;
8620 
8621 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
8622 	pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
8623 	    pa_to_pvh(VM_PAGE_TO_PHYS(m));
8624 	rw_wlock(lock);
8625 retry:
8626 	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
8627 		pmap = PV_PMAP(pv);
8628 		if (!PMAP_TRYLOCK(pmap)) {
8629 			pvh_gen = pvh->pv_gen;
8630 			rw_wunlock(lock);
8631 			PMAP_LOCK(pmap);
8632 			rw_wlock(lock);
8633 			if (pvh_gen != pvh->pv_gen) {
8634 				PMAP_UNLOCK(pmap);
8635 				goto retry;
8636 			}
8637 		}
8638 		PG_RW = pmap_rw_bit(pmap);
8639 		va = pv->pv_va;
8640 		pde = pmap_pde(pmap, va);
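		/*
		 * Demote a writeable 2MB mapping so that write access can be
		 * removed from the individual 4KB mappings below.
		 */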
8641 		if ((*pde & PG_RW) != 0)
8642 			(void)pmap_demote_pde_locked(pmap, pde, va, &lock);
8643 		KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
8644 		    ("inconsistent pv lock %p %p for page %p",
8645 		    lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
8646 		PMAP_UNLOCK(pmap);
8647 	}
8648 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
8649 		pmap = PV_PMAP(pv);
8650 		if (!PMAP_TRYLOCK(pmap)) {
8651 			pvh_gen = pvh->pv_gen;
8652 			md_gen = m->md.pv_gen;
8653 			rw_wunlock(lock);
8654 			PMAP_LOCK(pmap);
8655 			rw_wlock(lock);
8656 			if (pvh_gen != pvh->pv_gen ||
8657 			    md_gen != m->md.pv_gen) {
8658 				PMAP_UNLOCK(pmap);
8659 				goto retry;
8660 			}
8661 		}
8662 		PG_M = pmap_modified_bit(pmap);
8663 		PG_RW = pmap_rw_bit(pmap);
8664 		pde = pmap_pde(pmap, pv->pv_va);
8665 		KASSERT((*pde & PG_PS) == 0,
8666 		    ("pmap_remove_write: found a 2mpage in page %p's pv list",
8667 		    m));
8668 		pte = pmap_pde_to_pte(pde, pv->pv_va);
8669 		oldpte = *pte;
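		/*
		 * Atomically clear PG_RW and PG_M, and transfer a set
		 * modified bit to the page's dirty field.
		 */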
8670 		if (oldpte & PG_RW) {
8671 			while (!atomic_fcmpset_long(pte, &oldpte, oldpte &
8672 			    ~(PG_RW | PG_M)))
8673 				cpu_spinwait();
8674 			if ((oldpte & PG_M) != 0)
8675 				vm_page_dirty(m);
8676 			pmap_invalidate_page(pmap, pv->pv_va);
8677 		}
8678 		PMAP_UNLOCK(pmap);
8679 	}
8680 	rw_wunlock(lock);
8681 	vm_page_aflag_clear(m, PGA_WRITEABLE);
8682 	pmap_delayed_invl_wait(m);
8683 }
8684 
8685 static __inline boolean_t
8686 safe_to_clear_referenced(pmap_t pmap, pt_entry_t pte)
8687 {
8688 
8689 	if (!pmap_emulate_ad_bits(pmap))
8690 		return (TRUE);
8691 
8692 	KASSERT(pmap->pm_type == PT_EPT, ("invalid pm_type %d", pmap->pm_type));
8693 
8694 	/*
8695 	 * XWR = 010 or 110 will cause an unconditional EPT misconfiguration,
8696 	 * so we do not allow the referenced (aka EPT_PG_READ) bit to be
8697 	 * cleared while the EPT_PG_WRITE bit is set.
8698 	 */
8699 	if ((pte & EPT_PG_WRITE) != 0)
8700 		return (FALSE);
8701 
8702 	/*
8703 	 * XWR = 100 is allowed only if PMAP_SUPPORTS_EXEC_ONLY is set.
8704 	 */
8705 	if ((pte & EPT_PG_EXECUTE) == 0 ||
8706 	    ((pmap->pm_flags & PMAP_SUPPORTS_EXEC_ONLY) != 0))
8707 		return (TRUE);
8708 	else
8709 		return (FALSE);
8710 }
8711 
8712 /*
8713  *	pmap_ts_referenced:
8714  *
8715  *	Return a count of reference bits for a page, clearing those bits.
8716  *	It is not necessary for every reference bit to be cleared, but it
8717  *	is necessary that 0 only be returned when there are truly no
8718  *	reference bits set.
8719  *
8720  *	As an optimization, update the page's dirty field if a modified bit is
8721  *	found while counting reference bits.  This opportunistic update can be
8722  *	performed at low cost and can eliminate the need for some future calls
8723  *	to pmap_is_modified().  However, since this function stops after
8724  *	finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some
8725  *	dirty pages.  Those dirty pages will only be detected by a future call
8726  *	to pmap_is_modified().
8727  *
8728  *	A DI block is not needed within this function, because
8729  *	invalidations are performed before the PV list lock is
8730  *	released.
8731  */
8732 int
8733 pmap_ts_referenced(vm_page_t m)
8734 {
8735 	struct md_page *pvh;
8736 	pv_entry_t pv, pvf;
8737 	pmap_t pmap;
8738 	struct rwlock *lock;
8739 	pd_entry_t oldpde, *pde;
8740 	pt_entry_t *pte, PG_A, PG_M, PG_RW;
8741 	vm_offset_t va;
8742 	vm_paddr_t pa;
8743 	int cleared, md_gen, not_cleared, pvh_gen;
8744 	struct spglist free;
8745 	boolean_t demoted;
8746 
8747 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
8748 	    ("pmap_ts_referenced: page %p is not managed", m));
8749 	SLIST_INIT(&free);
8750 	cleared = 0;
8751 	pa = VM_PAGE_TO_PHYS(m);
8752 	lock = PHYS_TO_PV_LIST_LOCK(pa);
8753 	pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : pa_to_pvh(pa);
8754 	rw_wlock(lock);
8755 retry:
8756 	not_cleared = 0;
8757 	if ((pvf = TAILQ_FIRST(&pvh->pv_list)) == NULL)
8758 		goto small_mappings;
8759 	pv = pvf;
8760 	do {
8761 		if (pvf == NULL)
8762 			pvf = pv;
8763 		pmap = PV_PMAP(pv);
8764 		if (!PMAP_TRYLOCK(pmap)) {
8765 			pvh_gen = pvh->pv_gen;
8766 			rw_wunlock(lock);
8767 			PMAP_LOCK(pmap);
8768 			rw_wlock(lock);
8769 			if (pvh_gen != pvh->pv_gen) {
8770 				PMAP_UNLOCK(pmap);
8771 				goto retry;
8772 			}
8773 		}
8774 		PG_A = pmap_accessed_bit(pmap);
8775 		PG_M = pmap_modified_bit(pmap);
8776 		PG_RW = pmap_rw_bit(pmap);
8777 		va = pv->pv_va;
8778 		pde = pmap_pde(pmap, pv->pv_va);
8779 		oldpde = *pde;
8780 		if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
8781 			/*
8782 			 * Although "oldpde" maps a 2MB page, this function
8783 			 * is called at a 4KB page granularity, so we only
8784 			 * update the 4KB page under test.
8785 			 */
8786 			vm_page_dirty(m);
8787 		}
8788 		if ((oldpde & PG_A) != 0) {
8789 			/*
8790 			 * Since this reference bit is shared by 512 4KB
8791 			 * pages, it should not be cleared every time it is
8792 			 * tested.  Apply a simple "hash" function on the
8793 			 * physical page number, the virtual superpage number,
8794 			 * and the pmap address to select one 4KB page out of
8795 			 * the 512 on which testing the reference bit will
8796 			 * result in clearing that reference bit.  This
8797 			 * function is designed to avoid the selection of the
8798 			 * same 4KB page for every 2MB page mapping.
8799 			 *
8800 			 * On demotion, a mapping that hasn't been referenced
8801 			 * is simply destroyed.  To avoid the possibility of a
8802 			 * subsequent page fault on a demoted wired mapping,
8803 			 * always leave its reference bit set.  Moreover,
8804 			 * since the superpage is wired, the current state of
8805 			 * its reference bit won't affect page replacement.
8806 			 */
8807 			if ((((pa >> PAGE_SHIFT) ^ (pv->pv_va >> PDRSHIFT) ^
8808 			    (uintptr_t)pmap) & (NPTEPG - 1)) == 0 &&
8809 			    (oldpde & PG_W) == 0) {
8810 				if (safe_to_clear_referenced(pmap, oldpde)) {
8811 					atomic_clear_long(pde, PG_A);
8812 					pmap_invalidate_page(pmap, pv->pv_va);
8813 					demoted = FALSE;
8814 				} else if (pmap_demote_pde_locked(pmap, pde,
8815 				    pv->pv_va, &lock)) {
8816 					/*
8817 					 * Remove the mapping to a single page
8818 					 * so that a subsequent access may
8819 					 * repromote.  Since the underlying
8820 					 * page table page is fully populated,
8821 					 * this removal never frees a page
8822 					 * table page.
8823 					 */
8824 					demoted = TRUE;
8825 					va += VM_PAGE_TO_PHYS(m) - (oldpde &
8826 					    PG_PS_FRAME);
8827 					pte = pmap_pde_to_pte(pde, va);
8828 					pmap_remove_pte(pmap, pte, va, *pde,
8829 					    NULL, &lock);
8830 					pmap_invalidate_page(pmap, va);
8831 				} else
8832 					demoted = TRUE;
8833 
8834 				if (demoted) {
8835 					/*
8836 					 * The superpage mapping was removed
8837 					 * entirely and therefore 'pv' is no
8838 					 * longer valid.
8839 					 */
8840 					if (pvf == pv)
8841 						pvf = NULL;
8842 					pv = NULL;
8843 				}
8844 				cleared++;
8845 				KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
8846 				    ("inconsistent pv lock %p %p for page %p",
8847 				    lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
8848 			} else
8849 				not_cleared++;
8850 		}
8851 		PMAP_UNLOCK(pmap);
8852 		/* Rotate the PV list if it has more than one entry. */
8853 		if (pv != NULL && TAILQ_NEXT(pv, pv_next) != NULL) {
8854 			TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
8855 			TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
8856 			pvh->pv_gen++;
8857 		}
8858 		if (cleared + not_cleared >= PMAP_TS_REFERENCED_MAX)
8859 			goto out;
8860 	} while ((pv = TAILQ_FIRST(&pvh->pv_list)) != pvf);
8861 small_mappings:
8862 	if ((pvf = TAILQ_FIRST(&m->md.pv_list)) == NULL)
8863 		goto out;
8864 	pv = pvf;
8865 	do {
8866 		if (pvf == NULL)
8867 			pvf = pv;
8868 		pmap = PV_PMAP(pv);
8869 		if (!PMAP_TRYLOCK(pmap)) {
8870 			pvh_gen = pvh->pv_gen;
8871 			md_gen = m->md.pv_gen;
8872 			rw_wunlock(lock);
8873 			PMAP_LOCK(pmap);
8874 			rw_wlock(lock);
8875 			if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
8876 				PMAP_UNLOCK(pmap);
8877 				goto retry;
8878 			}
8879 		}
8880 		PG_A = pmap_accessed_bit(pmap);
8881 		PG_M = pmap_modified_bit(pmap);
8882 		PG_RW = pmap_rw_bit(pmap);
8883 		pde = pmap_pde(pmap, pv->pv_va);
8884 		KASSERT((*pde & PG_PS) == 0,
8885 		    ("pmap_ts_referenced: found a 2mpage in page %p's pv list",
8886 		    m));
8887 		pte = pmap_pde_to_pte(pde, pv->pv_va);
8888 		if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
8889 			vm_page_dirty(m);
8890 		if ((*pte & PG_A) != 0) {
8891 			if (safe_to_clear_referenced(pmap, *pte)) {
8892 				atomic_clear_long(pte, PG_A);
8893 				pmap_invalidate_page(pmap, pv->pv_va);
8894 				cleared++;
8895 			} else if ((*pte & PG_W) == 0) {
8896 				/*
8897 				 * Wired pages cannot be paged out, so
8898 				 * doing accessed bit emulation for
8899 				 * them is wasted effort.  We do the
8900 				 * hard work for unwired pages only.
8901 				 */
8902 				pmap_remove_pte(pmap, pte, pv->pv_va,
8903 				    *pde, &free, &lock);
8904 				pmap_invalidate_page(pmap, pv->pv_va);
8905 				cleared++;
8906 				if (pvf == pv)
8907 					pvf = NULL;
8908 				pv = NULL;
8909 				KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
8910 				    ("inconsistent pv lock %p %p for page %p",
8911 				    lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
8912 			} else
8913 				not_cleared++;
8914 		}
8915 		PMAP_UNLOCK(pmap);
8916 		/* Rotate the PV list if it has more than one entry. */
8917 		if (pv != NULL && TAILQ_NEXT(pv, pv_next) != NULL) {
8918 			TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
8919 			TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
8920 			m->md.pv_gen++;
8921 		}
8922 	} while ((pv = TAILQ_FIRST(&m->md.pv_list)) != pvf && cleared +
8923 	    not_cleared < PMAP_TS_REFERENCED_MAX);
8924 out:
8925 	rw_wunlock(lock);
8926 	vm_page_free_pages_toq(&free, true);
8927 	return (cleared + not_cleared);
8928 }
8929 
8930 /*
8931  *	Apply the given advice to the specified range of addresses within the
8932  *	given pmap.  Depending on the advice, clear the referenced and/or
8933  *	modified flags in each mapping and set the mapped page's dirty field.
8934  */
8935 void
8936 pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
8937 {
8938 	struct rwlock *lock;
8939 	pml4_entry_t *pml4e;
8940 	pdp_entry_t *pdpe;
8941 	pd_entry_t oldpde, *pde;
8942 	pt_entry_t *pte, PG_A, PG_G, PG_M, PG_RW, PG_V;
8943 	vm_offset_t va, va_next;
8944 	vm_page_t m;
8945 	bool anychanged;
8946 
8947 	if (advice != MADV_DONTNEED && advice != MADV_FREE)
8948 		return;
8949 
8950 	/*
8951 	 * A/D bit emulation requires an alternate code path when clearing
8952 	 * the modified and accessed bits below. Since this function is
8953 	 * advisory in nature, we skip it entirely for pmaps that require
8954 	 * A/D bit emulation.
8955 	 */
8956 	if (pmap_emulate_ad_bits(pmap))
8957 		return;
8958 
8959 	PG_A = pmap_accessed_bit(pmap);
8960 	PG_G = pmap_global_bit(pmap);
8961 	PG_M = pmap_modified_bit(pmap);
8962 	PG_V = pmap_valid_bit(pmap);
8963 	PG_RW = pmap_rw_bit(pmap);
8964 	anychanged = false;
8965 	pmap_delayed_invl_start();
8966 	PMAP_LOCK(pmap);
8967 	for (; sva < eva; sva = va_next) {
8968 		pml4e = pmap_pml4e(pmap, sva);
8969 		if (pml4e == NULL || (*pml4e & PG_V) == 0) {
8970 			va_next = (sva + NBPML4) & ~PML4MASK;
8971 			if (va_next < sva)
8972 				va_next = eva;
8973 			continue;
8974 		}
8975 
8976 		va_next = (sva + NBPDP) & ~PDPMASK;
8977 		if (va_next < sva)
8978 			va_next = eva;
8979 		pdpe = pmap_pml4e_to_pdpe(pml4e, sva);
8980 		if ((*pdpe & PG_V) == 0)
8981 			continue;
8982 		if ((*pdpe & PG_PS) != 0) {
8983 			KASSERT(va_next <= eva,
8984 			    ("partial update of non-transparent 1G mapping "
8985 			    "pdpe %#lx sva %#lx eva %#lx va_next %#lx",
8986 			    *pdpe, sva, eva, va_next));
8987 			continue;
8988 		}
8989 
8990 		va_next = (sva + NBPDR) & ~PDRMASK;
8991 		if (va_next < sva)
8992 			va_next = eva;
8993 		pde = pmap_pdpe_to_pde(pdpe, sva);
8994 		oldpde = *pde;
8995 		if ((oldpde & PG_V) == 0)
8996 			continue;
8997 		else if ((oldpde & PG_PS) != 0) {
8998 			if ((oldpde & PG_MANAGED) == 0)
8999 				continue;
9000 			lock = NULL;
9001 			if (!pmap_demote_pde_locked(pmap, pde, sva, &lock)) {
9002 				if (lock != NULL)
9003 					rw_wunlock(lock);
9004 
9005 				/*
9006 				 * The large page mapping was destroyed.
9007 				 */
9008 				continue;
9009 			}
9010 
9011 			/*
9012 			 * Unless the page mappings are wired, remove the
9013 			 * mapping to a single page so that a subsequent
9014 			 * access may repromote.  Choosing the last page
9015 			 * within the address range [sva, min(va_next, eva))
9016 			 * generally results in more repromotions.  Since the
9017 			 * underlying page table page is fully populated, this
9018 			 * removal never frees a page table page.
9019 			 */
9020 			if ((oldpde & PG_W) == 0) {
9021 				va = eva;
9022 				if (va > va_next)
9023 					va = va_next;
9024 				va -= PAGE_SIZE;
9025 				KASSERT(va >= sva,
9026 				    ("pmap_advise: no address gap"));
9027 				pte = pmap_pde_to_pte(pde, va);
9028 				KASSERT((*pte & PG_V) != 0,
9029 				    ("pmap_advise: invalid PTE"));
9030 				pmap_remove_pte(pmap, pte, va, *pde, NULL,
9031 				    &lock);
9032 				anychanged = true;
9033 			}
9034 			if (lock != NULL)
9035 				rw_wunlock(lock);
9036 		}
9037 		if (va_next > eva)
9038 			va_next = eva;
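		/*
		 * "va" tracks the start of a range of updated mappings that
		 * is pending TLB invalidation; it equals va_next while no
		 * such range is pending.  Global mappings are invalidated
		 * eagerly here, while others are covered by the final
		 * pmap_invalidate_all() when "anychanged" is set.
		 */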
9039 		va = va_next;
9040 		for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
9041 		    sva += PAGE_SIZE) {
9042 			if ((*pte & (PG_MANAGED | PG_V)) != (PG_MANAGED | PG_V))
9043 				goto maybe_invlrng;
9044 			else if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
9045 				if (advice == MADV_DONTNEED) {
9046 					/*
9047 					 * Future calls to pmap_is_modified()
9048 					 * can be avoided by making the page
9049 					 * dirty now.
9050 					 */
9051 					m = PHYS_TO_VM_PAGE(*pte & PG_FRAME);
9052 					vm_page_dirty(m);
9053 				}
9054 				atomic_clear_long(pte, PG_M | PG_A);
9055 			} else if ((*pte & PG_A) != 0)
9056 				atomic_clear_long(pte, PG_A);
9057 			else
9058 				goto maybe_invlrng;
9059 
9060 			if ((*pte & PG_G) != 0) {
9061 				if (va == va_next)
9062 					va = sva;
9063 			} else
9064 				anychanged = true;
9065 			continue;
9066 maybe_invlrng:
9067 			if (va != va_next) {
9068 				pmap_invalidate_range(pmap, va, sva);
9069 				va = va_next;
9070 			}
9071 		}
9072 		if (va != va_next)
9073 			pmap_invalidate_range(pmap, va, sva);
9074 	}
9075 	if (anychanged)
9076 		pmap_invalidate_all(pmap);
9077 	PMAP_UNLOCK(pmap);
9078 	pmap_delayed_invl_finish();
9079 }
9080 
9081 /*
9082  *	Clear the modify bits on the specified physical page.
9083  */
9084 void
9085 pmap_clear_modify(vm_page_t m)
9086 {
9087 	struct md_page *pvh;
9088 	pmap_t pmap;
9089 	pv_entry_t next_pv, pv;
9090 	pd_entry_t oldpde, *pde;
9091 	pt_entry_t *pte, PG_M, PG_RW;
9092 	struct rwlock *lock;
9093 	vm_offset_t va;
9094 	int md_gen, pvh_gen;
9095 
9096 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
9097 	    ("pmap_clear_modify: page %p is not managed", m));
9098 	vm_page_assert_busied(m);
9099 
9100 	if (!pmap_page_is_write_mapped(m))
9101 		return;
9102 	pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
9103 	    pa_to_pvh(VM_PAGE_TO_PHYS(m));
9104 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
9105 	rw_wlock(lock);
9106 restart:
9107 	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
9108 		pmap = PV_PMAP(pv);
9109 		if (!PMAP_TRYLOCK(pmap)) {
9110 			pvh_gen = pvh->pv_gen;
9111 			rw_wunlock(lock);
9112 			PMAP_LOCK(pmap);
9113 			rw_wlock(lock);
9114 			if (pvh_gen != pvh->pv_gen) {
9115 				PMAP_UNLOCK(pmap);
9116 				goto restart;
9117 			}
9118 		}
9119 		PG_M = pmap_modified_bit(pmap);
9120 		PG_RW = pmap_rw_bit(pmap);
9121 		va = pv->pv_va;
9122 		pde = pmap_pde(pmap, va);
9123 		oldpde = *pde;
9124 		/* If oldpde has PG_RW set, then it also has PG_M set. */
9125 		if ((oldpde & PG_RW) != 0 &&
9126 		    pmap_demote_pde_locked(pmap, pde, va, &lock) &&
9127 		    (oldpde & PG_W) == 0) {
9128 			/*
9129 			 * Write protect the mapping to a single page so that
9130 			 * a subsequent write access may repromote.
9131 			 */
9132 			va += VM_PAGE_TO_PHYS(m) - (oldpde & PG_PS_FRAME);
9133 			pte = pmap_pde_to_pte(pde, va);
9134 			atomic_clear_long(pte, PG_M | PG_RW);
9135 			vm_page_dirty(m);
9136 			pmap_invalidate_page(pmap, va);
9137 		}
9138 		PMAP_UNLOCK(pmap);
9139 	}
9140 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
9141 		pmap = PV_PMAP(pv);
9142 		if (!PMAP_TRYLOCK(pmap)) {
9143 			md_gen = m->md.pv_gen;
9144 			pvh_gen = pvh->pv_gen;
9145 			rw_wunlock(lock);
9146 			PMAP_LOCK(pmap);
9147 			rw_wlock(lock);
9148 			if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
9149 				PMAP_UNLOCK(pmap);
9150 				goto restart;
9151 			}
9152 		}
9153 		PG_M = pmap_modified_bit(pmap);
9154 		PG_RW = pmap_rw_bit(pmap);
9155 		pde = pmap_pde(pmap, pv->pv_va);
9156 		KASSERT((*pde & PG_PS) == 0, ("pmap_clear_modify: found"
9157 		    " a 2mpage in page %p's pv list", m));
9158 		pte = pmap_pde_to_pte(pde, pv->pv_va);
9159 		if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
9160 			atomic_clear_long(pte, PG_M);
9161 			pmap_invalidate_page(pmap, pv->pv_va);
9162 		}
9163 		PMAP_UNLOCK(pmap);
9164 	}
9165 	rw_wunlock(lock);
9166 }
9167 
9168 /*
9169  * Miscellaneous support routines follow
9170  */
9171 
9172 /* Adjust the properties for a leaf page table entry. */
9173 static __inline void
9174 pmap_pte_props(pt_entry_t *pte, u_long bits, u_long mask)
9175 {
9176 	u_long opte, npte;
9177 
9178 	opte = *(u_long *)pte;
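	/*
	 * Retry the compare-and-swap until it succeeds or until the entry
	 * already has the requested properties.
	 */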
9179 	do {
9180 		npte = opte & ~mask;
9181 		npte |= bits;
9182 	} while (npte != opte && !atomic_fcmpset_long((u_long *)pte, &opte,
9183 	    npte));
9184 }
9185 
9186 /*
9187  * Map a set of physical memory pages into the kernel virtual
9188  * address space. Return a pointer to where it is mapped. This
9189  * routine is intended to be used for mapping device memory,
9190  * NOT real memory.
9191  */
9192 static void *
9193 pmap_mapdev_internal(vm_paddr_t pa, vm_size_t size, int mode, int flags)
9194 {
9195 	struct pmap_preinit_mapping *ppim;
9196 	vm_offset_t va, offset;
9197 	vm_size_t tmpsize;
9198 	int i;
9199 
9200 	offset = pa & PAGE_MASK;
9201 	size = round_page(offset + size);
9202 	pa = trunc_page(pa);
9203 
9204 	if (!pmap_initialized) {
9205 		va = 0;
9206 		for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
9207 			ppim = pmap_preinit_mapping + i;
9208 			if (ppim->va == 0) {
9209 				ppim->pa = pa;
9210 				ppim->sz = size;
9211 				ppim->mode = mode;
9212 				ppim->va = virtual_avail;
9213 				virtual_avail += size;
9214 				va = ppim->va;
9215 				break;
9216 			}
9217 		}
9218 		if (va == 0)
9219 			panic("%s: too many preinit mappings", __func__);
9220 	} else {
9221 		/*
9222 		 * If we have a preinit mapping, re-use it.
9223 		 */
9224 		for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
9225 			ppim = pmap_preinit_mapping + i;
9226 			if (ppim->pa == pa && ppim->sz == size &&
9227 			    (ppim->mode == mode ||
9228 			    (flags & MAPDEV_SETATTR) == 0))
9229 				return ((void *)(ppim->va + offset));
9230 		}
9231 		/*
9232 		 * If the specified range of physical addresses fits within
9233 		 * the direct map window, use the direct map.
9234 		 */
9235 		if (pa < dmaplimit && pa + size <= dmaplimit) {
9236 			va = PHYS_TO_DMAP(pa);
9237 			if ((flags & MAPDEV_SETATTR) != 0) {
9238 				PMAP_LOCK(kernel_pmap);
9239 				i = pmap_change_props_locked(va, size,
9240 				    PROT_NONE, mode, flags);
9241 				PMAP_UNLOCK(kernel_pmap);
9242 			} else
9243 				i = 0;
9244 			if (!i)
9245 				return ((void *)(va + offset));
9246 		}
9247 		va = kva_alloc(size);
9248 		if (va == 0)
9249 			panic("%s: Couldn't allocate KVA", __func__);
9250 	}
9251 	for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE)
9252 		pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode);
9253 	pmap_invalidate_range(kernel_pmap, va, va + tmpsize);
9254 	if ((flags & MAPDEV_FLUSHCACHE) != 0)
9255 		pmap_invalidate_cache_range(va, va + tmpsize);
9256 	return ((void *)(va + offset));
9257 }
9258 
9259 void *
9260 pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
9261 {
9262 
9263 	return (pmap_mapdev_internal(pa, size, mode, MAPDEV_FLUSHCACHE |
9264 	    MAPDEV_SETATTR));
9265 }
9266 
9267 void *
9268 pmap_mapdev(vm_paddr_t pa, vm_size_t size)
9269 {
9270 
9271 	return (pmap_mapdev_attr(pa, size, PAT_UNCACHEABLE));
9272 }
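
/*
 * Typical usage (sketch only; "bar_pa" and "bar_size" are hypothetical
 * values, e.g. taken from a PCI BAR):
 *
 *	void *regs = pmap_mapdev(bar_pa, bar_size);
 *	...
 *	pmap_unmapdev((vm_offset_t)regs, bar_size);
 */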
9273 
9274 void *
9275 pmap_mapdev_pciecfg(vm_paddr_t pa, vm_size_t size)
9276 {
9277 
9278 	return (pmap_mapdev_internal(pa, size, PAT_UNCACHEABLE,
9279 	    MAPDEV_SETATTR));
9280 }
9281 
9282 void *
9283 pmap_mapbios(vm_paddr_t pa, vm_size_t size)
9284 {
9285 
9286 	return (pmap_mapdev_internal(pa, size, PAT_WRITE_BACK,
9287 	    MAPDEV_FLUSHCACHE));
9288 }
9289 
9290 void
9291 pmap_unmapdev(vm_offset_t va, vm_size_t size)
9292 {
9293 	struct pmap_preinit_mapping *ppim;
9294 	vm_offset_t offset;
9295 	int i;
9296 
9297 	/* If pmap_mapdev() gave out a direct map address, do nothing. */
9298 	if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS)
9299 		return;
9300 	offset = va & PAGE_MASK;
9301 	size = round_page(offset + size);
9302 	va = trunc_page(va);
9303 	for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
9304 		ppim = pmap_preinit_mapping + i;
9305 		if (ppim->va == va && ppim->sz == size) {
9306 			if (pmap_initialized)
9307 				return;
9308 			ppim->pa = 0;
9309 			ppim->va = 0;
9310 			ppim->sz = 0;
9311 			ppim->mode = 0;
9312 			if (va + size == virtual_avail)
9313 				virtual_avail = va;
9314 			return;
9315 		}
9316 	}
9317 	if (pmap_initialized) {
9318 		pmap_qremove(va, atop(size));
9319 		kva_free(va, size);
9320 	}
9321 }
9322 
9323 /*
9324  * Tries to demote a 1GB page mapping.
9325  */
9326 static boolean_t
9327 pmap_demote_pdpe(pmap_t pmap, pdp_entry_t *pdpe, vm_offset_t va)
9328 {
9329 	pdp_entry_t newpdpe, oldpdpe;
9330 	pd_entry_t *firstpde, newpde, *pde;
9331 	pt_entry_t PG_A, PG_M, PG_RW, PG_V;
9332 	vm_paddr_t pdpgpa;
9333 	vm_page_t pdpg;
9334 
9335 	PG_A = pmap_accessed_bit(pmap);
9336 	PG_M = pmap_modified_bit(pmap);
9337 	PG_V = pmap_valid_bit(pmap);
9338 	PG_RW = pmap_rw_bit(pmap);
9339 
9340 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
9341 	oldpdpe = *pdpe;
9342 	KASSERT((oldpdpe & (PG_PS | PG_V)) == (PG_PS | PG_V),
9343 	    ("pmap_demote_pdpe: oldpdpe is missing PG_PS and/or PG_V"));
9344 	pdpg = pmap_alloc_pt_page(pmap, va >> PDPSHIFT,
9345 	    VM_ALLOC_WIRED | VM_ALLOC_INTERRUPT);
9346 	if (pdpg == NULL) {
9347 		CTR2(KTR_PMAP, "pmap_demote_pdpe: failure for va %#lx"
9348 		    " in pmap %p", va, pmap);
9349 		return (FALSE);
9350 	}
9351 	pdpgpa = VM_PAGE_TO_PHYS(pdpg);
9352 	firstpde = (pd_entry_t *)PHYS_TO_DMAP(pdpgpa);
9353 	newpdpe = pdpgpa | PG_M | PG_A | (oldpdpe & PG_U) | PG_RW | PG_V;
9354 	KASSERT((oldpdpe & PG_A) != 0,
9355 	    ("pmap_demote_pdpe: oldpdpe is missing PG_A"));
9356 	KASSERT((oldpdpe & (PG_M | PG_RW)) != PG_RW,
9357 	    ("pmap_demote_pdpe: oldpdpe is missing PG_M"));
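	/*
	 * Each new 2MB PDE inherits the 1GB mapping's attributes, including
	 * PG_PS; only the physical frame advances from one PDE to the next.
	 */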
9358 	newpde = oldpdpe;
9359 
9360 	/*
9361 	 * Initialize the page directory page.
9362 	 */
9363 	for (pde = firstpde; pde < firstpde + NPDEPG; pde++) {
9364 		*pde = newpde;
9365 		newpde += NBPDR;
9366 	}
9367 
9368 	/*
9369 	 * Demote the mapping.
9370 	 */
9371 	*pdpe = newpdpe;
9372 
9373 	/*
9374 	 * Invalidate a stale recursive mapping of the page directory page.
9375 	 */
9376 	pmap_invalidate_page(pmap, (vm_offset_t)vtopde(va));
9377 
9378 	counter_u64_add(pmap_pdpe_demotions, 1);
9379 	CTR2(KTR_PMAP, "pmap_demote_pdpe: success for va %#lx"
9380 	    " in pmap %p", va, pmap);
9381 	return (TRUE);
9382 }
9383 
9384 /*
9385  * Sets the memory attribute for the specified page.
9386  */
9387 void
9388 pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
9389 {
9390 
9391 	m->md.pat_mode = ma;
9392 
9393 	/*
9394 	 * If "m" is a normal page, update its direct mapping.  This update
9395 	 * can be relied upon to perform any cache operations that are
9396 	 * required for data coherence.
9397 	 */
9398 	if ((m->flags & PG_FICTITIOUS) == 0 &&
9399 	    pmap_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)), PAGE_SIZE,
9400 	    m->md.pat_mode))
9401 		panic("memory attribute change on the direct map failed");
9402 }
9403 
9404 void
9405 pmap_page_set_memattr_noflush(vm_page_t m, vm_memattr_t ma)
9406 {
9407 	int error;
9408 
9409 	m->md.pat_mode = ma;
9410 
9411 	if ((m->flags & PG_FICTITIOUS) != 0)
9412 		return;
9413 	PMAP_LOCK(kernel_pmap);
9414 	error = pmap_change_props_locked(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)),
9415 	    PAGE_SIZE, PROT_NONE, m->md.pat_mode, 0);
9416 	PMAP_UNLOCK(kernel_pmap);
9417 	if (error != 0)
9418 		panic("memory attribute change on the direct map failed");
9419 }
9420 
9421 /*
9422  * Changes the specified virtual address range's memory type to that given by
9423  * the parameter "mode".  The specified virtual address range must be
9424  * completely contained within either the direct map or the kernel map.  If
9425  * the virtual address range is contained within the kernel map, then the
9426  * memory type for each of the corresponding ranges of the direct map is also
9427  * changed.  (The corresponding ranges of the direct map are those ranges that
9428  * map the same physical pages as the specified virtual address range.)  These
9429  * changes to the direct map are necessary because Intel describes the
9430  * behavior of their processors as "undefined" if two or more mappings to the
9431  * same physical page have different memory types.
9432  *
9433  * Returns zero if the change completed successfully, and either EINVAL or
9434  * ENOMEM if the change failed.  Specifically, EINVAL is returned if some part
9435  * of the virtual address range was not mapped, and ENOMEM is returned if
9436  * there was insufficient memory available to complete the change.  In the
9437  * latter case, the memory type may have been changed on some part of the
9438  * virtual address range or the direct map.
9439  */
9440 int
9441 pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
9442 {
9443 	int error;
9444 
9445 	PMAP_LOCK(kernel_pmap);
9446 	error = pmap_change_props_locked(va, size, PROT_NONE, mode,
9447 	    MAPDEV_FLUSHCACHE);
9448 	PMAP_UNLOCK(kernel_pmap);
9449 	return (error);
9450 }
9451 
9452 /*
9453  * Changes the specified virtual address range's protections to those
9454  * specified by "prot".  Like pmap_change_attr(), protections for aliases
9455  * in the direct map are updated as well.  Protections on aliasing mappings may
9456  * be a subset of the requested protections; for example, mappings in the direct
9457  * map are never executable.
9458  */
9459 int
9460 pmap_change_prot(vm_offset_t va, vm_size_t size, vm_prot_t prot)
9461 {
9462 	int error;
9463 
9464 	/* Only supported within the kernel map. */
9465 	if (va < VM_MIN_KERNEL_ADDRESS)
9466 		return (EINVAL);
9467 
9468 	PMAP_LOCK(kernel_pmap);
9469 	error = pmap_change_props_locked(va, size, prot, -1,
9470 	    MAPDEV_ASSERTVALID);
9471 	PMAP_UNLOCK(kernel_pmap);
9472 	return (error);
9473 }
9474 
9475 static int
9476 pmap_change_props_locked(vm_offset_t va, vm_size_t size, vm_prot_t prot,
9477     int mode, int flags)
9478 {
9479 	vm_offset_t base, offset, tmpva;
9480 	vm_paddr_t pa_start, pa_end, pa_end1;
9481 	pdp_entry_t *pdpe;
9482 	pd_entry_t *pde, pde_bits, pde_mask;
9483 	pt_entry_t *pte, pte_bits, pte_mask;
9484 	int error;
9485 	bool changed;
9486 
9487 	PMAP_LOCK_ASSERT(kernel_pmap, MA_OWNED);
9488 	base = trunc_page(va);
9489 	offset = va & PAGE_MASK;
9490 	size = round_page(offset + size);
9491 
9492 	/*
9493 	 * Only supported on kernel virtual addresses, including the direct
9494 	 * map but excluding the recursive map.
9495 	 */
9496 	if (base < DMAP_MIN_ADDRESS)
9497 		return (EINVAL);
9498 
9499 	/*
9500 	 * Construct our flag sets and masks.  "bits" is the subset of
9501 	 * "mask" that will be set in each modified PTE.
9502 	 *
9503 	 * Mappings in the direct map are never allowed to be executable.
9504 	 */
9505 	pde_bits = pte_bits = 0;
9506 	pde_mask = pte_mask = 0;
9507 	if (mode != -1) {
9508 		pde_bits |= pmap_cache_bits(kernel_pmap, mode, true);
9509 		pde_mask |= X86_PG_PDE_CACHE;
9510 		pte_bits |= pmap_cache_bits(kernel_pmap, mode, false);
9511 		pte_mask |= X86_PG_PTE_CACHE;
9512 	}
9513 	if (prot != VM_PROT_NONE) {
9514 		if ((prot & VM_PROT_WRITE) != 0) {
9515 			pde_bits |= X86_PG_RW;
9516 			pte_bits |= X86_PG_RW;
9517 		}
9518 		if ((prot & VM_PROT_EXECUTE) == 0 ||
9519 		    va < VM_MIN_KERNEL_ADDRESS) {
9520 			pde_bits |= pg_nx;
9521 			pte_bits |= pg_nx;
9522 		}
9523 		pde_mask |= X86_PG_RW | pg_nx;
9524 		pte_mask |= X86_PG_RW | pg_nx;
9525 	}
9526 
9527 	/*
9528 	 * Pages that aren't mapped aren't supported.  Also break down 2MB pages
9529 	 * into 4KB pages if required.
9530 	 */
9531 	for (tmpva = base; tmpva < base + size; ) {
9532 		pdpe = pmap_pdpe(kernel_pmap, tmpva);
9533 		if (pdpe == NULL || *pdpe == 0) {
9534 			KASSERT((flags & MAPDEV_ASSERTVALID) == 0,
9535 			    ("%s: addr %#lx is not mapped", __func__, tmpva));
9536 			return (EINVAL);
9537 		}
9538 		if (*pdpe & PG_PS) {
9539 			/*
9540 			 * If the current 1GB page already has the required
9541 			 * properties, then we need not demote this page.  Just
9542 			 * increment tmpva to the next 1GB page frame.
9543 			 */
9544 			if ((*pdpe & pde_mask) == pde_bits) {
9545 				tmpva = trunc_1gpage(tmpva) + NBPDP;
9546 				continue;
9547 			}
9548 
9549 			/*
9550 			 * If the current offset aligns with a 1GB page frame
9551 			 * and there is at least 1GB left within the range, then
9552 			 * we need not break down this page into 2MB pages.
9553 			 */
9554 			if ((tmpva & PDPMASK) == 0 &&
9555 			    tmpva + PDPMASK < base + size) {
9556 				tmpva += NBPDP;
9557 				continue;
9558 			}
9559 			if (!pmap_demote_pdpe(kernel_pmap, pdpe, tmpva))
9560 				return (ENOMEM);
9561 		}
9562 		pde = pmap_pdpe_to_pde(pdpe, tmpva);
9563 		if (*pde == 0) {
9564 			KASSERT((flags & MAPDEV_ASSERTVALID) == 0,
9565 			    ("%s: addr %#lx is not mapped", __func__, tmpva));
9566 			return (EINVAL);
9567 		}
9568 		if (*pde & PG_PS) {
9569 			/*
9570 			 * If the current 2MB page already has the required
9571 			 * properties, then we need not demote this page.  Just
9572 			 * increment tmpva to the next 2MB page frame.
9573 			 */
9574 			if ((*pde & pde_mask) == pde_bits) {
9575 				tmpva = trunc_2mpage(tmpva) + NBPDR;
9576 				continue;
9577 			}
9578 
9579 			/*
9580 			 * If the current offset aligns with a 2MB page frame
9581 			 * and there is at least 2MB left within the range, then
9582 			 * we need not break down this page into 4KB pages.
9583 			 */
9584 			if ((tmpva & PDRMASK) == 0 &&
9585 			    tmpva + PDRMASK < base + size) {
9586 				tmpva += NBPDR;
9587 				continue;
9588 			}
9589 			if (!pmap_demote_pde(kernel_pmap, pde, tmpva))
9590 				return (ENOMEM);
9591 		}
9592 		pte = pmap_pde_to_pte(pde, tmpva);
9593 		if (*pte == 0) {
9594 			KASSERT((flags & MAPDEV_ASSERTVALID) == 0,
9595 			    ("%s: addr %#lx is not mapped", __func__, tmpva));
9596 			return (EINVAL);
9597 		}
9598 		tmpva += PAGE_SIZE;
9599 	}
9600 	error = 0;
9601 
9602 	/*
9603 	 * Ok, all the pages exist, so run through them updating their
9604 	 * properties if required.
9605 	 */
9606 	changed = false;
9607 	pa_start = pa_end = 0;
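	/*
	 * "pa_start" and "pa_end" delimit a run of physical addresses whose
	 * direct map aliases must be updated with the same properties; the
	 * run is flushed whenever it can no longer be extended.
	 */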
9608 	for (tmpva = base; tmpva < base + size; ) {
9609 		pdpe = pmap_pdpe(kernel_pmap, tmpva);
9610 		if (*pdpe & PG_PS) {
9611 			if ((*pdpe & pde_mask) != pde_bits) {
9612 				pmap_pte_props(pdpe, pde_bits, pde_mask);
9613 				changed = true;
9614 			}
9615 			if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
9616 			    (*pdpe & PG_PS_FRAME) < dmaplimit) {
9617 				if (pa_start == pa_end) {
9618 					/* Start physical address run. */
9619 					pa_start = *pdpe & PG_PS_FRAME;
9620 					pa_end = pa_start + NBPDP;
9621 				} else if (pa_end == (*pdpe & PG_PS_FRAME))
9622 					pa_end += NBPDP;
9623 				else {
9624 					/* Run ended, update direct map. */
9625 					error = pmap_change_props_locked(
9626 					    PHYS_TO_DMAP(pa_start),
9627 					    pa_end - pa_start, prot, mode,
9628 					    flags);
9629 					if (error != 0)
9630 						break;
9631 					/* Start physical address run. */
9632 					pa_start = *pdpe & PG_PS_FRAME;
9633 					pa_end = pa_start + NBPDP;
9634 				}
9635 			}
9636 			tmpva = trunc_1gpage(tmpva) + NBPDP;
9637 			continue;
9638 		}
9639 		pde = pmap_pdpe_to_pde(pdpe, tmpva);
9640 		if (*pde & PG_PS) {
9641 			if ((*pde & pde_mask) != pde_bits) {
9642 				pmap_pte_props(pde, pde_bits, pde_mask);
9643 				changed = true;
9644 			}
9645 			if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
9646 			    (*pde & PG_PS_FRAME) < dmaplimit) {
9647 				if (pa_start == pa_end) {
9648 					/* Start physical address run. */
9649 					pa_start = *pde & PG_PS_FRAME;
9650 					pa_end = pa_start + NBPDR;
9651 				} else if (pa_end == (*pde & PG_PS_FRAME))
9652 					pa_end += NBPDR;
9653 				else {
9654 					/* Run ended, update direct map. */
9655 					error = pmap_change_props_locked(
9656 					    PHYS_TO_DMAP(pa_start),
9657 					    pa_end - pa_start, prot, mode,
9658 					    flags);
9659 					if (error != 0)
9660 						break;
9661 					/* Start physical address run. */
9662 					pa_start = *pde & PG_PS_FRAME;
9663 					pa_end = pa_start + NBPDR;
9664 				}
9665 			}
9666 			tmpva = trunc_2mpage(tmpva) + NBPDR;
9667 		} else {
9668 			pte = pmap_pde_to_pte(pde, tmpva);
9669 			if ((*pte & pte_mask) != pte_bits) {
9670 				pmap_pte_props(pte, pte_bits, pte_mask);
9671 				changed = true;
9672 			}
9673 			if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
9674 			    (*pte & PG_FRAME) < dmaplimit) {
9675 				if (pa_start == pa_end) {
9676 					/* Start physical address run. */
9677 					pa_start = *pte & PG_FRAME;
9678 					pa_end = pa_start + PAGE_SIZE;
9679 				} else if (pa_end == (*pte & PG_FRAME))
9680 					pa_end += PAGE_SIZE;
9681 				else {
9682 					/* Run ended, update direct map. */
9683 					error = pmap_change_props_locked(
9684 					    PHYS_TO_DMAP(pa_start),
9685 					    pa_end - pa_start, prot, mode,
9686 					    flags);
9687 					if (error != 0)
9688 						break;
9689 					/* Start physical address run. */
9690 					pa_start = *pte & PG_FRAME;
9691 					pa_end = pa_start + PAGE_SIZE;
9692 				}
9693 			}
9694 			tmpva += PAGE_SIZE;
9695 		}
9696 	}
9697 	if (error == 0 && pa_start != pa_end && pa_start < dmaplimit) {
9698 		pa_end1 = MIN(pa_end, dmaplimit);
9699 		if (pa_start != pa_end1)
9700 			error = pmap_change_props_locked(PHYS_TO_DMAP(pa_start),
9701 			    pa_end1 - pa_start, prot, mode, flags);
9702 	}
9703 
9704 	/*
9705 	 * Flush the CPU caches if required, so that no stale data is left
9706 	 * cached under the old memory type.
9707 	 */
9708 	if (changed) {
9709 		pmap_invalidate_range(kernel_pmap, base, tmpva);
9710 		if ((flags & MAPDEV_FLUSHCACHE) != 0)
9711 			pmap_invalidate_cache_range(base, tmpva);
9712 	}
9713 	return (error);
9714 }
9715 
9716 /*
9717  * Demotes any mapping within the direct map region that covers more than the
9718  * specified range of physical addresses.  This range's size must be a power
9719  * of two and its starting address must be a multiple of its size.  Since the
9720  * demotion does not change any attributes of the mapping, a TLB invalidation
9721  * is not mandatory.  The caller may, however, request a TLB invalidation.
9722  */
9723 void
9724 pmap_demote_DMAP(vm_paddr_t base, vm_size_t len, boolean_t invalidate)
9725 {
9726 	pdp_entry_t *pdpe;
9727 	pd_entry_t *pde;
9728 	vm_offset_t va;
9729 	boolean_t changed;
9730 
9731 	if (len == 0)
9732 		return;
9733 	KASSERT(powerof2(len), ("pmap_demote_DMAP: len is not a power of 2"));
9734 	KASSERT((base & (len - 1)) == 0,
9735 	    ("pmap_demote_DMAP: base is not a multiple of len"));
9736 	if (len < NBPDP && base < dmaplimit) {
9737 		va = PHYS_TO_DMAP(base);
9738 		changed = FALSE;
9739 		PMAP_LOCK(kernel_pmap);
9740 		pdpe = pmap_pdpe(kernel_pmap, va);
9741 		if ((*pdpe & X86_PG_V) == 0)
9742 			panic("pmap_demote_DMAP: invalid PDPE");
9743 		if ((*pdpe & PG_PS) != 0) {
9744 			if (!pmap_demote_pdpe(kernel_pmap, pdpe, va))
9745 				panic("pmap_demote_DMAP: PDPE failed");
9746 			changed = TRUE;
9747 		}
9748 		if (len < NBPDR) {
9749 			pde = pmap_pdpe_to_pde(pdpe, va);
9750 			if ((*pde & X86_PG_V) == 0)
9751 				panic("pmap_demote_DMAP: invalid PDE");
9752 			if ((*pde & PG_PS) != 0) {
9753 				if (!pmap_demote_pde(kernel_pmap, pde, va))
9754 					panic("pmap_demote_DMAP: PDE failed");
9755 				changed = TRUE;
9756 			}
9757 		}
9758 		if (changed && invalidate)
9759 			pmap_invalidate_page(kernel_pmap, va);
9760 		PMAP_UNLOCK(kernel_pmap);
9761 	}
9762 }
9763 
9764 /*
9765  * Perform the pmap work for mincore(2).  If the page is not both referenced
9766  * and modified by this pmap, return its physical address so that the caller
9767  * can find other mappings.
9768  */
9769 int
9770 pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap)
9771 {
9772 	pdp_entry_t *pdpe;
9773 	pd_entry_t *pdep;
9774 	pt_entry_t pte, PG_A, PG_M, PG_RW, PG_V;
9775 	vm_paddr_t pa;
9776 	int val;
9777 
9778 	PG_A = pmap_accessed_bit(pmap);
9779 	PG_M = pmap_modified_bit(pmap);
9780 	PG_V = pmap_valid_bit(pmap);
9781 	PG_RW = pmap_rw_bit(pmap);
9782 
9783 	PMAP_LOCK(pmap);
9784 	pte = 0;
9785 	pa = 0;
9786 	val = 0;
9787 	pdpe = pmap_pdpe(pmap, addr);
9788 	if (pdpe == NULL)
9789 		goto out;
9790 	if ((*pdpe & PG_V) != 0) {
9791 		if ((*pdpe & PG_PS) != 0) {
9792 			pte = *pdpe;
9793 			pa = ((pte & PG_PS_PDP_FRAME) | (addr & PDPMASK)) &
9794 			    PG_FRAME;
9795 			val = MINCORE_PSIND(2);
9796 		} else {
9797 			pdep = pmap_pde(pmap, addr);
9798 			if (pdep != NULL && (*pdep & PG_V) != 0) {
9799 				if ((*pdep & PG_PS) != 0) {
9800 					pte = *pdep;
9801 			/* Compute the physical address of the 4KB page. */
9802 					pa = ((pte & PG_PS_FRAME) | (addr &
9803 					    PDRMASK)) & PG_FRAME;
9804 					val = MINCORE_PSIND(1);
9805 				} else {
9806 					pte = *pmap_pde_to_pte(pdep, addr);
9807 					pa = pte & PG_FRAME;
9808 					val = 0;
9809 				}
9810 			}
9811 		}
9812 	}
9813 	if ((pte & PG_V) != 0) {
9814 		val |= MINCORE_INCORE;
9815 		if ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
9816 			val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
9817 		if ((pte & PG_A) != 0)
9818 			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
9819 	}
9820 	if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
9821 	    (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) &&
9822 	    (pte & (PG_MANAGED | PG_V)) == (PG_MANAGED | PG_V)) {
9823 		*pap = pa;
9824 	}
9825 out:
9826 	PMAP_UNLOCK(pmap);
9827 	return (val);
9828 }
9829 
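/*
 * Allocate or revalidate the pmap's PCID for the current CPU.  A non-zero
 * return (CR3_PCID_SAVE) indicates that the TLB entries tagged with that
 * PCID are still valid and need not be flushed when %cr3 is reloaded.
 */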
9830 static uint64_t
9831 pmap_pcid_alloc(pmap_t pmap, u_int cpuid)
9832 {
9833 	uint32_t gen, new_gen, pcid_next;
9834 
9835 	CRITICAL_ASSERT(curthread);
9836 	gen = PCPU_GET(pcid_gen);
9837 	if (pmap->pm_pcids[cpuid].pm_pcid == PMAP_PCID_KERN)
9838 		return (pti ? 0 : CR3_PCID_SAVE);
9839 	if (pmap->pm_pcids[cpuid].pm_gen == gen)
9840 		return (CR3_PCID_SAVE);
9841 	pcid_next = PCPU_GET(pcid_next);
9842 	KASSERT((!pti && pcid_next <= PMAP_PCID_OVERMAX) ||
9843 	    (pti && pcid_next <= PMAP_PCID_OVERMAX_KERN),
9844 	    ("cpu %d pcid_next %#x", cpuid, pcid_next));
9845 	if ((!pti && pcid_next == PMAP_PCID_OVERMAX) ||
9846 	    (pti && pcid_next == PMAP_PCID_OVERMAX_KERN)) {
9847 		new_gen = gen + 1;
9848 		if (new_gen == 0)
9849 			new_gen = 1;
9850 		PCPU_SET(pcid_gen, new_gen);
9851 		pcid_next = PMAP_PCID_KERN + 1;
9852 	} else {
9853 		new_gen = gen;
9854 	}
9855 	pmap->pm_pcids[cpuid].pm_pcid = pcid_next;
9856 	pmap->pm_pcids[cpuid].pm_gen = new_gen;
9857 	PCPU_SET(pcid_next, pcid_next + 1);
9858 	return (0);
9859 }
9860 
9861 static uint64_t
9862 pmap_pcid_alloc_checked(pmap_t pmap, u_int cpuid)
9863 {
9864 	uint64_t cached;
9865 
9866 	cached = pmap_pcid_alloc(pmap, cpuid);
9867 	KASSERT(pmap->pm_pcids[cpuid].pm_pcid < PMAP_PCID_OVERMAX,
9868 	    ("pmap %p cpu %d pcid %#x", pmap, cpuid,
9869 	    pmap->pm_pcids[cpuid].pm_pcid));
9870 	KASSERT(pmap->pm_pcids[cpuid].pm_pcid != PMAP_PCID_KERN ||
9871 	    pmap == kernel_pmap,
9872 	    ("non-kernel pmap pmap %p cpu %d pcid %#x",
9873 	    pmap, cpuid, pmap->pm_pcids[cpuid].pm_pcid));
9874 	return (cached);
9875 }
9876 
9877 static void
9878 pmap_activate_sw_pti_post(struct thread *td, pmap_t pmap)
9879 {
9880 
9881 	PCPU_GET(tssp)->tss_rsp0 = pmap->pm_ucr3 != PMAP_NO_CR3 ?
9882 	    PCPU_GET(pti_rsp0) : (uintptr_t)td->td_md.md_stack_base;
9883 }
9884 
9885 static void
9886 pmap_activate_sw_pcid_pti(struct thread *td, pmap_t pmap, u_int cpuid)
9887 {
9888 	pmap_t old_pmap;
9889 	uint64_t cached, cr3, kcr3, ucr3;
9890 
9891 	KASSERT((read_rflags() & PSL_I) == 0,
9892 	    ("PCID needs interrupts disabled in pmap_activate_sw()"));
9893 
9894 	/* See the comment in pmap_invalidate_page_pcid(). */
9895 	if (PCPU_GET(ucr3_load_mask) != PMAP_UCR3_NOMASK) {
9896 		PCPU_SET(ucr3_load_mask, PMAP_UCR3_NOMASK);
9897 		old_pmap = PCPU_GET(curpmap);
9898 		MPASS(old_pmap->pm_ucr3 != PMAP_NO_CR3);
9899 		old_pmap->pm_pcids[cpuid].pm_gen = 0;
9900 	}
9901 
9902 	cached = pmap_pcid_alloc_checked(pmap, cpuid);
9903 	cr3 = rcr3();
9904 	if ((cr3 & ~CR3_PCID_MASK) != pmap->pm_cr3)
9905 		load_cr3(pmap->pm_cr3 | pmap->pm_pcids[cpuid].pm_pcid);
9906 	PCPU_SET(curpmap, pmap);
9907 	kcr3 = pmap->pm_cr3 | pmap->pm_pcids[cpuid].pm_pcid;
9908 	ucr3 = pmap->pm_ucr3 | pmap->pm_pcids[cpuid].pm_pcid |
9909 	    PMAP_PCID_USER_PT;
9910 
9911 	if (!cached && pmap->pm_ucr3 != PMAP_NO_CR3)
9912 		PCPU_SET(ucr3_load_mask, ~CR3_PCID_SAVE);
9913 
9914 	PCPU_SET(kcr3, kcr3 | CR3_PCID_SAVE);
9915 	PCPU_SET(ucr3, ucr3 | CR3_PCID_SAVE);
9916 	if (cached)
9917 		counter_u64_add(pcid_save_cnt, 1);
9918 
9919 	pmap_activate_sw_pti_post(td, pmap);
9920 }
9921 
9922 static void
9923 pmap_activate_sw_pcid_nopti(struct thread *td __unused, pmap_t pmap,
9924     u_int cpuid)
9925 {
9926 	uint64_t cached, cr3;
9927 
9928 	KASSERT((read_rflags() & PSL_I) == 0,
9929 	    ("PCID needs interrupts disabled in pmap_activate_sw()"));
9930 
9931 	cached = pmap_pcid_alloc_checked(pmap, cpuid);
9932 	cr3 = rcr3();
9933 	if (!cached || (cr3 & ~CR3_PCID_MASK) != pmap->pm_cr3)
9934 		load_cr3(pmap->pm_cr3 | pmap->pm_pcids[cpuid].pm_pcid |
9935 		    cached);
9936 	PCPU_SET(curpmap, pmap);
9937 	if (cached)
9938 		counter_u64_add(pcid_save_cnt, 1);
9939 }
9940 
9941 static void
9942 pmap_activate_sw_nopcid_nopti(struct thread *td __unused, pmap_t pmap,
9943     u_int cpuid __unused)
9944 {
9945 
9946 	load_cr3(pmap->pm_cr3);
9947 	PCPU_SET(curpmap, pmap);
9948 }
9949 
9950 static void
9951 pmap_activate_sw_nopcid_pti(struct thread *td, pmap_t pmap,
9952     u_int cpuid __unused)
9953 {
9954 
9955 	pmap_activate_sw_nopcid_nopti(td, pmap, cpuid);
9956 	PCPU_SET(kcr3, pmap->pm_cr3);
9957 	PCPU_SET(ucr3, pmap->pm_ucr3);
9958 	pmap_activate_sw_pti_post(td, pmap);
9959 }
9960 
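/*
 * Resolved once at boot: select the pmap_activate_sw() implementation
 * that matches the PCID and PTI configuration.
 */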
9961 DEFINE_IFUNC(static, void, pmap_activate_sw_mode, (struct thread *, pmap_t,
9962     u_int))
9963 {
9964 
9965 	if (pmap_pcid_enabled && pti)
9966 		return (pmap_activate_sw_pcid_pti);
9967 	else if (pmap_pcid_enabled && !pti)
9968 		return (pmap_activate_sw_pcid_nopti);
9969 	else if (!pmap_pcid_enabled && pti)
9970 		return (pmap_activate_sw_nopcid_pti);
9971 	else /* if (!pmap_pcid_enabled && !pti) */
9972 		return (pmap_activate_sw_nopcid_nopti);
9973 }
9974 
9975 void
9976 pmap_activate_sw(struct thread *td)
9977 {
9978 	pmap_t oldpmap, pmap;
9979 	u_int cpuid;
9980 
9981 	oldpmap = PCPU_GET(curpmap);
9982 	pmap = vmspace_pmap(td->td_proc->p_vmspace);
9983 	if (oldpmap == pmap) {
9984 		if (cpu_vendor_id != CPU_VENDOR_INTEL)
9985 			mfence();
9986 		return;
9987 	}
9988 	cpuid = PCPU_GET(cpuid);
9989 #ifdef SMP
9990 	CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
9991 #else
9992 	CPU_SET(cpuid, &pmap->pm_active);
9993 #endif
9994 	pmap_activate_sw_mode(td, pmap, cpuid);
9995 #ifdef SMP
9996 	CPU_CLR_ATOMIC(cpuid, &oldpmap->pm_active);
9997 #else
9998 	CPU_CLR(cpuid, &oldpmap->pm_active);
9999 #endif
10000 }
10001 
10002 void
10003 pmap_activate(struct thread *td)
10004 {
10005 	/*
10006 	 * invltlb_{invpcid,}_pcid_handler() is used to handle an
10007 	 * invalidate_all IPI, which checks for curpmap ==
10008 	 * smp_tlb_pmap.  The below sequence of operations has a
10009 	 * window where %CR3 is loaded with the new pmap's PML4
10010 	 * address, but the curpmap value has not yet been updated.
10011 	 * This causes the invltlb IPI handler, which is called
10012 	 * between the updates, to execute as a NOP, which leaves
10013 	 * stale TLB entries.
10014 	 *
10015 	 * Note that the most common use of pmap_activate_sw(), from
10016 	 * a context switch, is immune to this race, because
10017 	 * interrupts are disabled (while the thread lock is owned),
10018 	 * so the IPI is delayed until after curpmap is updated.  Protect
10019 	 * other callers in a similar way, by disabling interrupts
10020 	 * around the %cr3 register reload and curpmap assignment.
10021 	 */
10022 	spinlock_enter();
10023 	pmap_activate_sw(td);
10024 	spinlock_exit();
10025 }
10026 
10027 void
10028 pmap_activate_boot(pmap_t pmap)
10029 {
10030 	uint64_t kcr3;
10031 	u_int cpuid;
10032 
10033 	/*
10034 	 * kernel_pmap must never be deactivated, and we ensure that
10035 	 * by never activating it at all.
10036 	 */
10037 	MPASS(pmap != kernel_pmap);
10038 
10039 	cpuid = PCPU_GET(cpuid);
10040 #ifdef SMP
10041 	CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
10042 #else
10043 	CPU_SET(cpuid, &pmap->pm_active);
10044 #endif
10045 	PCPU_SET(curpmap, pmap);
10046 	if (pti) {
10047 		kcr3 = pmap->pm_cr3;
10048 		if (pmap_pcid_enabled)
10049 			kcr3 |= pmap->pm_pcids[cpuid].pm_pcid | CR3_PCID_SAVE;
10050 	} else {
10051 		kcr3 = PMAP_NO_CR3;
10052 	}
10053 	PCPU_SET(kcr3, kcr3);
10054 	PCPU_SET(ucr3, PMAP_NO_CR3);
10055 }
10056 
10057 void
10058 pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
10059 {
10060 }
10061 
10062 /*
10063  *	Increase the starting virtual address of the given mapping if a
10064  *	different alignment might result in more superpage mappings.
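/*
 * For example (illustrative values only, assuming an uncolored object and a
 * request spanning several superpages): with offset 0x201000 and *addr
 * 0x7f0000400000, superpage_offset is 0x1000, so *addr is advanced to
 * 0x7f0000401000; thereafter, object offsets that are 2MB-aligned map to
 * 2MB-aligned virtual addresses.
 */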
10065  */
10066 void
10067 pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
10068     vm_offset_t *addr, vm_size_t size)
10069 {
10070 	vm_offset_t superpage_offset;
10071 
10072 	if (size < NBPDR)
10073 		return;
10074 	if (object != NULL && (object->flags & OBJ_COLORED) != 0)
10075 		offset += ptoa(object->pg_color);
10076 	superpage_offset = offset & PDRMASK;
10077 	if (size - ((NBPDR - superpage_offset) & PDRMASK) < NBPDR ||
10078 	    (*addr & PDRMASK) == superpage_offset)
10079 		return;
10080 	if ((*addr & PDRMASK) < superpage_offset)
10081 		*addr = (*addr & ~PDRMASK) + superpage_offset;
10082 	else
10083 		*addr = ((*addr + PDRMASK) & ~PDRMASK) + superpage_offset;
10084 }
10085 
10086 #ifdef INVARIANTS
10087 static unsigned long num_dirty_emulations;
10088 SYSCTL_ULONG(_vm_pmap, OID_AUTO, num_dirty_emulations, CTLFLAG_RW,
10089 	     &num_dirty_emulations, 0, NULL);
10090 
10091 static unsigned long num_accessed_emulations;
10092 SYSCTL_ULONG(_vm_pmap, OID_AUTO, num_accessed_emulations, CTLFLAG_RW,
10093 	     &num_accessed_emulations, 0, NULL);
10094 
10095 static unsigned long num_superpage_accessed_emulations;
10096 SYSCTL_ULONG(_vm_pmap, OID_AUTO, num_superpage_accessed_emulations, CTLFLAG_RW,
10097 	     &num_superpage_accessed_emulations, 0, NULL);
10098 
10099 static unsigned long ad_emulation_superpage_promotions;
10100 SYSCTL_ULONG(_vm_pmap, OID_AUTO, ad_emulation_superpage_promotions, CTLFLAG_RW,
10101 	     &ad_emulation_superpage_promotions, 0, NULL);
10102 #endif	/* INVARIANTS */
10103 
10104 int
10105 pmap_emulate_accessed_dirty(pmap_t pmap, vm_offset_t va, int ftype)
10106 {
10107 	int rv;
10108 	struct rwlock *lock;
10109 #if VM_NRESERVLEVEL > 0
10110 	vm_page_t m, mpte;
10111 #endif
10112 	pd_entry_t *pde;
10113 	pt_entry_t *pte, PG_A, PG_M, PG_RW, PG_V;
10114 
10115 	KASSERT(ftype == VM_PROT_READ || ftype == VM_PROT_WRITE,
10116 	    ("pmap_emulate_accessed_dirty: invalid fault type %d", ftype));
10117 
10118 	if (!pmap_emulate_ad_bits(pmap))
10119 		return (-1);
10120 
10121 	PG_A = pmap_accessed_bit(pmap);
10122 	PG_M = pmap_modified_bit(pmap);
10123 	PG_V = pmap_valid_bit(pmap);
10124 	PG_RW = pmap_rw_bit(pmap);
10125 
10126 	rv = -1;
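	/* -1 tells the caller that the fault was not handled here. */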
10127 	lock = NULL;
10128 	PMAP_LOCK(pmap);
10129 
10130 	pde = pmap_pde(pmap, va);
10131 	if (pde == NULL || (*pde & PG_V) == 0)
10132 		goto done;
10133 
10134 	if ((*pde & PG_PS) != 0) {
10135 		if (ftype == VM_PROT_READ) {
10136 #ifdef INVARIANTS
10137 			atomic_add_long(&num_superpage_accessed_emulations, 1);
10138 #endif
10139 			*pde |= PG_A;
10140 			rv = 0;
10141 		}
10142 		goto done;
10143 	}
10144 
10145 	pte = pmap_pde_to_pte(pde, va);
10146 	if ((*pte & PG_V) == 0)
10147 		goto done;
10148 
10149 	if (ftype == VM_PROT_WRITE) {
10150 		if ((*pte & PG_RW) == 0)
10151 			goto done;
10152 		/*
10153 		 * Set the modified and accessed bits simultaneously.
10154 		 *
10155 		 * Intel EPT PTEs that do software emulation of A/D bits map
10156 		 * PG_A and PG_M to EPT_PG_READ and EPT_PG_WRITE respectively.
10157 		 * An EPT misconfiguration is triggered if the PTE is writable
10158 		 * but not readable (WR=10). This is avoided by setting PG_A
10159 		 * and PG_M simultaneously.
10160 		 */
10161 		*pte |= PG_M | PG_A;
10162 	} else {
10163 		*pte |= PG_A;
10164 	}
10165 
10166 #if VM_NRESERVLEVEL > 0
10167 	/* try to promote the mapping */
10168 	if (va < VM_MAXUSER_ADDRESS)
10169 		mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
10170 	else
10171 		mpte = NULL;
10172 
10173 	m = PHYS_TO_VM_PAGE(*pte & PG_FRAME);
10174 
10175 	if ((mpte == NULL || mpte->ref_count == NPTEPG) &&
10176 	    pmap_ps_enabled(pmap) &&
10177 	    (m->flags & PG_FICTITIOUS) == 0 &&
10178 	    vm_reserv_level_iffullpop(m) == 0) {
10179 		pmap_promote_pde(pmap, pde, va, &lock);
10180 #ifdef INVARIANTS
10181 		atomic_add_long(&ad_emulation_superpage_promotions, 1);
10182 #endif
10183 	}
10184 #endif
10185 
10186 #ifdef INVARIANTS
10187 	if (ftype == VM_PROT_WRITE)
10188 		atomic_add_long(&num_dirty_emulations, 1);
10189 	else
10190 		atomic_add_long(&num_accessed_emulations, 1);
10191 #endif
10192 	rv = 0;		/* success */
10193 done:
10194 	if (lock != NULL)
10195 		rw_wunlock(lock);
10196 	PMAP_UNLOCK(pmap);
10197 	return (rv);
10198 }
10199 
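/*
 * Copy the paging-structure entries that translate "va" into "ptr", from the
 * PML4E down to the PTE, stopping after the first entry that is non-present
 * or maps a large page.  "*num" returns the number of entries copied.
 */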
10200 void
10201 pmap_get_mapping(pmap_t pmap, vm_offset_t va, uint64_t *ptr, int *num)
10202 {
10203 	pml4_entry_t *pml4;
10204 	pdp_entry_t *pdp;
10205 	pd_entry_t *pde;
10206 	pt_entry_t *pte, PG_V;
10207 	int idx;
10208 
10209 	idx = 0;
10210 	PG_V = pmap_valid_bit(pmap);
10211 	PMAP_LOCK(pmap);
10212 
10213 	pml4 = pmap_pml4e(pmap, va);
10214 	if (pml4 == NULL)
10215 		goto done;
10216 	ptr[idx++] = *pml4;
10217 	if ((*pml4 & PG_V) == 0)
10218 		goto done;
10219 
10220 	pdp = pmap_pml4e_to_pdpe(pml4, va);
10221 	ptr[idx++] = *pdp;
10222 	if ((*pdp & PG_V) == 0 || (*pdp & PG_PS) != 0)
10223 		goto done;
10224 
10225 	pde = pmap_pdpe_to_pde(pdp, va);
10226 	ptr[idx++] = *pde;
10227 	if ((*pde & PG_V) == 0 || (*pde & PG_PS) != 0)
10228 		goto done;
10229 
10230 	pte = pmap_pde_to_pte(pde, va);
10231 	ptr[idx++] = *pte;
10232 
10233 done:
10234 	PMAP_UNLOCK(pmap);
10235 	*num = idx;
10236 }
10237 
10238 /**
10239  * Get the kernel virtual address of a set of physical pages. If there are
10240  * physical addresses not covered by the DMAP, perform a transient mapping
10241  * that will be removed when pmap_unmap_io_transient() is called.
10242  *
10243  * \param page        The pages for which the caller wishes to obtain
10244  *                    kernel virtual addresses.
10245  * \param vaddr       On return contains the kernel virtual memory address
10246  *                    of the pages passed in the page parameter.
10247  * \param count       Number of pages passed in.
10248  * \param can_fault   TRUE if the thread using the mapped pages can take
10249  *                    page faults, FALSE otherwise.
10250  *
10251  * \returns TRUE if the caller must call pmap_unmap_io_transient when
10252  *          finished, or FALSE otherwise.
10253  *
10254  */
10255 boolean_t
10256 pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
10257     boolean_t can_fault)
10258 {
10259 	vm_paddr_t paddr;
10260 	boolean_t needs_mapping;
10261 	pt_entry_t *pte;
10262 	int cache_bits, error __unused, i;
10263 
10264 	/*
10265 	 * Allocate any KVA space that we need; this is done in a separate
10266 	 * loop to prevent calling vmem_alloc() while pinned.
10267 	 */
10268 	needs_mapping = FALSE;
10269 	for (i = 0; i < count; i++) {
10270 		paddr = VM_PAGE_TO_PHYS(page[i]);
10271 		if (__predict_false(paddr >= dmaplimit)) {
10272 			error = vmem_alloc(kernel_arena, PAGE_SIZE,
10273 			    M_BESTFIT | M_WAITOK, &vaddr[i]);
10274 			KASSERT(error == 0, ("vmem_alloc failed: %d", error));
10275 			needs_mapping = TRUE;
10276 		} else {
10277 			vaddr[i] = PHYS_TO_DMAP(paddr);
10278 		}
10279 	}
10280 
10281 	/* Exit early if everything is covered by the DMAP */
10282 	if (!needs_mapping)
10283 		return (FALSE);
10284 
10285 	/*
10286 	 * NB:  The sequence of updating a page table followed by accesses
10287 	 * to the corresponding pages used in the !DMAP case is subject to
10288 	 * the situation described in the "AMD64 Architecture Programmer's
10289 	 * Manual Volume 2: System Programming" rev. 3.23, "7.3.1 Special
10290 	 * Coherency Considerations".  Therefore, issuing the INVLPG right
10291 	 * after modifying the PTE bits is crucial.
10292 	 */
10293 	if (!can_fault)
10294 		sched_pin();
10295 	for (i = 0; i < count; i++) {
10296 		paddr = VM_PAGE_TO_PHYS(page[i]);
10297 		if (paddr >= dmaplimit) {
10298 			if (can_fault) {
10299 				/*
10300 				 * Slow path: since we can take page faults
10301 				 * while the mappings are active, don't pin
10302 				 * the thread to the CPU; instead add a
10303 				 * global mapping visible to all CPUs.
10304 				 */
10305 				pmap_qenter(vaddr[i], &page[i], 1);
10306 			} else {
10307 				pte = vtopte(vaddr[i]);
10308 				cache_bits = pmap_cache_bits(kernel_pmap,
10309 				    page[i]->md.pat_mode, 0);
10310 				pte_store(pte, paddr | X86_PG_RW | X86_PG_V |
10311 				    cache_bits);
10312 				invlpg(vaddr[i]);
10313 			}
10314 		}
10315 	}
10316 
10317 	return (needs_mapping);
10318 }
10319 
10320 void
10321 pmap_unmap_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
10322     boolean_t can_fault)
10323 {
10324 	vm_paddr_t paddr;
10325 	int i;
10326 
10327 	if (!can_fault)
10328 		sched_unpin();
10329 	for (i = 0; i < count; i++) {
10330 		paddr = VM_PAGE_TO_PHYS(page[i]);
10331 		if (paddr >= dmaplimit) {
10332 			if (can_fault)
10333 				pmap_qremove(vaddr[i], 1);
10334 			vmem_free(kernel_arena, vaddr[i], PAGE_SIZE);
10335 		}
10336 	}
10337 }
10338 
10339 vm_offset_t
10340 pmap_quick_enter_page(vm_page_t m)
10341 {
10342 	vm_paddr_t paddr;
10343 
10344 	paddr = VM_PAGE_TO_PHYS(m);
10345 	if (paddr < dmaplimit)
10346 		return (PHYS_TO_DMAP(paddr));
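	/*
	 * The page is not covered by the direct map; borrow the single
	 * "qframe" KVA page, serialized by qframe_mtx.
	 */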
10347 	mtx_lock_spin(&qframe_mtx);
10348 	KASSERT(*vtopte(qframe) == 0, ("qframe busy"));
10349 	pte_store(vtopte(qframe), paddr | X86_PG_RW | X86_PG_V | X86_PG_A |
10350 	    X86_PG_M | pmap_cache_bits(kernel_pmap, m->md.pat_mode, 0));
10351 	return (qframe);
10352 }
10353 
10354 void
10355 pmap_quick_remove_page(vm_offset_t addr)
10356 {
10357 
10358 	if (addr != qframe)
10359 		return;
10360 	pte_store(vtopte(qframe), 0);
10361 	invlpg(qframe);
10362 	mtx_unlock_spin(&qframe_mtx);
10363 }
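
/*
 * Usage sketch for the two functions above ("m" and "buf" are hypothetical):
 * because a spin mutex may be held while the mapping exists (the non-DMAP
 * case), the caller must not sleep before removing the mapping.
 *
 *	vm_offset_t va = pmap_quick_enter_page(m);
 *	memcpy(buf, (void *)va, PAGE_SIZE);
 *	pmap_quick_remove_page(va);
 */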
10364 
10365 /*
10366  * Pdp pages from the large map are managed differently from either
10367  * kernel or user page table pages.  They are permanently allocated at
10368  * initialization time, and their reference count is permanently set to
10369  * zero.  The pml4 entries pointing to those pages are copied into
10370  * each allocated pmap.
10371  *
10372  * In contrast, pd and pt pages are managed like user page table
10373  * pages.  They are dynamically allocated, and their reference count
10374  * represents the number of valid entries within the page.
10375  */
10376 static vm_page_t
10377 pmap_large_map_getptp_unlocked(void)
10378 {
10379 	return (pmap_alloc_pt_page(kernel_pmap, 0, VM_ALLOC_ZERO));
10380 }
10381 
10382 static vm_page_t
10383 pmap_large_map_getptp(void)
10384 {
10385 	vm_page_t m;
10386 
10387 	PMAP_LOCK_ASSERT(kernel_pmap, MA_OWNED);
10388 	m = pmap_large_map_getptp_unlocked();
10389 	if (m == NULL) {
10390 		PMAP_UNLOCK(kernel_pmap);
10391 		vm_wait(NULL);
10392 		PMAP_LOCK(kernel_pmap);
10393 		/* Callers retry. */
10394 	}
10395 	return (m);
10396 }
10397 
10398 static pdp_entry_t *
10399 pmap_large_map_pdpe(vm_offset_t va)
10400 {
10401 	vm_pindex_t pml4_idx;
10402 	vm_paddr_t mphys;
10403 
10404 	pml4_idx = pmap_pml4e_index(va);
10405 	KASSERT(LMSPML4I <= pml4_idx && pml4_idx < LMSPML4I + lm_ents,
10406 	    ("pmap_large_map_pdpe: va %#jx out of range idx %#jx LMSPML4I "
10407 	    "%#jx lm_ents %d",
10408 	    (uintmax_t)va, (uintmax_t)pml4_idx, LMSPML4I, lm_ents));
10409 	KASSERT((kernel_pml4[pml4_idx] & X86_PG_V) != 0,
10410 	    ("pmap_large_map_pdpe: invalid pml4 for va %#jx idx %#jx "
10411 	    "LMSPML4I %#jx lm_ents %d",
10412 	    (uintmax_t)va, (uintmax_t)pml4_idx, LMSPML4I, lm_ents));
10413 	mphys = kernel_pml4[pml4_idx] & PG_FRAME;
10414 	return ((pdp_entry_t *)PHYS_TO_DMAP(mphys) + pmap_pdpe_index(va));
10415 }
10416 
10417 static pd_entry_t *
10418 pmap_large_map_pde(vm_offset_t va)
10419 {
10420 	pdp_entry_t *pdpe;
10421 	vm_page_t m;
10422 	vm_paddr_t mphys;
10423 
10424 retry:
10425 	pdpe = pmap_large_map_pdpe(va);
10426 	if (*pdpe == 0) {
10427 		m = pmap_large_map_getptp();
10428 		if (m == NULL)
10429 			goto retry;
10430 		mphys = VM_PAGE_TO_PHYS(m);
10431 		*pdpe = mphys | X86_PG_A | X86_PG_RW | X86_PG_V | pg_nx;
10432 	} else {
10433 		MPASS((*pdpe & X86_PG_PS) == 0);
10434 		mphys = *pdpe & PG_FRAME;
10435 	}
10436 	return ((pd_entry_t *)PHYS_TO_DMAP(mphys) + pmap_pde_index(va));
10437 }
10438 
10439 static pt_entry_t *
10440 pmap_large_map_pte(vm_offset_t va)
10441 {
10442 	pd_entry_t *pde;
10443 	vm_page_t m;
10444 	vm_paddr_t mphys;
10445 
10446 retry:
10447 	pde = pmap_large_map_pde(va);
10448 	if (*pde == 0) {
10449 		m = pmap_large_map_getptp();
10450 		if (m == NULL)
10451 			goto retry;
10452 		mphys = VM_PAGE_TO_PHYS(m);
10453 		*pde = mphys | X86_PG_A | X86_PG_RW | X86_PG_V | pg_nx;
10454 		PHYS_TO_VM_PAGE(DMAP_TO_PHYS((uintptr_t)pde))->ref_count++;
10455 	} else {
10456 		MPASS((*pde & X86_PG_PS) == 0);
10457 		mphys = *pde & PG_FRAME;
10458 	}
10459 	return ((pt_entry_t *)PHYS_TO_DMAP(mphys) + pmap_pte_index(va));
10460 }
10461 
10462 static vm_paddr_t
10463 pmap_large_map_kextract(vm_offset_t va)
10464 {
10465 	pdp_entry_t *pdpe, pdp;
10466 	pd_entry_t *pde, pd;
10467 	pt_entry_t *pte, pt;
10468 
10469 	KASSERT(PMAP_ADDRESS_IN_LARGEMAP(va),
10470 	    ("not largemap range %#lx", (u_long)va));
10471 	pdpe = pmap_large_map_pdpe(va);
10472 	pdp = *pdpe;
10473 	KASSERT((pdp & X86_PG_V) != 0,
10474 	    ("invalid pdp va %#lx pdpe %#lx pdp %#lx", va,
10475 	    (u_long)pdpe, pdp));
10476 	if ((pdp & X86_PG_PS) != 0) {
10477 		KASSERT((amd_feature & AMDID_PAGE1GB) != 0,
10478 		    ("no 1G pages, va %#lx pdpe %#lx pdp %#lx", va,
10479 		    (u_long)pdpe, pdp));
10480 		return ((pdp & PG_PS_PDP_FRAME) | (va & PDPMASK));
10481 	}
10482 	pde = pmap_pdpe_to_pde(pdpe, va);
10483 	pd = *pde;
10484 	KASSERT((pd & X86_PG_V) != 0,
10485 	    ("invalid pd va %#lx pde %#lx pd %#lx", va, (u_long)pde, pd));
10486 	if ((pd & X86_PG_PS) != 0)
10487 		return ((pd & PG_PS_FRAME) | (va & PDRMASK));
10488 	pte = pmap_pde_to_pte(pde, va);
10489 	pt = *pte;
10490 	KASSERT((pt & X86_PG_V) != 0,
10491 	    ("invalid pte va %#lx pte %#lx pt %#lx", va, (u_long)pte, pt));
10492 	return ((pt & PG_FRAME) | (va & PAGE_MASK));
10493 }
10494 
10495 static int
10496 pmap_large_map_getva(vm_size_t len, vm_offset_t align, vm_offset_t phase,
10497     vmem_addr_t *vmem_res)
10498 {
10499 
10500 	/*
10501 	 * Large mappings are all but static.  Consequently, there
10502 	 * is no point in waiting for an earlier allocation to be
10503 	 * freed.
10504 	 */
10505 	return (vmem_xalloc(large_vmem, len, align, phase, 0, VMEM_ADDR_MIN,
10506 	    VMEM_ADDR_MAX, M_NOWAIT | M_BESTFIT, vmem_res));
10507 }
10508 
10509 int
10510 pmap_large_map(vm_paddr_t spa, vm_size_t len, void **addr,
10511     vm_memattr_t mattr)
10512 {
10513 	pdp_entry_t *pdpe;
10514 	pd_entry_t *pde;
10515 	pt_entry_t *pte;
10516 	vm_offset_t va, inc;
10517 	vmem_addr_t vmem_res;
10518 	vm_paddr_t pa;
10519 	int error;
10520 
10521 	if (len == 0 || spa + len < spa)
10522 		return (EINVAL);
10523 
10524 	/* See if DMAP can serve. */
10525 	if (spa + len <= dmaplimit) {
10526 		va = PHYS_TO_DMAP(spa);
10527 		*addr = (void *)va;
10528 		return (pmap_change_attr(va, len, mattr));
10529 	}
10530 
10531 	/*
10532 	 * No, allocate KVA.  Fit the address with the best possible
10533 	 * alignment for superpages, falling back to a weaker alignment
10534 	 * if that fails.
10535 	 */
10536 	error = ENOMEM;
10537 	if ((amd_feature & AMDID_PAGE1GB) != 0 && rounddown2(spa + len,
10538 	    NBPDP) >= roundup2(spa, NBPDP) + NBPDP)
10539 		error = pmap_large_map_getva(len, NBPDP, spa & PDPMASK,
10540 		    &vmem_res);
10541 	if (error != 0 && rounddown2(spa + len, NBPDR) >= roundup2(spa,
10542 	    NBPDR) + NBPDR)
10543 		error = pmap_large_map_getva(len, NBPDR, spa & PDRMASK,
10544 		    &vmem_res);
10545 	if (error != 0)
10546 		error = pmap_large_map_getva(len, PAGE_SIZE, 0, &vmem_res);
10547 	if (error != 0)
10548 		return (error);
10549 
10550 	/*
10551 	 * Fill the page table.  PG_M is not pre-set; we scan the modified
10552 	 * bits in the page table to minimize flushing.  There is no need
10553 	 * to invalidate the TLB, since we only update invalid entries.
10554 	 */
10555 	PMAP_LOCK(kernel_pmap);
10556 	for (pa = spa, va = vmem_res; len > 0; pa += inc, va += inc,
10557 	    len -= inc) {
10558 		if ((amd_feature & AMDID_PAGE1GB) != 0 && len >= NBPDP &&
10559 		    (pa & PDPMASK) == 0 && (va & PDPMASK) == 0) {
10560 			pdpe = pmap_large_map_pdpe(va);
10561 			MPASS(*pdpe == 0);
10562 			*pdpe = pa | pg_g | X86_PG_PS | X86_PG_RW |
10563 			    X86_PG_V | X86_PG_A | pg_nx |
10564 			    pmap_cache_bits(kernel_pmap, mattr, TRUE);
10565 			inc = NBPDP;
10566 		} else if (len >= NBPDR && (pa & PDRMASK) == 0 &&
10567 		    (va & PDRMASK) == 0) {
10568 			pde = pmap_large_map_pde(va);
10569 			MPASS(*pde == 0);
10570 			*pde = pa | pg_g | X86_PG_PS | X86_PG_RW |
10571 			    X86_PG_V | X86_PG_A | pg_nx |
10572 			    pmap_cache_bits(kernel_pmap, mattr, TRUE);
10573 			PHYS_TO_VM_PAGE(DMAP_TO_PHYS((uintptr_t)pde))->
10574 			    ref_count++;
10575 			inc = NBPDR;
10576 		} else {
10577 			pte = pmap_large_map_pte(va);
10578 			MPASS(*pte == 0);
10579 			*pte = pa | pg_g | X86_PG_RW | X86_PG_V |
10580 			    X86_PG_A | pg_nx | pmap_cache_bits(kernel_pmap,
10581 			    mattr, FALSE);
10582 			PHYS_TO_VM_PAGE(DMAP_TO_PHYS((uintptr_t)pte))->
10583 			    ref_count++;
10584 			inc = PAGE_SIZE;
10585 		}
10586 	}
10587 	PMAP_UNLOCK(kernel_pmap);
10588 	MPASS(len == 0);
10589 
10590 	*addr = (void *)vmem_res;
10591 	return (0);
10592 }
10593 
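/*
 * Illustrative sketch of the pmap_large_map()/pmap_large_unmap() pair.
 * "spa" and "len" stand for a caller-supplied physical range (for
 * instance an NVDIMM SPA range) and are assumptions of the example:
 *
 *	void *va;
 *	int error;
 *
 *	error = pmap_large_map(spa, len, &va, VM_MEMATTR_WRITE_BACK);
 *	if (error == 0) {
 *		(access len bytes through va)
 *		pmap_large_unmap(va, len);
 *	}
 *
 * If the range fits below dmaplimit, the returned address is the DMAP
 * alias; otherwise KVA is carved out of large_vmem and is released by
 * pmap_large_unmap() with the same address and length.
 */
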
10594 void
10595 pmap_large_unmap(void *svaa, vm_size_t len)
10596 {
10597 	vm_offset_t sva, va;
10598 	vm_size_t inc;
10599 	pdp_entry_t *pdpe, pdp;
10600 	pd_entry_t *pde, pd;
10601 	pt_entry_t *pte;
10602 	vm_page_t m;
10603 	struct spglist spgf;
10604 
10605 	sva = (vm_offset_t)svaa;
10606 	if (len == 0 || sva + len < sva || (sva >= DMAP_MIN_ADDRESS &&
10607 	    sva + len <= DMAP_MIN_ADDRESS + dmaplimit))
10608 		return;
10609 
10610 	SLIST_INIT(&spgf);
10611 	KASSERT(PMAP_ADDRESS_IN_LARGEMAP(sva) &&
10612 	    PMAP_ADDRESS_IN_LARGEMAP(sva + len - 1),
10613 	    ("not largemap range %#lx %#lx", (u_long)svaa, (u_long)svaa + len));
10614 	PMAP_LOCK(kernel_pmap);
10615 	for (va = sva; va < sva + len; va += inc) {
10616 		pdpe = pmap_large_map_pdpe(va);
10617 		pdp = *pdpe;
10618 		KASSERT((pdp & X86_PG_V) != 0,
10619 		    ("invalid pdp va %#lx pdpe %#lx pdp %#lx", va,
10620 		    (u_long)pdpe, pdp));
10621 		if ((pdp & X86_PG_PS) != 0) {
10622 			KASSERT((amd_feature & AMDID_PAGE1GB) != 0,
10623 			    ("no 1G pages, va %#lx pdpe %#lx pdp %#lx", va,
10624 			    (u_long)pdpe, pdp));
10625 			KASSERT((va & PDPMASK) == 0,
10626 			    ("PDPMASK bit set, va %#lx pdpe %#lx pdp %#lx", va,
10627 			    (u_long)pdpe, pdp));
10628 			KASSERT(va + NBPDP <= sva + len,
10629 			    ("unmap covers partial 1GB page, sva %#lx va %#lx "
10630 			    "pdpe %#lx pdp %#lx len %#lx", sva, va,
10631 			    (u_long)pdpe, pdp, len));
10632 			*pdpe = 0;
10633 			inc = NBPDP;
10634 			continue;
10635 		}
10636 		pde = pmap_pdpe_to_pde(pdpe, va);
10637 		pd = *pde;
10638 		KASSERT((pd & X86_PG_V) != 0,
10639 		    ("invalid pd va %#lx pde %#lx pd %#lx", va,
10640 		    (u_long)pde, pd));
10641 		if ((pd & X86_PG_PS) != 0) {
10642 			KASSERT((va & PDRMASK) == 0,
10643 			    ("PDRMASK bit set, va %#lx pde %#lx pd %#lx", va,
10644 			    (u_long)pde, pd));
10645 			KASSERT(va + NBPDR <= sva + len,
10646 			    ("unmap covers partial 2MB page, sva %#lx va %#lx "
10647 			    "pde %#lx pd %#lx len %#lx", sva, va, (u_long)pde,
10648 			    pd, len));
10649 			pde_store(pde, 0);
10650 			inc = NBPDR;
10651 			m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pde));
10652 			m->ref_count--;
10653 			if (m->ref_count == 0) {
10654 				*pdpe = 0;
10655 				SLIST_INSERT_HEAD(&spgf, m, plinks.s.ss);
10656 			}
10657 			continue;
10658 		}
10659 		pte = pmap_pde_to_pte(pde, va);
10660 		KASSERT((*pte & X86_PG_V) != 0,
10661 		    ("invalid pte va %#lx pte %#lx pt %#lx", va,
10662 		    (u_long)pte, *pte));
10663 		pte_clear(pte);
10664 		inc = PAGE_SIZE;
10665 		m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pte));
10666 		m->ref_count--;
10667 		if (m->ref_count == 0) {
10668 			*pde = 0;
10669 			SLIST_INSERT_HEAD(&spgf, m, plinks.s.ss);
10670 			m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pde));
10671 			m->ref_count--;
10672 			if (m->ref_count == 0) {
10673 				*pdpe = 0;
10674 				SLIST_INSERT_HEAD(&spgf, m, plinks.s.ss);
10675 			}
10676 		}
10677 	}
10678 	pmap_invalidate_range(kernel_pmap, sva, sva + len);
10679 	PMAP_UNLOCK(kernel_pmap);
10680 	vm_page_free_pages_toq(&spgf, false);
10681 	vmem_free(large_vmem, sva, len);
10682 }
10683 
10684 static void
10685 pmap_large_map_wb_fence_mfence(void)
10686 {
10687 
10688 	mfence();
10689 }
10690 
10691 static void
10692 pmap_large_map_wb_fence_atomic(void)
10693 {
10694 
10695 	atomic_thread_fence_seq_cst();
10696 }
10697 
10698 static void
10699 pmap_large_map_wb_fence_nop(void)
10700 {
10701 }
10702 
10703 DEFINE_IFUNC(static, void, pmap_large_map_wb_fence, (void))
10704 {
10705 
10706 	if (cpu_vendor_id != CPU_VENDOR_INTEL)
10707 		return (pmap_large_map_wb_fence_mfence);
10708 	else if ((cpu_stdext_feature & (CPUID_STDEXT_CLWB |
10709 	    CPUID_STDEXT_CLFLUSHOPT)) == 0)
10710 		return (pmap_large_map_wb_fence_atomic);
10711 	else
10712 		/* clflush is ordered strongly enough */
10713 		return (pmap_large_map_wb_fence_nop);
10714 }
10715 
10716 static void
10717 pmap_large_map_flush_range_clwb(vm_offset_t va, vm_size_t len)
10718 {
10719 
10720 	for (; len > 0; len -= cpu_clflush_line_size,
10721 	    va += cpu_clflush_line_size)
10722 		clwb(va);
10723 }
10724 
10725 static void
10726 pmap_large_map_flush_range_clflushopt(vm_offset_t va, vm_size_t len)
10727 {
10728 
10729 	for (; len > 0; len -= cpu_clflush_line_size,
10730 	    va += cpu_clflush_line_size)
10731 		clflushopt(va);
10732 }
10733 
10734 static void
10735 pmap_large_map_flush_range_clflush(vm_offset_t va, vm_size_t len)
10736 {
10737 
10738 	for (; len > 0; len -= cpu_clflush_line_size,
10739 	    va += cpu_clflush_line_size)
10740 		clflush(va);
10741 }
10742 
10743 static void
10744 pmap_large_map_flush_range_nop(vm_offset_t sva __unused, vm_size_t len __unused)
10745 {
10746 }
10747 
10748 DEFINE_IFUNC(static, void, pmap_large_map_flush_range, (vm_offset_t, vm_size_t))
10749 {
10750 
10751 	if ((cpu_stdext_feature & CPUID_STDEXT_CLWB) != 0)
10752 		return (pmap_large_map_flush_range_clwb);
10753 	else if ((cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0)
10754 		return (pmap_large_map_flush_range_clflushopt);
10755 	else if ((cpu_feature & CPUID_CLFSH) != 0)
10756 		return (pmap_large_map_flush_range_clflush);
10757 	else
10758 		return (pmap_large_map_flush_range_nop);
10759 }
10760 
10761 static void
10762 pmap_large_map_wb_large(vm_offset_t sva, vm_offset_t eva)
10763 {
10764 	volatile u_long *pe;
10765 	u_long p;
10766 	vm_offset_t va;
10767 	vm_size_t inc;
10768 	bool seen_other;
10769 
10770 	for (va = sva; va < eva; va += inc) {
10771 		inc = 0;
10772 		if ((amd_feature & AMDID_PAGE1GB) != 0) {
10773 			pe = (volatile u_long *)pmap_large_map_pdpe(va);
10774 			p = *pe;
10775 			if ((p & X86_PG_PS) != 0)
10776 				inc = NBPDP;
10777 		}
10778 		if (inc == 0) {
10779 			pe = (volatile u_long *)pmap_large_map_pde(va);
10780 			p = *pe;
10781 			if ((p & X86_PG_PS) != 0)
10782 				inc = NBPDR;
10783 		}
10784 		if (inc == 0) {
10785 			pe = (volatile u_long *)pmap_large_map_pte(va);
10786 			p = *pe;
10787 			inc = PAGE_SIZE;
10788 		}
10789 		seen_other = false;
10790 		for (;;) {
10791 			if ((p & X86_PG_AVAIL1) != 0) {
10792 				/*
10793 				 * Spin-wait for the end of a parallel
10794 				 * write-back.
10795 				 */
10796 				cpu_spinwait();
10797 				p = *pe;
10798 
10799 				/*
10800 				 * If we saw another write-back
10801 				 * occurring, we cannot rely on PG_M to
10802 				 * indicate the state of the cache.  The
10803 				 * PG_M bit is cleared before the
10804 				 * flush to avoid ignoring new writes,
10805 				 * and writes which are relevant for
10806 				 * us might happen after.
10807 				 */
10808 				seen_other = true;
10809 				continue;
10810 			}
10811 
10812 			if ((p & X86_PG_M) != 0 || seen_other) {
10813 				if (!atomic_fcmpset_long(pe, &p,
10814 				    (p & ~X86_PG_M) | X86_PG_AVAIL1))
10815 					/*
10816 					 * If we saw PG_M without
10817 					 * PG_AVAIL1, and then on the
10818 					 * next attempt we do not
10819 					 * observe either PG_M or
10820 					 * PG_AVAIL1, the other
10821 					 * write-back started after us
10822 					 * and finished before us.  We
10823 					 * can rely on it doing our
10824 					 * work.
10825 					 */
10826 					continue;
10827 				pmap_large_map_flush_range(va, inc);
10828 				atomic_clear_long(pe, X86_PG_AVAIL1);
10829 			}
10830 			break;
10831 		}
10832 		maybe_yield();
10833 	}
10834 }
10835 
10836 /*
10837  * Write-back cache lines for the given address range.
10838  *
10839  * Must be called only on the range or sub-range returned from
10840  * pmap_large_map().  Must not be called on the coalesced ranges.
10841  *
10842  * Does nothing on CPUs that lack support for the CLWB, CLFLUSHOPT,
10843  * and CLFLUSH instructions.
10844  */
10845 void
10846 pmap_large_map_wb(void *svap, vm_size_t len)
10847 {
10848 	vm_offset_t eva, sva;
10849 
10850 	sva = (vm_offset_t)svap;
10851 	eva = sva + len;
10852 	pmap_large_map_wb_fence();
10853 	if (sva >= DMAP_MIN_ADDRESS && eva <= DMAP_MIN_ADDRESS + dmaplimit) {
10854 		pmap_large_map_flush_range(sva, len);
10855 	} else {
10856 		KASSERT(sva >= LARGEMAP_MIN_ADDRESS &&
10857 		    eva <= LARGEMAP_MIN_ADDRESS + lm_ents * NBPML4,
10858 		    ("pmap_large_map_wb: not largemap %#lx %#lx", sva, len));
10859 		pmap_large_map_wb_large(sva, eva);
10860 	}
10861 	pmap_large_map_wb_fence();
10862 }
10863 
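/*
 * Illustrative write-back sequence for memory mapped through
 * pmap_large_map(); "dst", "src", and "len" are assumed locals, and
 * dst must lie within a single range returned by pmap_large_map():
 *
 *	memcpy(dst, src, len);
 *	pmap_large_map_wb(dst, len);
 *
 * pmap_large_map_wb() brackets the flush with the fence selected by
 * the pmap_large_map_wb_fence ifunc above, as appropriate for the CPU.
 */
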
10864 static vm_page_t
10865 pmap_pti_alloc_page(void)
10866 {
10867 	vm_page_t m;
10868 
10869 	VM_OBJECT_ASSERT_WLOCKED(pti_obj);
10870 	m = vm_page_grab(pti_obj, pti_pg_idx++, VM_ALLOC_NOBUSY |
10871 	    VM_ALLOC_WIRED | VM_ALLOC_ZERO);
10872 	return (m);
10873 }
10874 
10875 static bool
10876 pmap_pti_free_page(vm_page_t m)
10877 {
10878 
10879 	KASSERT(m->ref_count > 0, ("page %p not referenced", m));
10880 	if (!vm_page_unwire_noq(m))
10881 		return (false);
10882 	vm_page_free_zero(m);
10883 	return (true);
10884 }
10885 
10886 static void
10887 pmap_pti_init(void)
10888 {
10889 	vm_page_t pml4_pg;
10890 	pdp_entry_t *pdpe;
10891 	vm_offset_t va;
10892 	int i;
10893 
10894 	if (!pti)
10895 		return;
10896 	pti_obj = vm_pager_allocate(OBJT_PHYS, NULL, 0, VM_PROT_ALL, 0, NULL);
10897 	VM_OBJECT_WLOCK(pti_obj);
10898 	pml4_pg = pmap_pti_alloc_page();
10899 	pti_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pml4_pg));
10900 	for (va = VM_MIN_KERNEL_ADDRESS; va <= VM_MAX_KERNEL_ADDRESS &&
10901 	    va >= VM_MIN_KERNEL_ADDRESS && va > NBPML4; va += NBPML4) {
10902 		pdpe = pmap_pti_pdpe(va);
10903 		pmap_pti_wire_pte(pdpe);
10904 	}
10905 	pmap_pti_add_kva_locked((vm_offset_t)&__pcpu[0],
10906 	    (vm_offset_t)&__pcpu[0] + sizeof(__pcpu[0]) * MAXCPU, false);
10907 	pmap_pti_add_kva_locked((vm_offset_t)idt, (vm_offset_t)idt +
10908 	    sizeof(struct gate_descriptor) * NIDT, false);
10909 	CPU_FOREACH(i) {
10910 		/* Doublefault stack IST 1 */
10911 		va = __pcpu[i].pc_common_tss.tss_ist1 + sizeof(struct nmi_pcpu);
10912 		pmap_pti_add_kva_locked(va - DBLFAULT_STACK_SIZE, va, false);
10913 		/* NMI stack IST 2 */
10914 		va = __pcpu[i].pc_common_tss.tss_ist2 + sizeof(struct nmi_pcpu);
10915 		pmap_pti_add_kva_locked(va - NMI_STACK_SIZE, va, false);
10916 		/* MC# stack IST 3 */
10917 		va = __pcpu[i].pc_common_tss.tss_ist3 +
10918 		    sizeof(struct nmi_pcpu);
10919 		pmap_pti_add_kva_locked(va - MCE_STACK_SIZE, va, false);
10920 		/* DB# stack IST 4 */
10921 		va = __pcpu[i].pc_common_tss.tss_ist4 + sizeof(struct nmi_pcpu);
10922 		pmap_pti_add_kva_locked(va - DBG_STACK_SIZE, va, false);
10923 	}
10924 	pmap_pti_add_kva_locked((vm_offset_t)KERNSTART, (vm_offset_t)etext,
10925 	    true);
10926 	pti_finalized = true;
10927 	VM_OBJECT_WUNLOCK(pti_obj);
10928 }
10929 
10930 static void
10931 pmap_cpu_init(void *arg __unused)
10932 {
10933 	CPU_COPY(&all_cpus, &kernel_pmap->pm_active);
10934 	pmap_pti_init();
10935 }
10936 SYSINIT(pmap_cpu, SI_SUB_CPU + 1, SI_ORDER_ANY, pmap_cpu_init, NULL);
10937 
10938 static pdp_entry_t *
10939 pmap_pti_pdpe(vm_offset_t va)
10940 {
10941 	pml4_entry_t *pml4e;
10942 	pdp_entry_t *pdpe;
10943 	vm_page_t m;
10944 	vm_pindex_t pml4_idx;
10945 	vm_paddr_t mphys;
10946 
10947 	VM_OBJECT_ASSERT_WLOCKED(pti_obj);
10948 
10949 	pml4_idx = pmap_pml4e_index(va);
10950 	pml4e = &pti_pml4[pml4_idx];
10951 	m = NULL;
10952 	if (*pml4e == 0) {
10953 		if (pti_finalized)
10954 			panic("pml4 alloc after finalization");
10955 		m = pmap_pti_alloc_page();
10956 		if (*pml4e != 0) {
10957 			pmap_pti_free_page(m);
10958 			mphys = *pml4e & ~PAGE_MASK;
10959 		} else {
10960 			mphys = VM_PAGE_TO_PHYS(m);
10961 			*pml4e = mphys | X86_PG_RW | X86_PG_V;
10962 		}
10963 	} else {
10964 		mphys = *pml4e & ~PAGE_MASK;
10965 	}
10966 	pdpe = (pdp_entry_t *)PHYS_TO_DMAP(mphys) + pmap_pdpe_index(va);
10967 	return (pdpe);
10968 }
10969 
10970 static void
10971 pmap_pti_wire_pte(void *pte)
10972 {
10973 	vm_page_t m;
10974 
10975 	VM_OBJECT_ASSERT_WLOCKED(pti_obj);
10976 	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((uintptr_t)pte));
10977 	m->ref_count++;
10978 }
10979 
10980 static void
10981 pmap_pti_unwire_pde(void *pde, bool only_ref)
10982 {
10983 	vm_page_t m;
10984 
10985 	VM_OBJECT_ASSERT_WLOCKED(pti_obj);
10986 	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((uintptr_t)pde));
10987 	MPASS(m->ref_count > 0);
10988 	MPASS(only_ref || m->ref_count > 1);
10989 	pmap_pti_free_page(m);
10990 }
10991 
10992 static void
10993 pmap_pti_unwire_pte(void *pte, vm_offset_t va)
10994 {
10995 	vm_page_t m;
10996 	pd_entry_t *pde;
10997 
10998 	VM_OBJECT_ASSERT_WLOCKED(pti_obj);
10999 	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((uintptr_t)pte));
11000 	MPASS(m->ref_count > 0);
11001 	if (pmap_pti_free_page(m)) {
11002 		pde = pmap_pti_pde(va);
11003 		MPASS((*pde & (X86_PG_PS | X86_PG_V)) == X86_PG_V);
11004 		*pde = 0;
11005 		pmap_pti_unwire_pde(pde, false);
11006 	}
11007 }
11008 
11009 static pd_entry_t *
11010 pmap_pti_pde(vm_offset_t va)
11011 {
11012 	pdp_entry_t *pdpe;
11013 	pd_entry_t *pde;
11014 	vm_page_t m;
11015 	vm_pindex_t pd_idx;
11016 	vm_paddr_t mphys;
11017 
11018 	VM_OBJECT_ASSERT_WLOCKED(pti_obj);
11019 
11020 	pdpe = pmap_pti_pdpe(va);
11021 	if (*pdpe == 0) {
11022 		m = pmap_pti_alloc_page();
11023 		if (*pdpe != 0) {
11024 			pmap_pti_free_page(m);
11025 			MPASS((*pdpe & X86_PG_PS) == 0);
11026 			mphys = *pdpe & ~PAGE_MASK;
11027 		} else {
11028 			mphys = VM_PAGE_TO_PHYS(m);
11029 			*pdpe = mphys | X86_PG_RW | X86_PG_V;
11030 		}
11031 	} else {
11032 		MPASS((*pdpe & X86_PG_PS) == 0);
11033 		mphys = *pdpe & ~PAGE_MASK;
11034 	}
11035 
11036 	pde = (pd_entry_t *)PHYS_TO_DMAP(mphys);
11037 	pd_idx = pmap_pde_index(va);
11038 	pde += pd_idx;
11039 	return (pde);
11040 }
11041 
11042 static pt_entry_t *
11043 pmap_pti_pte(vm_offset_t va, bool *unwire_pde)
11044 {
11045 	pd_entry_t *pde;
11046 	pt_entry_t *pte;
11047 	vm_page_t m;
11048 	vm_paddr_t mphys;
11049 
11050 	VM_OBJECT_ASSERT_WLOCKED(pti_obj);
11051 
11052 	pde = pmap_pti_pde(va);
11053 	if (unwire_pde != NULL) {
11054 		*unwire_pde = true;
11055 		pmap_pti_wire_pte(pde);
11056 	}
11057 	if (*pde == 0) {
11058 		m = pmap_pti_alloc_page();
11059 		if (*pde != 0) {
11060 			pmap_pti_free_page(m);
11061 			MPASS((*pde & X86_PG_PS) == 0);
11062 			mphys = *pde & ~(PAGE_MASK | pg_nx);
11063 		} else {
11064 			mphys = VM_PAGE_TO_PHYS(m);
11065 			*pde = mphys | X86_PG_RW | X86_PG_V;
11066 			if (unwire_pde != NULL)
11067 				*unwire_pde = false;
11068 		}
11069 	} else {
11070 		MPASS((*pde & X86_PG_PS) == 0);
11071 		mphys = *pde & ~(PAGE_MASK | pg_nx);
11072 	}
11073 
11074 	pte = (pt_entry_t *)PHYS_TO_DMAP(mphys);
11075 	pte += pmap_pte_index(va);
11076 
11077 	return (pte);
11078 }
11079 
11080 static void
11081 pmap_pti_add_kva_locked(vm_offset_t sva, vm_offset_t eva, bool exec)
11082 {
11083 	vm_paddr_t pa;
11084 	pd_entry_t *pde;
11085 	pt_entry_t *pte, ptev;
11086 	bool unwire_pde;
11087 
11088 	VM_OBJECT_ASSERT_WLOCKED(pti_obj);
11089 
11090 	sva = trunc_page(sva);
11091 	MPASS(sva > VM_MAXUSER_ADDRESS);
11092 	eva = round_page(eva);
11093 	MPASS(sva < eva);
11094 	for (; sva < eva; sva += PAGE_SIZE) {
11095 		pte = pmap_pti_pte(sva, &unwire_pde);
11096 		pa = pmap_kextract(sva);
11097 		ptev = pa | X86_PG_RW | X86_PG_V | X86_PG_A | X86_PG_G |
11098 		    (exec ? 0 : pg_nx) | pmap_cache_bits(kernel_pmap,
11099 		    VM_MEMATTR_DEFAULT, FALSE);
11100 		if (*pte == 0) {
11101 			pte_store(pte, ptev);
11102 			pmap_pti_wire_pte(pte);
11103 		} else {
11104 			KASSERT(!pti_finalized,
11105 			    ("pti overlap after fin %#lx %#lx %#lx",
11106 			    sva, *pte, ptev));
11107 			KASSERT(*pte == ptev,
11108 			    ("pti non-identical pte after fin %#lx %#lx %#lx",
11109 			    sva, *pte, ptev));
11110 		}
11111 		if (unwire_pde) {
11112 			pde = pmap_pti_pde(sva);
11113 			pmap_pti_unwire_pde(pde, true);
11114 		}
11115 	}
11116 }
11117 
11118 void
11119 pmap_pti_add_kva(vm_offset_t sva, vm_offset_t eva, bool exec)
11120 {
11121 
11122 	if (!pti)
11123 		return;
11124 	VM_OBJECT_WLOCK(pti_obj);
11125 	pmap_pti_add_kva_locked(sva, eva, exec);
11126 	VM_OBJECT_WUNLOCK(pti_obj);
11127 }
11128 
11129 void
11130 pmap_pti_remove_kva(vm_offset_t sva, vm_offset_t eva)
11131 {
11132 	pt_entry_t *pte;
11133 	vm_offset_t va;
11134 
11135 	if (!pti)
11136 		return;
11137 	sva = rounddown2(sva, PAGE_SIZE);
11138 	MPASS(sva > VM_MAXUSER_ADDRESS);
11139 	eva = roundup2(eva, PAGE_SIZE);
11140 	MPASS(sva < eva);
11141 	VM_OBJECT_WLOCK(pti_obj);
11142 	for (va = sva; va < eva; va += PAGE_SIZE) {
11143 		pte = pmap_pti_pte(va, NULL);
11144 		KASSERT((*pte & X86_PG_V) != 0,
11145 		    ("invalid pte va %#lx pte %#lx pt %#lx", va,
11146 		    (u_long)pte, *pte));
11147 		pte_clear(pte);
11148 		pmap_pti_unwire_pte(pte, va);
11149 	}
11150 	pmap_invalidate_range(kernel_pmap, sva, eva);
11151 	VM_OBJECT_WUNLOCK(pti_obj);
11152 }
11153 
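/*
 * Illustrative sketch of the KPTI helpers above.  A kernel object that
 * must remain visible on the user (trampoline) page table, such as a
 * descriptor table or an IST stack, is registered and later withdrawn;
 * "buf" and "size" are assumptions of the example:
 *
 *	pmap_pti_add_kva((vm_offset_t)buf, (vm_offset_t)buf + size, false);
 *	...
 *	pmap_pti_remove_kva((vm_offset_t)buf, (vm_offset_t)buf + size);
 *
 * The "exec" argument controls whether pg_nx is set on the PTI
 * mapping; both calls are no-ops when PTI is disabled.
 */
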
11154 static void *
11155 pkru_dup_range(void *ctx __unused, void *data)
11156 {
11157 	struct pmap_pkru_range *node, *new_node;
11158 
11159 	new_node = uma_zalloc(pmap_pkru_ranges_zone, M_NOWAIT);
11160 	if (new_node == NULL)
11161 		return (NULL);
11162 	node = data;
11163 	memcpy(new_node, node, sizeof(*node));
11164 	return (new_node);
11165 }
11166 
11167 static void
11168 pkru_free_range(void *ctx __unused, void *node)
11169 {
11170 
11171 	uma_zfree(pmap_pkru_ranges_zone, node);
11172 }
11173 
11174 static int
11175 pmap_pkru_assign(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, u_int keyidx,
11176     int flags)
11177 {
11178 	struct pmap_pkru_range *ppr;
11179 	int error;
11180 
11181 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
11182 	MPASS(pmap->pm_type == PT_X86);
11183 	MPASS((cpu_stdext_feature2 & CPUID_STDEXT2_PKU) != 0);
11184 	if ((flags & AMD64_PKRU_EXCL) != 0 &&
11185 	    !rangeset_check_empty(&pmap->pm_pkru, sva, eva))
11186 		return (EBUSY);
11187 	ppr = uma_zalloc(pmap_pkru_ranges_zone, M_NOWAIT);
11188 	if (ppr == NULL)
11189 		return (ENOMEM);
11190 	ppr->pkru_keyidx = keyidx;
11191 	ppr->pkru_flags = flags & AMD64_PKRU_PERSIST;
11192 	error = rangeset_insert(&pmap->pm_pkru, sva, eva, ppr);
11193 	if (error != 0)
11194 		uma_zfree(pmap_pkru_ranges_zone, ppr);
11195 	return (error);
11196 }
11197 
11198 static int
11199 pmap_pkru_deassign(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
11200 {
11201 
11202 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
11203 	MPASS(pmap->pm_type == PT_X86);
11204 	MPASS((cpu_stdext_feature2 & CPUID_STDEXT2_PKU) != 0);
11205 	return (rangeset_remove(&pmap->pm_pkru, sva, eva));
11206 }
11207 
11208 static void
11209 pmap_pkru_deassign_all(pmap_t pmap)
11210 {
11211 
11212 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
11213 	if (pmap->pm_type == PT_X86 &&
11214 	    (cpu_stdext_feature2 & CPUID_STDEXT2_PKU) != 0)
11215 		rangeset_remove_all(&pmap->pm_pkru);
11216 }
11217 
11218 static bool
11219 pmap_pkru_same(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
11220 {
11221 	struct pmap_pkru_range *ppr, *prev_ppr;
11222 	vm_offset_t va;
11223 
11224 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
11225 	if (pmap->pm_type != PT_X86 ||
11226 	    (cpu_stdext_feature2 & CPUID_STDEXT2_PKU) == 0 ||
11227 	    sva >= VM_MAXUSER_ADDRESS)
11228 		return (true);
11229 	MPASS(eva <= VM_MAXUSER_ADDRESS);
11230 	for (va = sva; va < eva; prev_ppr = ppr) {
11231 		ppr = rangeset_lookup(&pmap->pm_pkru, va);
11232 		if (va == sva)
11233 			prev_ppr = ppr;
11234 		else if ((ppr == NULL) ^ (prev_ppr == NULL))
11235 			return (false);
11236 		if (ppr == NULL) {
11237 			va += PAGE_SIZE;
11238 			continue;
11239 		}
11240 		if (prev_ppr->pkru_keyidx != ppr->pkru_keyidx)
11241 			return (false);
11242 		va = ppr->pkru_rs_el.re_end;
11243 	}
11244 	return (true);
11245 }
11246 
11247 static pt_entry_t
11248 pmap_pkru_get(pmap_t pmap, vm_offset_t va)
11249 {
11250 	struct pmap_pkru_range *ppr;
11251 
11252 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
11253 	if (pmap->pm_type != PT_X86 ||
11254 	    (cpu_stdext_feature2 & CPUID_STDEXT2_PKU) == 0 ||
11255 	    va >= VM_MAXUSER_ADDRESS)
11256 		return (0);
11257 	ppr = rangeset_lookup(&pmap->pm_pkru, va);
11258 	if (ppr != NULL)
11259 		return (X86_PG_PKU(ppr->pkru_keyidx));
11260 	return (0);
11261 }
11262 
11263 static bool
11264 pred_pkru_on_remove(void *ctx __unused, void *r)
11265 {
11266 	struct pmap_pkru_range *ppr;
11267 
11268 	ppr = r;
11269 	return ((ppr->pkru_flags & AMD64_PKRU_PERSIST) == 0);
11270 }
11271 
11272 static void
11273 pmap_pkru_on_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
11274 {
11275 
11276 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
11277 	if (pmap->pm_type == PT_X86 &&
11278 	    (cpu_stdext_feature2 & CPUID_STDEXT2_PKU) != 0) {
11279 		rangeset_remove_pred(&pmap->pm_pkru, sva, eva,
11280 		    pred_pkru_on_remove);
11281 	}
11282 }
11283 
11284 static int
11285 pmap_pkru_copy(pmap_t dst_pmap, pmap_t src_pmap)
11286 {
11287 
11288 	PMAP_LOCK_ASSERT(dst_pmap, MA_OWNED);
11289 	PMAP_LOCK_ASSERT(src_pmap, MA_OWNED);
11290 	MPASS(dst_pmap->pm_type == PT_X86);
11291 	MPASS(src_pmap->pm_type == PT_X86);
11292 	MPASS((cpu_stdext_feature2 & CPUID_STDEXT2_PKU) != 0);
11293 	if (src_pmap->pm_pkru.rs_data_ctx == NULL)
11294 		return (0);
11295 	return (rangeset_copy(&dst_pmap->pm_pkru, &src_pmap->pm_pkru));
11296 }
11297 
11298 static void
11299 pmap_pkru_update_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
11300     u_int keyidx)
11301 {
11302 	pml4_entry_t *pml4e;
11303 	pdp_entry_t *pdpe;
11304 	pd_entry_t newpde, ptpaddr, *pde;
11305 	pt_entry_t newpte, *ptep, pte;
11306 	vm_offset_t va, va_next;
11307 	bool changed;
11308 
11309 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
11310 	MPASS(pmap->pm_type == PT_X86);
11311 	MPASS(keyidx <= PMAP_MAX_PKRU_IDX);
11312 
11313 	for (changed = false, va = sva; va < eva; va = va_next) {
11314 		pml4e = pmap_pml4e(pmap, va);
11315 		if (pml4e == NULL || (*pml4e & X86_PG_V) == 0) {
11316 			va_next = (va + NBPML4) & ~PML4MASK;
11317 			if (va_next < va)
11318 				va_next = eva;
11319 			continue;
11320 		}
11321 
11322 		pdpe = pmap_pml4e_to_pdpe(pml4e, va);
11323 		if ((*pdpe & X86_PG_V) == 0) {
11324 			va_next = (va + NBPDP) & ~PDPMASK;
11325 			if (va_next < va)
11326 				va_next = eva;
11327 			continue;
11328 		}
11329 
11330 		va_next = (va + NBPDR) & ~PDRMASK;
11331 		if (va_next < va)
11332 			va_next = eva;
11333 
11334 		pde = pmap_pdpe_to_pde(pdpe, va);
11335 		ptpaddr = *pde;
11336 		if (ptpaddr == 0)
11337 			continue;
11338 
11339 		MPASS((ptpaddr & X86_PG_V) != 0);
11340 		if ((ptpaddr & PG_PS) != 0) {
11341 			if (va + NBPDR == va_next && eva >= va_next) {
11342 				newpde = (ptpaddr & ~X86_PG_PKU_MASK) |
11343 				    X86_PG_PKU(keyidx);
11344 				if (newpde != ptpaddr) {
11345 					*pde = newpde;
11346 					changed = true;
11347 				}
11348 				continue;
11349 			} else if (!pmap_demote_pde(pmap, pde, va)) {
11350 				continue;
11351 			}
11352 		}
11353 
11354 		if (va_next > eva)
11355 			va_next = eva;
11356 
11357 		for (ptep = pmap_pde_to_pte(pde, va); va != va_next;
11358 		    ptep++, va += PAGE_SIZE) {
11359 			pte = *ptep;
11360 			if ((pte & X86_PG_V) == 0)
11361 				continue;
11362 			newpte = (pte & ~X86_PG_PKU_MASK) | X86_PG_PKU(keyidx);
11363 			if (newpte != pte) {
11364 				*ptep = newpte;
11365 				changed = true;
11366 			}
11367 		}
11368 	}
11369 	if (changed)
11370 		pmap_invalidate_range(pmap, sva, eva);
11371 }
11372 
11373 static int
11374 pmap_pkru_check_uargs(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
11375     u_int keyidx, int flags)
11376 {
11377 
11378 	if (pmap->pm_type != PT_X86 || keyidx > PMAP_MAX_PKRU_IDX ||
11379 	    (flags & ~(AMD64_PKRU_PERSIST | AMD64_PKRU_EXCL)) != 0)
11380 		return (EINVAL);
11381 	if (eva <= sva || eva > VM_MAXUSER_ADDRESS)
11382 		return (EFAULT);
11383 	if ((cpu_stdext_feature2 & CPUID_STDEXT2_PKU) == 0)
11384 		return (ENOTSUP);
11385 	return (0);
11386 }
11387 
11388 int
11389 pmap_pkru_set(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, u_int keyidx,
11390     int flags)
11391 {
11392 	int error;
11393 
11394 	sva = trunc_page(sva);
11395 	eva = round_page(eva);
11396 	error = pmap_pkru_check_uargs(pmap, sva, eva, keyidx, flags);
11397 	if (error != 0)
11398 		return (error);
11399 	for (;;) {
11400 		PMAP_LOCK(pmap);
11401 		error = pmap_pkru_assign(pmap, sva, eva, keyidx, flags);
11402 		if (error == 0)
11403 			pmap_pkru_update_range(pmap, sva, eva, keyidx);
11404 		PMAP_UNLOCK(pmap);
11405 		if (error != ENOMEM)
11406 			break;
11407 		vm_wait(NULL);
11408 	}
11409 	return (error);
11410 }
11411 
11412 int
11413 pmap_pkru_clear(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
11414 {
11415 	int error;
11416 
11417 	sva = trunc_page(sva);
11418 	eva = round_page(eva);
11419 	error = pmap_pkru_check_uargs(pmap, sva, eva, 0, 0);
11420 	if (error != 0)
11421 		return (error);
11422 	for (;;) {
11423 		PMAP_LOCK(pmap);
11424 		error = pmap_pkru_deassign(pmap, sva, eva);
11425 		if (error == 0)
11426 			pmap_pkru_update_range(pmap, sva, eva, 0);
11427 		PMAP_UNLOCK(pmap);
11428 		if (error != ENOMEM)
11429 			break;
11430 		vm_wait(NULL);
11431 	}
11432 	return (error);
11433 }
11434 
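/*
 * Illustrative sketch of how the PKRU key management above is driven,
 * e.g. from a sysarch(2)-style request.  "td", "sva", "eva", "keyidx",
 * and "error" are assumptions of the example:
 *
 *	pmap_t upmap = vmspace_pmap(td->td_proc->p_vmspace);
 *
 *	error = pmap_pkru_set(upmap, sva, eva, keyidx, AMD64_PKRU_EXCL);
 *	...
 *	error = pmap_pkru_clear(upmap, sva, eva);
 *
 * Both entry points take the pmap lock themselves, truncate and round
 * the range to page boundaries, and retry internally on ENOMEM, so the
 * caller only needs to check the final error.
 */
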
11435 #if defined(KASAN) || defined(KMSAN)
11436 static vm_page_t
11437 pmap_san_enter_alloc_4k(void)
11438 {
11439 	vm_page_t m;
11440 
11441 	m = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED |
11442 	    VM_ALLOC_ZERO);
11443 	if (m == NULL)
11444 		panic("%s: no memory to grow shadow map", __func__);
11445 	return (m);
11446 }
11447 
11448 static vm_page_t
11449 pmap_san_enter_alloc_2m(void)
11450 {
11451 	return (vm_page_alloc_noobj_contig(VM_ALLOC_WIRED | VM_ALLOC_ZERO,
11452 	    NPTEPG, 0, ~0ul, NBPDR, 0, VM_MEMATTR_DEFAULT));
11453 }
11454 
11455 /*
11456  * Grow a shadow map by at least one 4KB page at the specified address.  Use 2MB
11457  * pages when possible.
11458  */
11459 void
11460 pmap_san_enter(vm_offset_t va)
11461 {
11462 	pdp_entry_t *pdpe;
11463 	pd_entry_t *pde;
11464 	pt_entry_t *pte;
11465 	vm_page_t m;
11466 
11467 	mtx_assert(&kernel_map->system_mtx, MA_OWNED);
11468 
11469 	pdpe = pmap_pdpe(kernel_pmap, va);
11470 	if ((*pdpe & X86_PG_V) == 0) {
11471 		m = pmap_san_enter_alloc_4k();
11472 		*pdpe = (pdp_entry_t)(VM_PAGE_TO_PHYS(m) | X86_PG_RW |
11473 		    X86_PG_V | pg_nx);
11474 	}
11475 	pde = pmap_pdpe_to_pde(pdpe, va);
11476 	if ((*pde & X86_PG_V) == 0) {
11477 		m = pmap_san_enter_alloc_2m();
11478 		if (m != NULL) {
11479 			*pde = (pd_entry_t)(VM_PAGE_TO_PHYS(m) | X86_PG_RW |
11480 			    X86_PG_PS | X86_PG_V | X86_PG_A | X86_PG_M | pg_nx);
11481 		} else {
11482 			m = pmap_san_enter_alloc_4k();
11483 			*pde = (pd_entry_t)(VM_PAGE_TO_PHYS(m) | X86_PG_RW |
11484 			    X86_PG_V | pg_nx);
11485 		}
11486 	}
11487 	if ((*pde & X86_PG_PS) != 0)
11488 		return;
11489 	pte = pmap_pde_to_pte(pde, va);
11490 	if ((*pte & X86_PG_V) != 0)
11491 		return;
11492 	m = pmap_san_enter_alloc_4k();
11493 	*pte = (pt_entry_t)(VM_PAGE_TO_PHYS(m) | X86_PG_RW | X86_PG_V |
11494 	    X86_PG_M | X86_PG_A | pg_nx);
11495 }
11496 #endif
11497 
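/*
 * Illustrative (hedged) sketch of how pmap_san_enter() above is used:
 * the KASAN/KMSAN machine-dependent code grows the shadow map one page
 * at a time while holding the kernel map's system mutex, roughly
 *
 *	for (va = sva; va < eva; va += PAGE_SIZE)
 *		pmap_san_enter(va);
 *
 * where [sva, eva) is the shadow range backing newly added KVA; the
 * exact callers live outside this file.
 */
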
11498 /*
11499  * Track a range of the kernel's virtual address space that is contiguous
11500  * in various mapping attributes.
11501  */
11502 struct pmap_kernel_map_range {
11503 	vm_offset_t sva;
11504 	pt_entry_t attrs;
11505 	int ptes;
11506 	int pdes;
11507 	int pdpes;
11508 };
11509 
11510 static void
11511 sysctl_kmaps_dump(struct sbuf *sb, struct pmap_kernel_map_range *range,
11512     vm_offset_t eva)
11513 {
11514 	const char *mode;
11515 	int i, pat_idx;
11516 
11517 	if (eva <= range->sva)
11518 		return;
11519 
11520 	pat_idx = pmap_pat_index(kernel_pmap, range->attrs, true);
11521 	for (i = 0; i < PAT_INDEX_SIZE; i++)
11522 		if (pat_index[i] == pat_idx)
11523 			break;
11524 
11525 	switch (i) {
11526 	case PAT_WRITE_BACK:
11527 		mode = "WB";
11528 		break;
11529 	case PAT_WRITE_THROUGH:
11530 		mode = "WT";
11531 		break;
11532 	case PAT_UNCACHEABLE:
11533 		mode = "UC";
11534 		break;
11535 	case PAT_UNCACHED:
11536 		mode = "U-";
11537 		break;
11538 	case PAT_WRITE_PROTECTED:
11539 		mode = "WP";
11540 		break;
11541 	case PAT_WRITE_COMBINING:
11542 		mode = "WC";
11543 		break;
11544 	default:
11545 		printf("%s: unknown PAT mode %#x for range 0x%016lx-0x%016lx\n",
11546 		    __func__, pat_idx, range->sva, eva);
11547 		mode = "??";
11548 		break;
11549 	}
11550 
11551 	sbuf_printf(sb, "0x%016lx-0x%016lx r%c%c%c%c %s %d %d %d\n",
11552 	    range->sva, eva,
11553 	    (range->attrs & X86_PG_RW) != 0 ? 'w' : '-',
11554 	    (range->attrs & pg_nx) != 0 ? '-' : 'x',
11555 	    (range->attrs & X86_PG_U) != 0 ? 'u' : 's',
11556 	    (range->attrs & X86_PG_G) != 0 ? 'g' : '-',
11557 	    mode, range->pdpes, range->pdes, range->ptes);
11558 
11559 	/* Reset to sentinel value. */
11560 	range->sva = la57 ? KV5ADDR(NPML5EPG - 1, NPML4EPG - 1, NPDPEPG - 1,
11561 	    NPDEPG - 1, NPTEPG - 1) : KV4ADDR(NPML4EPG - 1, NPDPEPG - 1,
11562 	    NPDEPG - 1, NPTEPG - 1);
11563 }
11564 
11565 /*
11566  * Determine whether the attributes specified by a page table entry match those
11567  * being tracked by the current range.  This is not quite as simple as a direct
11568  * flag comparison since some PAT modes have multiple representations.
11569  */
11570 static bool
11571 sysctl_kmaps_match(struct pmap_kernel_map_range *range, pt_entry_t attrs)
11572 {
11573 	pt_entry_t diff, mask;
11574 
11575 	mask = X86_PG_G | X86_PG_RW | X86_PG_U | X86_PG_PDE_CACHE | pg_nx;
11576 	diff = (range->attrs ^ attrs) & mask;
11577 	if (diff == 0)
11578 		return (true);
11579 	if ((diff & ~X86_PG_PDE_PAT) == 0 &&
11580 	    pmap_pat_index(kernel_pmap, range->attrs, true) ==
11581 	    pmap_pat_index(kernel_pmap, attrs, true))
11582 		return (true);
11583 	return (false);
11584 }
11585 
11586 static void
11587 sysctl_kmaps_reinit(struct pmap_kernel_map_range *range, vm_offset_t va,
11588     pt_entry_t attrs)
11589 {
11590 
11591 	memset(range, 0, sizeof(*range));
11592 	range->sva = va;
11593 	range->attrs = attrs;
11594 }
11595 
11596 /*
11597  * Given a leaf PTE, derive the mapping's attributes.  If they do not match
11598  * those of the current run, dump the address range and its attributes, and
11599  * begin a new run.
11600  */
11601 static void
11602 sysctl_kmaps_check(struct sbuf *sb, struct pmap_kernel_map_range *range,
11603     vm_offset_t va, pml4_entry_t pml4e, pdp_entry_t pdpe, pd_entry_t pde,
11604     pt_entry_t pte)
11605 {
11606 	pt_entry_t attrs;
11607 
11608 	attrs = pml4e & (X86_PG_RW | X86_PG_U | pg_nx);
11609 
11610 	attrs |= pdpe & pg_nx;
11611 	attrs &= pg_nx | (pdpe & (X86_PG_RW | X86_PG_U));
11612 	if ((pdpe & PG_PS) != 0) {
11613 		attrs |= pdpe & (X86_PG_G | X86_PG_PDE_CACHE);
11614 	} else if (pde != 0) {
11615 		attrs |= pde & pg_nx;
11616 		attrs &= pg_nx | (pde & (X86_PG_RW | X86_PG_U));
11617 	}
11618 	if ((pde & PG_PS) != 0) {
11619 		attrs |= pde & (X86_PG_G | X86_PG_PDE_CACHE);
11620 	} else if (pte != 0) {
11621 		attrs |= pte & pg_nx;
11622 		attrs &= pg_nx | (pte & (X86_PG_RW | X86_PG_U));
11623 		attrs |= pte & (X86_PG_G | X86_PG_PTE_CACHE);
11624 
11625 		/* Canonicalize by always using the PDE PAT bit. */
11626 		if ((attrs & X86_PG_PTE_PAT) != 0)
11627 			attrs ^= X86_PG_PDE_PAT | X86_PG_PTE_PAT;
11628 	}
11629 
11630 	if (range->sva > va || !sysctl_kmaps_match(range, attrs)) {
11631 		sysctl_kmaps_dump(sb, range, va);
11632 		sysctl_kmaps_reinit(range, va, attrs);
11633 	}
11634 }
11635 
11636 static int
11637 sysctl_kmaps(SYSCTL_HANDLER_ARGS)
11638 {
11639 	struct pmap_kernel_map_range range;
11640 	struct sbuf sbuf, *sb;
11641 	pml4_entry_t pml4e;
11642 	pdp_entry_t *pdp, pdpe;
11643 	pd_entry_t *pd, pde;
11644 	pt_entry_t *pt, pte;
11645 	vm_offset_t sva;
11646 	vm_paddr_t pa;
11647 	int error, i, j, k, l;
11648 
11649 	error = sysctl_wire_old_buffer(req, 0);
11650 	if (error != 0)
11651 		return (error);
11652 	sb = &sbuf;
11653 	sbuf_new_for_sysctl(sb, NULL, PAGE_SIZE, req);
11654 
11655 	/* Sentinel value. */
11656 	range.sva = la57 ? KV5ADDR(NPML5EPG - 1, NPML4EPG - 1, NPDPEPG - 1,
11657 	    NPDEPG - 1, NPTEPG - 1) : KV4ADDR(NPML4EPG - 1, NPDPEPG - 1,
11658 	    NPDEPG - 1, NPTEPG - 1);
11659 
11660 	/*
11661 	 * Iterate over the kernel page tables without holding the kernel pmap
11662 	 * lock.  Outside of the large map, kernel page table pages are never
11663 	 * freed, so at worst we will observe inconsistencies in the output.
11664 	 * Within the large map, ensure that PDP and PD page addresses are
11665 	 * valid before descending.
11666 	 */
11667 	for (sva = 0, i = pmap_pml4e_index(sva); i < NPML4EPG; i++) {
11668 		switch (i) {
11669 		case PML4PML4I:
11670 			sbuf_printf(sb, "\nRecursive map:\n");
11671 			break;
11672 		case DMPML4I:
11673 			sbuf_printf(sb, "\nDirect map:\n");
11674 			break;
11675 #ifdef KASAN
11676 		case KASANPML4I:
11677 			sbuf_printf(sb, "\nKASAN shadow map:\n");
11678 			break;
11679 #endif
11680 #ifdef KMSAN
11681 		case KMSANSHADPML4I:
11682 			sbuf_printf(sb, "\nKMSAN shadow map:\n");
11683 			break;
11684 		case KMSANORIGPML4I:
11685 			sbuf_printf(sb, "\nKMSAN origin map:\n");
11686 			break;
11687 #endif
11688 		case KPML4BASE:
11689 			sbuf_printf(sb, "\nKernel map:\n");
11690 			break;
11691 		case LMSPML4I:
11692 			sbuf_printf(sb, "\nLarge map:\n");
11693 			break;
11694 		}
11695 
11696 		/* Convert to canonical form. */
11697 		if (sva == 1ul << 47)
11698 			sva |= -1ul << 48;
11699 
11700 restart:
11701 		pml4e = kernel_pml4[i];
11702 		if ((pml4e & X86_PG_V) == 0) {
11703 			sva = rounddown2(sva, NBPML4);
11704 			sysctl_kmaps_dump(sb, &range, sva);
11705 			sva += NBPML4;
11706 			continue;
11707 		}
11708 		pa = pml4e & PG_FRAME;
11709 		pdp = (pdp_entry_t *)PHYS_TO_DMAP(pa);
11710 
11711 		for (j = pmap_pdpe_index(sva); j < NPDPEPG; j++) {
11712 			pdpe = pdp[j];
11713 			if ((pdpe & X86_PG_V) == 0) {
11714 				sva = rounddown2(sva, NBPDP);
11715 				sysctl_kmaps_dump(sb, &range, sva);
11716 				sva += NBPDP;
11717 				continue;
11718 			}
11719 			pa = pdpe & PG_FRAME;
11720 			if ((pdpe & PG_PS) != 0) {
11721 				sva = rounddown2(sva, NBPDP);
11722 				sysctl_kmaps_check(sb, &range, sva, pml4e, pdpe,
11723 				    0, 0);
11724 				range.pdpes++;
11725 				sva += NBPDP;
11726 				continue;
11727 			}
11728 			if (PMAP_ADDRESS_IN_LARGEMAP(sva) &&
11729 			    vm_phys_paddr_to_vm_page(pa) == NULL) {
11730 				/*
11731 				 * Page table pages for the large map may be
11732 				 * freed.  Validate the next-level address
11733 				 * before descending.
11734 				 */
11735 				goto restart;
11736 			}
11737 			pd = (pd_entry_t *)PHYS_TO_DMAP(pa);
11738 
11739 			for (k = pmap_pde_index(sva); k < NPDEPG; k++) {
11740 				pde = pd[k];
11741 				if ((pde & X86_PG_V) == 0) {
11742 					sva = rounddown2(sva, NBPDR);
11743 					sysctl_kmaps_dump(sb, &range, sva);
11744 					sva += NBPDR;
11745 					continue;
11746 				}
11747 				pa = pde & PG_FRAME;
11748 				if ((pde & PG_PS) != 0) {
11749 					sva = rounddown2(sva, NBPDR);
11750 					sysctl_kmaps_check(sb, &range, sva,
11751 					    pml4e, pdpe, pde, 0);
11752 					range.pdes++;
11753 					sva += NBPDR;
11754 					continue;
11755 				}
11756 				if (PMAP_ADDRESS_IN_LARGEMAP(sva) &&
11757 				    vm_phys_paddr_to_vm_page(pa) == NULL) {
11758 					/*
11759 					 * Page table pages for the large map
11760 					 * may be freed.  Validate the
11761 					 * next-level address before descending.
11762 					 */
11763 					goto restart;
11764 				}
11765 				pt = (pt_entry_t *)PHYS_TO_DMAP(pa);
11766 
11767 				for (l = pmap_pte_index(sva); l < NPTEPG; l++,
11768 				    sva += PAGE_SIZE) {
11769 					pte = pt[l];
11770 					if ((pte & X86_PG_V) == 0) {
11771 						sysctl_kmaps_dump(sb, &range,
11772 						    sva);
11773 						continue;
11774 					}
11775 					sysctl_kmaps_check(sb, &range, sva,
11776 					    pml4e, pdpe, pde, pte);
11777 					range.ptes++;
11778 				}
11779 			}
11780 		}
11781 	}
11782 
11783 	error = sbuf_finish(sb);
11784 	sbuf_delete(sb);
11785 	return (error);
11786 }
11787 SYSCTL_OID(_vm_pmap, OID_AUTO, kernel_maps,
11788     CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE | CTLFLAG_SKIP,
11789     NULL, 0, sysctl_kmaps, "A",
11790     "Dump kernel address layout");
11791 
11792 #ifdef DDB
11793 DB_SHOW_COMMAND(pte, pmap_print_pte)
11794 {
11795 	pmap_t pmap;
11796 	pml5_entry_t *pml5;
11797 	pml4_entry_t *pml4;
11798 	pdp_entry_t *pdp;
11799 	pd_entry_t *pde;
11800 	pt_entry_t *pte, PG_V;
11801 	vm_offset_t va;
11802 
11803 	if (!have_addr) {
11804 		db_printf("show pte addr\n");
11805 		return;
11806 	}
11807 	va = (vm_offset_t)addr;
11808 
11809 	if (kdb_thread != NULL)
11810 		pmap = vmspace_pmap(kdb_thread->td_proc->p_vmspace);
11811 	else
11812 		pmap = PCPU_GET(curpmap);
11813 
11814 	PG_V = pmap_valid_bit(pmap);
11815 	db_printf("VA 0x%016lx", va);
11816 
11817 	if (pmap_is_la57(pmap)) {
11818 		pml5 = pmap_pml5e(pmap, va);
11819 		db_printf(" pml5e 0x%016lx", *pml5);
11820 		if ((*pml5 & PG_V) == 0) {
11821 			db_printf("\n");
11822 			return;
11823 		}
11824 		pml4 = pmap_pml5e_to_pml4e(pml5, va);
11825 	} else {
11826 		pml4 = pmap_pml4e(pmap, va);
11827 	}
11828 	db_printf(" pml4e 0x%016lx", *pml4);
11829 	if ((*pml4 & PG_V) == 0) {
11830 		db_printf("\n");
11831 		return;
11832 	}
11833 	pdp = pmap_pml4e_to_pdpe(pml4, va);
11834 	db_printf(" pdpe 0x%016lx", *pdp);
11835 	if ((*pdp & PG_V) == 0 || (*pdp & PG_PS) != 0) {
11836 		db_printf("\n");
11837 		return;
11838 	}
11839 	pde = pmap_pdpe_to_pde(pdp, va);
11840 	db_printf(" pde 0x%016lx", *pde);
11841 	if ((*pde & PG_V) == 0 || (*pde & PG_PS) != 0) {
11842 		db_printf("\n");
11843 		return;
11844 	}
11845 	pte = pmap_pde_to_pte(pde, va);
11846 	db_printf(" pte 0x%016lx\n", *pte);
11847 }
11848 
11849 DB_SHOW_COMMAND(phys2dmap, pmap_phys2dmap)
11850 {
11851 	vm_paddr_t a;
11852 
11853 	if (have_addr) {
11854 		a = (vm_paddr_t)addr;
11855 		db_printf("0x%jx\n", (uintmax_t)PHYS_TO_DMAP(a));
11856 	} else {
11857 		db_printf("show phys2dmap addr\n");
11858 	}
11859 }
11860 
11861 static void
11862 ptpages_show_page(int level, int idx, vm_page_t pg)
11863 {
11864 	db_printf("l %d i %d pg %p phys %#lx ref %x\n",
11865 	    level, idx, pg, VM_PAGE_TO_PHYS(pg), pg->ref_count);
11866 }
11867 
11868 static void
11869 ptpages_show_complain(int level, int idx, uint64_t pte)
11870 {
11871 	db_printf("l %d i %d pte %#lx\n", level, idx, pte);
11872 }
11873 
11874 static void
11875 ptpages_show_pml4(vm_page_t pg4, int num_entries, uint64_t PG_V)
11876 {
11877 	vm_page_t pg3, pg2, pg1;
11878 	pml4_entry_t *pml4;
11879 	pdp_entry_t *pdp;
11880 	pd_entry_t *pd;
11881 	int i4, i3, i2;
11882 
11883 	pml4 = (pml4_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pg4));
11884 	for (i4 = 0; i4 < num_entries; i4++) {
11885 		if ((pml4[i4] & PG_V) == 0)
11886 			continue;
11887 		pg3 = PHYS_TO_VM_PAGE(pml4[i4] & PG_FRAME);
11888 		if (pg3 == NULL) {
11889 			ptpages_show_complain(3, i4, pml4[i4]);
11890 			continue;
11891 		}
11892 		ptpages_show_page(3, i4, pg3);
11893 		pdp = (pdp_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pg3));
11894 		for (i3 = 0; i3 < NPDPEPG; i3++) {
11895 			if ((pdp[i3] & PG_V) == 0)
11896 				continue;
11897 			pg2 = PHYS_TO_VM_PAGE(pdp[i3] & PG_FRAME);
11898 			if (pg2 == NULL) {
11899 				ptpages_show_complain(2, i3, pdp[i3]);
11900 				continue;
11901 			}
11902 			ptpages_show_page(2, i3, pg2);
11903 			pd = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pg2));
11904 			for (i2 = 0; i2 < NPDEPG; i2++) {
11905 				if ((pd[i2] & PG_V) == 0)
11906 					continue;
11907 				pg1 = PHYS_TO_VM_PAGE(pd[i2] & PG_FRAME);
11908 				if (pg1 == NULL) {
11909 					ptpages_show_complain(1, i2, pd[i2]);
11910 					continue;
11911 				}
11912 				ptpages_show_page(1, i2, pg1);
11913 			}
11914 		}
11915 	}
11916 }
11917 
11918 DB_SHOW_COMMAND(ptpages, pmap_ptpages)
11919 {
11920 	pmap_t pmap;
11921 	vm_page_t pg;
11922 	pml5_entry_t *pml5;
11923 	uint64_t PG_V;
11924 	int i5;
11925 
11926 	if (have_addr)
11927 		pmap = (pmap_t)addr;
11928 	else
11929 		pmap = PCPU_GET(curpmap);
11930 
11931 	PG_V = pmap_valid_bit(pmap);
11932 
11933 	if (pmap_is_la57(pmap)) {
11934 		pml5 = pmap->pm_pmltop;
11935 		for (i5 = 0; i5 < NUPML5E; i5++) {
11936 			if ((pml5[i5] & PG_V) == 0)
11937 				continue;
11938 			pg = PHYS_TO_VM_PAGE(pml5[i5] & PG_FRAME);
11939 			if (pg == NULL) {
11940 				ptpages_show_complain(4, i5, pml5[i5]);
11941 				continue;
11942 			}
11943 			ptpages_show_page(4, i5, pg);
11944 			ptpages_show_pml4(pg, NPML4EPG, PG_V);
11945 		}
11946 	} else {
11947 		ptpages_show_pml4(PHYS_TO_VM_PAGE(DMAP_TO_PHYS(
11948 		    (vm_offset_t)pmap->pm_pmltop)), NUP4ML4E, PG_V);
11949 	}
11950 }
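
/*
 * Usage of the DDB commands defined above, from the debugger prompt:
 *
 *	show pte <virtual-address>
 *	show phys2dmap <physical-address>
 *	show ptpages [<pmap-address>]
 *
 * "show pte" requires a virtual address and walks the debugged (or
 * current) thread's pmap; "show ptpages" defaults to the current CPU's
 * pmap when no address is given.
 */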
11951 #endif
11952