xref: /dragonfly/sys/platform/pc64/x86_64/pmap.c (revision 7d3e9a5b)
1 /*
2  * Copyright (c) 1991 Regents of the University of California.
3  * Copyright (c) 1994 John S. Dyson
4  * Copyright (c) 1994 David Greenman
5  * Copyright (c) 2003 Peter Wemm
6  * Copyright (c) 2005-2008 Alan L. Cox <alc@cs.rice.edu>
7  * Copyright (c) 2008, 2009 The DragonFly Project.
8  * Copyright (c) 2008, 2009 Jordan Gordeev.
9  * Copyright (c) 2011-2019 Matthew Dillon
10  * All rights reserved.
11  *
12  * This code is derived from software contributed to Berkeley by
13  * the Systems Programming Group of the University of Utah Computer
14  * Science Department and William Jolitz of UUNET Technologies Inc.
15  *
16  * Redistribution and use in source and binary forms, with or without
17  * modification, are permitted provided that the following conditions
18  * are met:
19  * 1. Redistributions of source code must retain the above copyright
20  *    notice, this list of conditions and the following disclaimer.
21  * 2. Redistributions in binary form must reproduce the above copyright
22  *    notice, this list of conditions and the following disclaimer in the
23  *    documentation and/or other materials provided with the distribution.
24  * 3. All advertising materials mentioning features or use of this software
25  *    must display the following acknowledgement:
26  *	This product includes software developed by the University of
27  *	California, Berkeley and its contributors.
28  * 4. Neither the name of the University nor the names of its contributors
29  *    may be used to endorse or promote products derived from this software
30  *    without specific prior written permission.
31  *
32  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
33  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
34  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
35  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
36  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
37  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
38  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
39  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
40  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
41  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
42  * SUCH DAMAGE.
43  */
44 /*
45  * Manage physical address maps for x86-64 systems.
46  *
47  * Some notes:
48  *	- The 'M'odified bit is only applicable to terminal PTEs.
49  *
50  *	- The 'U'ser access bit can be set for higher-level PTEs as
51  *	  long as it isn't set for terminal PTEs for pages we don't
52  *	  want user access to.
53  */
54 
55 #if 0 /* JG */
56 #include "opt_pmap.h"
57 #endif
58 #include "opt_msgbuf.h"
59 
60 #include <sys/param.h>
61 #include <sys/kernel.h>
62 #include <sys/proc.h>
63 #include <sys/msgbuf.h>
64 #include <sys/vmmeter.h>
65 #include <sys/mman.h>
66 #include <sys/systm.h>
67 
68 #include <vm/vm.h>
69 #include <vm/vm_param.h>
70 #include <sys/sysctl.h>
71 #include <sys/lock.h>
72 #include <vm/vm_kern.h>
73 #include <vm/vm_page.h>
74 #include <vm/vm_map.h>
75 #include <vm/vm_object.h>
76 #include <vm/vm_extern.h>
77 #include <vm/vm_pageout.h>
78 #include <vm/vm_pager.h>
79 #include <vm/vm_zone.h>
80 
81 #include <sys/thread2.h>
82 #include <sys/spinlock2.h>
83 #include <vm/vm_page2.h>
84 
85 #include <machine/cputypes.h>
86 #include <machine/cpu.h>
87 #include <machine/md_var.h>
88 #include <machine/specialreg.h>
89 #include <machine/smp.h>
90 #include <machine_base/apic/apicreg.h>
91 #include <machine/globaldata.h>
92 #include <machine/pmap.h>
93 #include <machine/pmap_inval.h>
94 
95 #include <ddb/ddb.h>
96 
97 #define PMAP_KEEP_PDIRS
98 
99 #if defined(DIAGNOSTIC)
100 #define PMAP_DIAGNOSTIC
101 #endif
102 
103 #define MINPV 2048
104 
105 /*
106  * pmap debugging will report who owns a pv lock when blocking.
107  */
108 #ifdef PMAP_DEBUG
109 
110 #define PMAP_DEBUG_DECL		, const char *func, int lineno
111 #define PMAP_DEBUG_ARGS		, __func__, __LINE__
112 #define PMAP_DEBUG_COPY		, func, lineno
113 
114 #define pv_get(pmap, pindex, pmarkp)	_pv_get(pmap, pindex, pmarkp	\
115 							PMAP_DEBUG_ARGS)
116 #define pv_lock(pv)			_pv_lock(pv			\
117 							PMAP_DEBUG_ARGS)
118 #define pv_hold_try(pv)			_pv_hold_try(pv			\
119 							PMAP_DEBUG_ARGS)
120 #define pv_alloc(pmap, pindex, isnewp)	_pv_alloc(pmap, pindex, isnewp	\
121 							PMAP_DEBUG_ARGS)
122 
123 #define pv_free(pv, pvp)		_pv_free(pv, pvp PMAP_DEBUG_ARGS)
124 
125 #else
126 
127 #define PMAP_DEBUG_DECL
128 #define PMAP_DEBUG_ARGS
129 #define PMAP_DEBUG_COPY
130 
131 #define pv_get(pmap, pindex, pmarkp)	_pv_get(pmap, pindex, pmarkp)
132 #define pv_lock(pv)			_pv_lock(pv)
133 #define pv_hold_try(pv)			_pv_hold_try(pv)
134 #define pv_alloc(pmap, pindex, isnewp)	_pv_alloc(pmap, pindex, isnewp)
135 #define pv_free(pv, pvp)		_pv_free(pv, pvp)
136 
137 #endif
138 
139 /*
140  * Get PDEs and PTEs for user/kernel address space
141  */
142 #define pdir_pde(m, v)		(m[(vm_offset_t)(v) >> PDRSHIFT])
143 
144 #define pmap_pde_v(pmap, pde)	\
145 		((*(pd_entry_t *)pde & pmap->pmap_bits[PG_V_IDX]) != 0)
146 #define pmap_pte_w(pmap, pte)	\
147 		((*(pt_entry_t *)pte & pmap->pmap_bits[PG_W_IDX]) != 0)
148 #define pmap_pte_m(pmap, pte)	\
149 		((*(pt_entry_t *)pte & pmap->pmap_bits[PG_M_IDX]) != 0)
150 #define pmap_pte_u(pmap, pte)	\
151 		((*(pt_entry_t *)pte & pmap->pmap_bits[PG_U_IDX]) != 0)
152 #define pmap_pte_v(pmap, pte)	\
153 		((*(pt_entry_t *)pte & pmap->pmap_bits[PG_V_IDX]) != 0)
154 
155 /*
156  * Given a map and a machine independent protection code,
157  * convert to an x86 protection code.
158  */
159 #define pte_prot(m, p)		\
160 	(m->protection_codes[p & (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE)])
161 static uint64_t protection_codes[PROTECTION_CODES_SIZE];
162 
163 /*
164  * Backing scan macros.  Note that in the use case 'ipte' is only a tentitive
165  * value and must be validated by a pmap_inval_smp_cmpset*() or equivalent
166  * function.
167  *
168  * NOTE: cpu_ccfence() is required to prevent excessive optimization
169  *	 of the (ipte) variable.
170  *
171  * NOTE: We don't bother locking the backing object if it isn't mapped
172  *	 to anything (backing_list is empty).
173  *
174  * NOTE: For now guarantee an interlock via iobj->backing_lk if the
175  *	 object exists and do not shortcut the lock by checking to see
176  *	 if the list is empty first.
177  */
178 #define PMAP_PAGE_BACKING_SCAN(m, match_pmap, ipmap, iptep, ipte, iva)	\
179 	if (m->object) {						\
180 		vm_object_t iobj = m->object;				\
181 		vm_map_backing_t iba, next_ba;				\
182 		struct pmap *ipmap;					\
183 		pt_entry_t ipte;					\
184 		pt_entry_t *iptep;					\
185 		vm_offset_t iva;					\
186 		vm_pindex_t ipindex_start;				\
187 		vm_pindex_t ipindex_end;				\
188 									\
189 		lockmgr(&iobj->backing_lk, LK_SHARED);			\
190 		next_ba = TAILQ_FIRST(&iobj->backing_list);		\
191 		while ((iba = next_ba) != NULL) {			\
192 			next_ba = TAILQ_NEXT(iba, entry);		\
193 			ipmap = iba->pmap;				\
194 			if (match_pmap && ipmap != match_pmap)		\
195 				continue;				\
196 			ipindex_start = iba->offset >> PAGE_SHIFT;	\
197 			ipindex_end = ipindex_start +			\
198 				  ((iba->end - iba->start) >> PAGE_SHIFT); \
199 			if (m->pindex < ipindex_start ||		\
200 			    m->pindex >= ipindex_end) {			\
201 				continue;				\
202 			}						\
203 			iva = iba->start +				\
204 			      ((m->pindex - ipindex_start) << PAGE_SHIFT); \
205 			iptep = pmap_pte(ipmap, iva);			\
206 			if (iptep == NULL)				\
207 				continue;				\
208 			ipte = *iptep;					\
209 			cpu_ccfence();					\
210 			if (m->phys_addr != (ipte & PG_FRAME))		\
211 				continue;				\
212 
213 #define PMAP_PAGE_BACKING_RETRY						\
214 			{						\
215 				next_ba = iba;				\
216 				continue;				\
217 			}						\
218 
219 #define PMAP_PAGE_BACKING_DONE						\
220 		}							\
221 		lockmgr(&iobj->backing_lk, LK_RELEASE);			\
222 	}								\
223 
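/*
 * Illustrative usage (sketch): the SCAN macro opens the loop, the
 * caller's body follows, and DONE closes it again, roughly:
 *
 *	PMAP_PAGE_BACKING_SCAN(m, NULL, ipmap, iptep, ipte, iva) {
 *		(operate on ipmap/iptep/ipte/iva; validate ipte with
 *		 pmap_inval_smp_cmpset*() before relying on it, or
 *		 rescan the same entry via PMAP_PAGE_BACKING_RETRY)
 *	} PMAP_PAGE_BACKING_DONE;
 */
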
224 static struct pmap iso_pmap;
225 static struct pmap kernel_pmap_store;
226 struct pmap *kernel_pmap = &kernel_pmap_store;
227 
228 vm_paddr_t avail_start;		/* PA of first available physical page */
229 vm_paddr_t avail_end;		/* PA of last available physical page */
230 vm_offset_t virtual2_start;	/* cutout free area prior to kernel start */
231 vm_offset_t virtual2_end;
232 vm_offset_t virtual_start;	/* VA of first avail page (after kernel BSS) */
233 vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
234 vm_offset_t KvaStart;		/* VA start of KVA space */
235 vm_offset_t KvaEnd;		/* VA end of KVA space (non-inclusive) */
236 vm_offset_t KvaSize;		/* max size of KVA space */
237 vm_offset_t DMapMaxAddress;
238 
239 /* Has pmap_init completed? */
240 __read_frequently static boolean_t pmap_initialized = FALSE;
241 //static int pgeflag;		/* PG_G or-in */
242 static uint64_t PatMsr;		/* value of MSR_PAT */
243 
244 static int ndmpdp;
245 static vm_paddr_t dmaplimit;
246 vm_offset_t kernel_vm_end = VM_MIN_KERNEL_ADDRESS;
247 
248 static pt_entry_t pat_pte_index[PAT_INDEX_SIZE];	/* PAT -> PG_ bits */
249 static pt_entry_t pat_pde_index[PAT_INDEX_SIZE];	/* PAT -> PG_ bits */
250 
251 static uint64_t KPTbase;
252 static uint64_t KPTphys;
253 static uint64_t KPDphys;	/* phys addr of kernel level 2 */
254 static uint64_t KPDbase;	/* phys addr of kernel level 2 @ KERNBASE */
255 uint64_t KPDPphys;		/* phys addr of kernel level 3 */
256 uint64_t KPML4phys;		/* phys addr of kernel level 4 */
257 
258 static uint64_t DMPDphys;	/* phys addr of direct mapped level 2 */
259 static uint64_t DMPDPphys;	/* phys addr of direct mapped level 3 */
260 
261 /*
262  * Data for the pv entry allocation mechanism
263  */
264 __read_mostly static vm_zone_t pvzone;
265 __read_mostly static int pmap_pagedaemon_waken = 0;
266 static struct vm_zone pvzone_store;
267 static struct pv_entry *pvinit;
268 
269 /*
270  * All those kernel PT submaps that BSD is so fond of
271  */
272 pt_entry_t *CMAP1 = NULL;
273 caddr_t CADDR1 = NULL, ptvmmap = NULL;
274 static pt_entry_t *msgbufmap, *ptmmap;
275 struct msgbuf *msgbufp = NULL;
276 
277 /*
278  * PG_* bits for regular (x86) pmap.
279  */
280 __read_frequently static uint64_t pmap_bits_default[PG_BITS_SIZE] = {
281 	[TYPE_IDX]	= REGULAR_PMAP,
282 	[PG_V_IDX]	= X86_PG_V,
283 	[PG_RW_IDX]	= X86_PG_RW,
284 	[PG_U_IDX]	= X86_PG_U,
285 	[PG_A_IDX]	= X86_PG_A,
286 	[PG_M_IDX]	= X86_PG_M,
287 	[PG_PS_IDX]	= X86_PG_PS,
288 	[PG_G_IDX]	= X86_PG_G,
289 	[PG_W_IDX]	= X86_PG_AVAIL1,
290 	[PG_MANAGED_IDX] = X86_PG_AVAIL2,
291 	[PG_N_IDX]	= X86_PG_NC_PWT | X86_PG_NC_PCD,
292 	[PG_NX_IDX]	= X86_PG_NX,
293 };
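
/*
 * NOTE: Accessing hardware PTE bits through pmap->pmap_bits[] rather than
 *	 hard-coding the X86_PG_* constants is what allows, in principle,
 *	 an alternate hardware PTE layout to reuse the bulk of this code by
 *	 installing a different table; TYPE_IDX records which layout a
 *	 given pmap uses.
 */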
294 
295 /*
296  * Crashdump maps.
297  */
298 static pt_entry_t *pt_crashdumpmap;
299 static caddr_t crashdumpmap;
300 
301 static int pmap_debug = 0;
302 SYSCTL_INT(_machdep, OID_AUTO, pmap_debug, CTLFLAG_RW,
303     &pmap_debug, 0, "Debug pmap's");
304 #ifdef PMAP_DEBUG2
305 static int pmap_enter_debug = 0;
306 SYSCTL_INT(_machdep, OID_AUTO, pmap_enter_debug, CTLFLAG_RW,
307     &pmap_enter_debug, 0, "Debug pmap_enter's");
308 #endif
309 static int pmap_yield_count = 64;
310 SYSCTL_INT(_machdep, OID_AUTO, pmap_yield_count, CTLFLAG_RW,
311     &pmap_yield_count, 0, "Yield during init_pt/release");
312 static int pmap_fast_kernel_cpusync = 0;
313 SYSCTL_INT(_machdep, OID_AUTO, pmap_fast_kernel_cpusync, CTLFLAG_RW,
314     &pmap_fast_kernel_cpusync, 0, "Share page table pages when possible");
315 static int pmap_dynamic_delete = 0;
316 SYSCTL_INT(_machdep, OID_AUTO, pmap_dynamic_delete, CTLFLAG_RW,
317     &pmap_dynamic_delete, 0, "Dynamically delete PT/PD/PDPs");
318 static int pmap_lock_delay = 100;
319 SYSCTL_INT(_machdep, OID_AUTO, pmap_lock_delay, CTLFLAG_RW,
320     &pmap_lock_delay, 0, "Spin loops");
321 static int meltdown_mitigation = -1;
322 TUNABLE_INT("machdep.meltdown_mitigation", &meltdown_mitigation);
323 SYSCTL_INT(_machdep, OID_AUTO, meltdown_mitigation, CTLFLAG_RW,
324     &meltdown_mitigation, 0, "Userland pmap isolation");
325 
326 static int pmap_nx_enable = -1;		/* -1 = auto */
327 /* needs manual TUNABLE in early probe, see below */
328 SYSCTL_INT(_machdep, OID_AUTO, pmap_nx_enable, CTLFLAG_RD,
329     &pmap_nx_enable, 0,
330     "no-execute support (0=disabled, 1=w/READ, 2=w/READ & WRITE)");
331 
332 static int pmap_pv_debug = 50;
333 SYSCTL_INT(_machdep, OID_AUTO, pmap_pv_debug, CTLFLAG_RW,
334     &pmap_pv_debug, 0, "");
335 
336 static long vm_pmap_pv_entries;
337 SYSCTL_LONG(_vm, OID_AUTO, pmap_pv_entries, CTLFLAG_RD,
338     &vm_pmap_pv_entries, 0, "");
339 
340 /* Standard user access functions */
341 extern int std_copyinstr (const void *udaddr, void *kaddr, size_t len,
342     size_t *lencopied);
343 extern int std_copyin (const void *udaddr, void *kaddr, size_t len);
344 extern int std_copyout (const void *kaddr, void *udaddr, size_t len);
345 extern int std_fubyte (const uint8_t *base);
346 extern int std_subyte (uint8_t *base, uint8_t byte);
347 extern int32_t std_fuword32 (const uint32_t *base);
348 extern int64_t std_fuword64 (const uint64_t *base);
349 extern int std_suword64 (uint64_t *base, uint64_t word);
350 extern int std_suword32 (uint32_t *base, int word);
351 extern uint32_t std_swapu32 (volatile uint32_t *base, uint32_t v);
352 extern uint64_t std_swapu64 (volatile uint64_t *base, uint64_t v);
353 extern uint32_t std_fuwordadd32 (volatile uint32_t *base, uint32_t v);
354 extern uint64_t std_fuwordadd64 (volatile uint64_t *base, uint64_t v);
355 
356 #if 0
357 static void pv_hold(pv_entry_t pv);
358 #endif
359 static int _pv_hold_try(pv_entry_t pv
360 				PMAP_DEBUG_DECL);
361 static void pv_drop(pv_entry_t pv);
362 static void _pv_lock(pv_entry_t pv
363 				PMAP_DEBUG_DECL);
364 static void pv_unlock(pv_entry_t pv);
365 static pv_entry_t _pv_alloc(pmap_t pmap, vm_pindex_t pindex, int *isnew
366 				PMAP_DEBUG_DECL);
367 static pv_entry_t _pv_get(pmap_t pmap, vm_pindex_t pindex, vm_pindex_t **pmarkp
368 				PMAP_DEBUG_DECL);
369 static void _pv_free(pv_entry_t pv, pv_entry_t pvp PMAP_DEBUG_DECL);
370 static pv_entry_t pv_get_try(pmap_t pmap, vm_pindex_t pindex,
371 				vm_pindex_t **pmarkp, int *errorp);
372 static void pv_put(pv_entry_t pv);
373 static void *pv_pte_lookup(pv_entry_t pv, vm_pindex_t pindex);
374 static pv_entry_t pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex,
375 		      pv_entry_t *pvpp);
376 static void pmap_remove_pv_pte(pv_entry_t pv, pv_entry_t pvp,
377 			pmap_inval_bulk_t *bulk, int destroy);
378 static vm_page_t pmap_remove_pv_page(pv_entry_t pv, int clrpgbits);
379 static int pmap_release_pv(pv_entry_t pv, pv_entry_t pvp,
380 			pmap_inval_bulk_t *bulk);
381 
382 struct pmap_scan_info;
383 static void pmap_remove_callback(pmap_t pmap, struct pmap_scan_info *info,
384 		      vm_pindex_t *pte_placemark, pv_entry_t pt_pv,
385 		      vm_offset_t va, pt_entry_t *ptep, void *arg __unused);
386 static void pmap_protect_callback(pmap_t pmap, struct pmap_scan_info *info,
387 		      vm_pindex_t *pte_placemark, pv_entry_t pt_pv,
388 		      vm_offset_t va, pt_entry_t *ptep, void *arg __unused);
389 
390 static void x86_64_protection_init (void);
391 static void create_pagetables(vm_paddr_t *firstaddr);
392 static void pmap_remove_all (vm_page_t m);
393 static boolean_t pmap_testbit (vm_page_t m, int bit);
394 
395 static pt_entry_t *pmap_pte_quick (pmap_t pmap, vm_offset_t va);
396 static vm_offset_t pmap_kmem_choose(vm_offset_t addr);
397 
398 static void pmap_pinit_defaults(struct pmap *pmap);
399 static void pv_placemarker_wait(pmap_t pmap, vm_pindex_t *pmark);
400 static void pv_placemarker_wakeup(pmap_t pmap, vm_pindex_t *pmark);
401 
402 static int
403 pv_entry_compare(pv_entry_t pv1, pv_entry_t pv2)
404 {
405 	if (pv1->pv_pindex < pv2->pv_pindex)
406 		return(-1);
407 	if (pv1->pv_pindex > pv2->pv_pindex)
408 		return(1);
409 	return(0);
410 }
411 
412 RB_GENERATE2(pv_entry_rb_tree, pv_entry, pv_entry,
413              pv_entry_compare, vm_pindex_t, pv_pindex);
414 
415 /*
416  * We have removed a managed pte.  The page might not be hard or soft-busied
417  * at this point so we have to be careful.
418  *
419  * If advanced mode is enabled we can clear PG_MAPPED/WRITEABLE only if
420  * MAPPEDMULTI is not set.  This must be done atomically against possible
421  * concurrent pmap_enter()s occurring at the same time.  If MULTI is set
422  * then the kernel may have to call vm_page_protect() later on to clean
423  * the bits up.  This is particularly important for kernel_map/kernel_object
424  * mappings due to the expense of scanning the kernel_object's vm_backing's.
425  *
426  * If advanced mode is not enabled we update our tracking counts and
427  * synchronize PG_MAPPED/WRITEABLE later on in pmap_mapped_sync().
428  */
429 static __inline
430 void
431 pmap_removed_pte(vm_page_t m, pt_entry_t pte)
432 {
433 	int flags;
434 	int nflags;
435 
436 	flags = m->flags;
437 	cpu_ccfence();
438 	while ((flags & PG_MAPPEDMULTI) == 0) {
439 		nflags = flags & ~(PG_MAPPED | PG_WRITEABLE);
440 		if (atomic_fcmpset_int(&m->flags, &flags, nflags))
441 			break;
442 	}
443 }
444 
445 /*
446  * Move the kernel virtual free pointer to the next
447  * 2MB.  This is used to help improve performance
448  * by using a large (2MB) page for much of the kernel
449  * (.text, .data, .bss)
450  */
451 static
452 vm_offset_t
453 pmap_kmem_choose(vm_offset_t addr)
454 {
455 	vm_offset_t newaddr = addr;
456 
457 	newaddr = roundup2(addr, NBPDR);
458 	return newaddr;
459 }
460 
461 /*
462  * Returns the pindex of a page table entry (representing a terminal page).
463  * There are NUPTE_TOTAL page table entries possible (a huge number)
464  *
465  * x86-64 has a 48-bit address space, where bit 47 is sign-extended out.
466  * We want to properly translate negative KVAs.
467  */
468 static __inline
469 vm_pindex_t
470 pmap_pte_pindex(vm_offset_t va)
471 {
472 	return ((va >> PAGE_SHIFT) & (NUPTE_TOTAL - 1));
473 }
474 
475 /*
476  * Returns the pindex of a page table.
477  */
478 static __inline
479 vm_pindex_t
480 pmap_pt_pindex(vm_offset_t va)
481 {
482 	return (NUPTE_TOTAL + ((va >> PDRSHIFT) & (NUPT_TOTAL - 1)));
483 }
484 
485 /*
486  * Returns the pindex of a page directory.
487  */
488 static __inline
489 vm_pindex_t
490 pmap_pd_pindex(vm_offset_t va)
491 {
492 	return (NUPTE_TOTAL + NUPT_TOTAL +
493 		((va >> PDPSHIFT) & (NUPD_TOTAL - 1)));
494 }
495 
496 static __inline
497 vm_pindex_t
498 pmap_pdp_pindex(vm_offset_t va)
499 {
500 	return (NUPTE_TOTAL + NUPT_TOTAL + NUPD_TOTAL +
501 		((va >> PML4SHIFT) & (NUPDP_TOTAL - 1)));
502 }
503 
504 static __inline
505 vm_pindex_t
506 pmap_pml4_pindex(void)
507 {
508 	return (NUPTE_TOTAL + NUPT_TOTAL + NUPD_TOTAL + NUPDP_TOTAL);
509 }
510 
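/*
 * Summary of the pv_entry pindex layout established by the functions
 * above (derived directly from their return values):
 *
 *	[0, NUPTE_TOTAL)		terminal page table entries
 *	[NUPTE_TOTAL, +NUPT_TOTAL)	page table (PT) pages
 *	[..., +NUPD_TOTAL)		page directory (PD) pages
 *	[..., +NUPDP_TOTAL)		page directory pointer (PDP) pages
 *	(the single index after that)	the PML4 page itself
 */
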
511 /*
512  * Return various *clipped* indexes for a given VA.
513  *
514  * Returns the index of a PTE in a page table (PT), representing
515  * a terminal page.
516  */
517 static __inline
518 vm_pindex_t
519 pmap_pte_index(vm_offset_t va)
520 {
521 	return ((va >> PAGE_SHIFT) & ((1UL << NPTEPGSHIFT) - 1));
522 }
523 
524 /*
525  * Returns the index of a PDE in a page directory (PD) table, representing
526  * a page table (PT).
527  */
528 static __inline
529 vm_pindex_t
530 pmap_pt_index(vm_offset_t va)
531 {
532 	return ((va >> PDRSHIFT) & ((1UL << NPDEPGSHIFT) - 1));
533 }
534 
535 /*
536  * Returns the index of a PDPE in a page directory pointer (PDP) table,
537  * representing a page directory (PD) table.
538  */
539 static __inline
540 vm_pindex_t
541 pmap_pd_index(vm_offset_t va)
542 {
543 	return ((va >> PDPSHIFT) & ((1UL << NPDPEPGSHIFT) - 1));
544 }
545 
546 /*
547  * Returns the index of a PML4E in the PML4 table, representing a page
548  * directory pointer (PDP) table.
549  */
550 static __inline
551 vm_pindex_t
552 pmap_pdp_index(vm_offset_t va)
553 {
554 	return ((va >> PML4SHIFT) & ((1UL << NPML4EPGSHIFT) - 1));
555 }
556 
557 /*
558  * Of all the layers (PT, PD, PDP, PML4) the best one to cache is
559  * the PT layer.  This will speed up core pmap operations considerably.
560  *
561  * NOTE: The pmap spinlock does not need to be held but the passed-in pv
562  *	 must be in a known associated state (typically by being locked when
563  *	 the pmap spinlock isn't held).  We allow the race for that case.
564  *
565  * NOTE: pm_pvhint* is only accessed (read) with the spin-lock held, using
566  *	 cpu_ccfence() to prevent compiler optimizations from reloading the
567  *	 field.
568  */
569 static __inline
570 void
571 pv_cache(pmap_t pmap, pv_entry_t pv, vm_pindex_t pindex)
572 {
573 	if (pindex < pmap_pt_pindex(0)) {
574 		;
575 	} else if (pindex < pmap_pd_pindex(0)) {
576 		pmap->pm_pvhint_pt = pv;
577 	}
578 }
579 
580 /*
581  * Locate the pv_entry for the requested page table pindex
582  */
583 static __inline
584 pv_entry_t
585 pv_entry_lookup(pmap_t pmap, vm_pindex_t pindex)
586 {
587 	pv_entry_t pv;
588 
589 	if (pindex < pmap_pt_pindex(0))
590 		return NULL;
591 #if 1
592 	if (pindex < pmap_pd_pindex(0))
593 		pv = pmap->pm_pvhint_pt;
594 	else
595 		pv = NULL;
596 	cpu_ccfence();
597 	if (pv == NULL || pv->pv_pmap != pmap) {
598 		pv = pv_entry_rb_tree_RB_LOOKUP(&pmap->pm_pvroot, pindex);
599 		if (pv)
600 			pv_cache(pmap, pv, pindex);
601 	} else if (pv->pv_pindex != pindex) {
602 		pv = pv_entry_rb_tree_RB_LOOKUP_REL(&pmap->pm_pvroot,
603 						    pindex, pv);
604 		if (pv)
605 			pv_cache(pmap, pv, pindex);
606 	}
607 #else
608 	pv = pv_entry_rb_tree_RB_LOOKUP(&pmap->pm_pvroot, pindex);
609 #endif
610 	return pv;
611 }
612 
613 /*
614  * pmap_pte_quick:
615  *
616  *	Super fast pmap_pte routine best used when scanning the pv lists.
617  *	This eliminates many course-grained invltlb calls.  Note that many of
618  *	This eliminates many coarse-grained invltlb calls.  Note that many of
619  *	to do an entire invltlb when checking a single mapping.
620  */
621 static __inline pt_entry_t *pmap_pte(pmap_t pmap, vm_offset_t va);
622 
623 static
624 pt_entry_t *
625 pmap_pte_quick(pmap_t pmap, vm_offset_t va)
626 {
627 	return pmap_pte(pmap, va);
628 }
629 
630 /*
631  * The placemarker hash must be broken up into four zones so lock
632  * ordering semantics continue to work (e.g. pte, pt, pd, then pdp).
633  *
634  * Placemarkers are used to 'lock' page table indices that do not have
635  * a pv_entry.  This allows the pmap to support managed and unmanaged
636  * pages and shared page tables.
637  */
638 #define PM_PLACE_BASE	(PM_PLACEMARKS >> 2)
639 
640 static __inline
641 vm_pindex_t *
642 pmap_placemarker_hash(pmap_t pmap, vm_pindex_t pindex)
643 {
644 	int hi;
645 
646 	if (pindex < pmap_pt_pindex(0))		/* zone 0 - PTE */
647 		hi = 0;
648 	else if (pindex < pmap_pd_pindex(0))	/* zone 1 - PT */
649 		hi = PM_PLACE_BASE;
650 	else if (pindex < pmap_pdp_pindex(0))	/* zone 2 - PD */
651 		hi = PM_PLACE_BASE << 1;
652 	else					/* zone 3 - PDP (and PML4E) */
653 		hi = PM_PLACE_BASE | (PM_PLACE_BASE << 1);
654 	hi += pindex & (PM_PLACE_BASE - 1);
655 
656 	return (&pmap->pm_placemarks[hi]);
657 }
658 
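/*
 * Illustrative use of a placemarker (see the callers for the exact
 * locking protocol): a pindex that has no pv_entry is hashed to a slot,
 * the slot is claimed to 'lock' that pindex, and contenders block in
 * pv_placemarker_wait() until the owner calls pv_placemarker_wakeup()
 * on the same slot, e.g.
 *
 *	vm_pindex_t *pmark = pmap_placemarker_hash(pmap, pindex);
 *	... claim or wait on *pmark ...
 *	pv_placemarker_wakeup(pmap, pmark);
 */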
659 
660 /*
661  * Generic procedure to index a pte from a pt, pd, or pdp.
662  *
663  * NOTE: Normally passed pindex as pmap_xx_index().  pmap_xx_pindex() is NOT
664  *	 a page table page index but is instead a PV lookup index.
665  */
666 static
667 void *
668 pv_pte_lookup(pv_entry_t pv, vm_pindex_t pindex)
669 {
670 	pt_entry_t *pte;
671 
672 	pte = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pv->pv_m));
673 	return(&pte[pindex]);
674 }
675 
676 /*
677  * Return pointer to PDP slot in the PML4
678  */
679 static __inline
680 pml4_entry_t *
681 pmap_pdp(pmap_t pmap, vm_offset_t va)
682 {
683 	return (&pmap->pm_pml4[pmap_pdp_index(va)]);
684 }
685 
686 /*
687  * Return pointer to PD slot in the PDP given a pointer to the PDP
688  */
689 static __inline
690 pdp_entry_t *
691 pmap_pdp_to_pd(pml4_entry_t pdp_pte, vm_offset_t va)
692 {
693 	pdp_entry_t *pd;
694 
695 	pd = (pdp_entry_t *)PHYS_TO_DMAP(pdp_pte & PG_FRAME);
696 	return (&pd[pmap_pd_index(va)]);
697 }
698 
699 /*
700  * Return pointer to PD slot in the PDP.
701  */
702 static __inline
703 pdp_entry_t *
704 pmap_pd(pmap_t pmap, vm_offset_t va)
705 {
706 	pml4_entry_t *pdp;
707 
708 	pdp = pmap_pdp(pmap, va);
709 	if ((*pdp & pmap->pmap_bits[PG_V_IDX]) == 0)
710 		return NULL;
711 	return (pmap_pdp_to_pd(*pdp, va));
712 }
713 
714 /*
715  * Return pointer to PT slot in the PD given a pointer to the PD
716  */
717 static __inline
718 pd_entry_t *
719 pmap_pd_to_pt(pdp_entry_t pd_pte, vm_offset_t va)
720 {
721 	pd_entry_t *pt;
722 
723 	pt = (pd_entry_t *)PHYS_TO_DMAP(pd_pte & PG_FRAME);
724 	return (&pt[pmap_pt_index(va)]);
725 }
726 
727 /*
728  * Return pointer to PT slot in the PD
729  *
730  * SIMPLE PMAP NOTE: Simple pmaps (embedded in objects) do not have PDPs,
731  *		     so we cannot lookup the PD via the PDP.  Instead we
732  *		     must look it up via the pmap.
733  */
734 static __inline
735 pd_entry_t *
736 pmap_pt(pmap_t pmap, vm_offset_t va)
737 {
738 	pdp_entry_t *pd;
739 	pv_entry_t pv;
740 	vm_pindex_t pd_pindex;
741 	vm_paddr_t phys;
742 
743 	if (pmap->pm_flags & PMAP_FLAG_SIMPLE) {
744 		pd_pindex = pmap_pd_pindex(va);
745 		spin_lock_shared(&pmap->pm_spin);
746 		pv = pv_entry_rb_tree_RB_LOOKUP(&pmap->pm_pvroot, pd_pindex);
747 		if (pv == NULL || pv->pv_m == NULL) {
748 			spin_unlock_shared(&pmap->pm_spin);
749 			return NULL;
750 		}
751 		phys = VM_PAGE_TO_PHYS(pv->pv_m);
752 		spin_unlock_shared(&pmap->pm_spin);
753 		return (pmap_pd_to_pt(phys, va));
754 	} else {
755 		pd = pmap_pd(pmap, va);
756 		if (pd == NULL || (*pd & pmap->pmap_bits[PG_V_IDX]) == 0)
757 			 return NULL;
758 		return (pmap_pd_to_pt(*pd, va));
759 	}
760 }
761 
762 /*
763  * Return pointer to PTE slot in the PT given a pointer to the PT
764  */
765 static __inline
766 pt_entry_t *
767 pmap_pt_to_pte(pd_entry_t pt_pte, vm_offset_t va)
768 {
769 	pt_entry_t *pte;
770 
771 	pte = (pt_entry_t *)PHYS_TO_DMAP(pt_pte & PG_FRAME);
772 	return (&pte[pmap_pte_index(va)]);
773 }
774 
775 /*
776  * Return pointer to PTE slot in the PT
777  */
778 static __inline
779 pt_entry_t *
780 pmap_pte(pmap_t pmap, vm_offset_t va)
781 {
782 	pd_entry_t *pt;
783 
784 	pt = pmap_pt(pmap, va);
785 	if (pt == NULL || (*pt & pmap->pmap_bits[PG_V_IDX]) == 0)
786 		 return NULL;
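	/*
	 * A 2MB (PS) mapping has no PT level; return the PD entry itself
	 * as the terminal "pte" so callers can detect the large page.
	 */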
787 	if ((*pt & pmap->pmap_bits[PG_PS_IDX]) != 0)
788 		return ((pt_entry_t *)pt);
789 	return (pmap_pt_to_pte(*pt, va));
790 }
791 
792 /*
793  * Return address of PT slot in PD (KVM only)
794  *
795  * Cannot be used for user page tables because it might interfere with
796  * the shared page-table-page optimization (pmap_mmu_optimize).
797  */
798 static __inline
799 pd_entry_t *
800 vtopt(vm_offset_t va)
801 {
802 	uint64_t mask = ((1ul << (NPDEPGSHIFT + NPDPEPGSHIFT +
803 				  NPML4EPGSHIFT)) - 1);
804 
805 	return (PDmap + ((va >> PDRSHIFT) & mask));
806 }
807 
808 /*
809  * KVM - return address of PTE slot in PT
810  */
811 static __inline
812 pt_entry_t *
813 vtopte(vm_offset_t va)
814 {
815 	uint64_t mask = ((1ul << (NPTEPGSHIFT + NPDEPGSHIFT +
816 				  NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1);
817 
818 	return (PTmap + ((va >> PAGE_SHIFT) & mask));
819 }
820 
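/*
 * NOTE: vtopt()/vtopte() rely on the PML4 being recursively mapped to
 *	 itself (see create_pagetables()), which turns PDmap/PTmap into
 *	 linear windows over every kernel PD/PT entry; the masks above
 *	 simply clip the VA to the range each window spans.
 */
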
821 /*
822  * Returns the physical address translation from va for a user address.
823  * (vm_paddr_t)-1 is returned on failure.
824  */
825 vm_paddr_t
826 uservtophys(vm_offset_t va)
827 {
828 	uint64_t mask = ((1ul << (NPTEPGSHIFT + NPDEPGSHIFT +
829 				  NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1);
830 	vm_paddr_t pa;
831 	pt_entry_t pte;
832 	pmap_t pmap;
833 
834 	pmap = vmspace_pmap(mycpu->gd_curthread->td_lwp->lwp_vmspace);
835 	pa = (vm_paddr_t)-1;
836 	if (va < VM_MAX_USER_ADDRESS) {
837 		pte = kreadmem64(PTmap + ((va >> PAGE_SHIFT) & mask));
838 		if (pte & pmap->pmap_bits[PG_V_IDX])
839 			pa = (pte & PG_FRAME) | (va & PAGE_MASK);
840 	}
841 	return pa;
842 }
843 
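/*
 * Boot-time bump allocator: hands out 'n' zeroed physical pages starting
 * at *firstaddr and advances the cursor.  The bzero() of the physical
 * address works because we are still running (mostly) V=P at this stage
 * of the boot (see create_pagetables() below).
 */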
844 static uint64_t
845 allocpages(vm_paddr_t *firstaddr, long n)
846 {
847 	uint64_t ret;
848 
849 	ret = *firstaddr;
850 	bzero((void *)ret, n * PAGE_SIZE);
851 	*firstaddr += n * PAGE_SIZE;
852 	return (ret);
853 }
854 
855 static
856 void
857 create_pagetables(vm_paddr_t *firstaddr)
858 {
859 	long i;		/* must be 64 bits */
860 	long nkpt_base;
861 	long nkpt_phys;
862 	long nkpd_phys;
863 	int j;
864 
865 	/*
866 	 * We are running (mostly) V=P at this point
867 	 *
868 	 * Calculate how many 1GB PD entries in our PDP pages are needed
869 	 * for the DMAP.  This is only allocated if the system does not
870 	 * support 1GB pages.  Otherwise ndmpdp is simply a count of
871 	 * the number of 1G terminal entries needed in our PDP pages.
872 	 *
873 	 * NOTE: Maxmem is in pages
874 	 */
875 	ndmpdp = (ptoa(Maxmem) + NBPDP - 1) >> PDPSHIFT;
876 	if (ndmpdp < 4)		/* Minimum 4GB of DMAP */
877 		ndmpdp = 4;
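	/*
	 * Example: each PDP entry spans 1GB (NBPDP), so a 16GB Maxmem
	 * yields ndmpdp = 16, while anything under 4GB is clamped to
	 * the 4GB DMAP minimum above.
	 */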
878 
879 #if 0
880 	/*
881 	 * HACK XXX fix me - Some laptops map the EFI framebuffer in
882 	 * very high physical addresses and the DMAP winds up being too
883 	 * small.  The EFI framebuffer has to be mapped for the console
884 	 * very early and the DMAP is how it does it.
885 	 */
886 	if (ndmpdp < 512)	/* Minimum 512GB of DMAP */
887 		ndmpdp = 512;
888 #endif
889 
890 	KKASSERT(ndmpdp <= NDMPML4E * NPML4EPG);
891 	DMapMaxAddress = DMAP_MIN_ADDRESS +
892 			 ((ndmpdp * NPDEPG) << PDRSHIFT);
893 
894 	/*
895 	 * Starting at KERNBASE - map all 2G worth of page table pages.
896 	 * KERNBASE is offset -2G from the end of kvm.  This will accommodate
897 	 * all KVM allocations above KERNBASE, including the SYSMAPs below.
898 	 *
899 	 * We do this by allocating 2*512 PT pages.  Each PT page can map
900 	 * 2MB, for 2GB total.
901 	 */
902 	nkpt_base = (NPDPEPG - KPDPI) * NPTEPG;	/* typically 2 x 512 */
903 
904 	/*
905 	 * Starting at the beginning of kvm (VM_MIN_KERNEL_ADDRESS),
906 	 * calculate how many page table pages we need to preallocate
907 	 * for early vm_map allocations.
908 	 *
909 	 * A few extra won't hurt, they will get used up in the running
910 	 * system.
911 	 *
912 	 * vm_page array
913 	 * initial pventry's
914 	 */
915 	nkpt_phys = howmany(Maxmem * sizeof(struct vm_page), NBPDR);
916 	nkpt_phys += howmany(Maxmem * sizeof(struct pv_entry), NBPDR);
917 	nkpt_phys += 128;	/* a few extra */
918 
919 	/*
920 	 * The highest value nkpd_phys can be set to is
921 	 * NKPDPE - (NPDPEPG - KPDPI) (i.e. NKPDPE - 2).
922 	 *
923 	 * Doing so would cause all PD pages to be pre-populated for
924 	 * a maximal KVM space (approximately 16*512 pages, or 32MB).
925 	 * We can save memory by not doing this.
926 	 */
927 	nkpd_phys = (nkpt_phys + NPDPEPG - 1) / NPDPEPG;
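	/*
	 * e.g. if nkpt_phys works out to ~700 PT pages, two PD pages
	 * (512 PDEs apiece) are enough to point at all of them.
	 */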
928 
929 	/*
930 	 * Allocate pages
931 	 *
932 	 * Normally NKPML4E=1-16 (1-16 kernel PDP page)
933 	 * Normally NKPDPE= NKPML4E*512-1 (511 min kernel PD pages)
934 	 *
935 	 * Only allocate enough PD pages
936 	 * NOTE: We allocate all kernel PD pages up-front, typically
937 	 *	 ~511G of KVM, requiring 511 PD pages.
938 	 */
939 	KPTbase = allocpages(firstaddr, nkpt_base);	/* KERNBASE to end */
940 	KPTphys = allocpages(firstaddr, nkpt_phys);	/* KVA start */
941 	KPML4phys = allocpages(firstaddr, 1);		/* recursive PML4 map */
942 	KPDPphys = allocpages(firstaddr, NKPML4E);	/* kernel PDP pages */
943 	KPDphys = allocpages(firstaddr, nkpd_phys);	/* kernel PD pages */
944 
945 	/*
946 	 * Alloc PD pages for the area starting at KERNBASE.
947 	 */
948 	KPDbase = allocpages(firstaddr, NPDPEPG - KPDPI);
949 
950 	/*
951 	 * Stuff for our DMAP.  Use 2MB pages even when 1GB pages
952 	 * are available in order to allow APU code to adjust page
953 	 * attributes on a fixed grain (see pmap_change_attr()).
954 	 */
955 	DMPDPphys = allocpages(firstaddr, NDMPML4E);
956 #if 1
957 	DMPDphys = allocpages(firstaddr, ndmpdp);
958 #else
959 	if ((amd_feature & AMDID_PAGE1GB) == 0)
960 		DMPDphys = allocpages(firstaddr, ndmpdp);
961 #endif
962 	dmaplimit = (vm_paddr_t)ndmpdp << PDPSHIFT;
963 
964 	/*
965 	 * Fill in the underlying page table pages for the area around
966 	 * KERNBASE.  This remaps low physical memory to KERNBASE.
967 	 *
968 	 * Read-only from zero to physfree
969 	 * XXX not fully used, underneath 2M pages
970 	 */
971 	for (i = 0; (i << PAGE_SHIFT) < *firstaddr; i++) {
972 		((pt_entry_t *)KPTbase)[i] = i << PAGE_SHIFT;
973 		((pt_entry_t *)KPTbase)[i] |=
974 		    pmap_bits_default[PG_RW_IDX] |
975 		    pmap_bits_default[PG_V_IDX] |
976 		    pmap_bits_default[PG_G_IDX];
977 	}
978 
979 	/*
980 	 * Now map the initial kernel page tables.  One block of page
981 	 * tables is placed at the beginning of kernel virtual memory,
982 	 * and another block is placed at KERNBASE to map the kernel binary,
983 	 * data, bss, and initial pre-allocations.
984 	 */
985 	for (i = 0; i < nkpt_base; i++) {
986 		((pd_entry_t *)KPDbase)[i] = KPTbase + (i << PAGE_SHIFT);
987 		((pd_entry_t *)KPDbase)[i] |=
988 		    pmap_bits_default[PG_RW_IDX] |
989 		    pmap_bits_default[PG_V_IDX];
990 	}
991 	for (i = 0; i < nkpt_phys; i++) {
992 		((pd_entry_t *)KPDphys)[i] = KPTphys + (i << PAGE_SHIFT);
993 		((pd_entry_t *)KPDphys)[i] |=
994 		    pmap_bits_default[PG_RW_IDX] |
995 		    pmap_bits_default[PG_V_IDX];
996 	}
997 
998 	/*
999 	 * Map from zero to end of allocations using 2M pages as an
1000 	 * optimization.  This will bypass some of the KPTbase pages
1001 	 * above in the KERNBASE area.
1002 	 */
1003 	for (i = 0; (i << PDRSHIFT) < *firstaddr; i++) {
1004 		((pd_entry_t *)KPDbase)[i] = i << PDRSHIFT;
1005 		((pd_entry_t *)KPDbase)[i] |=
1006 		    pmap_bits_default[PG_RW_IDX] |
1007 		    pmap_bits_default[PG_V_IDX] |
1008 		    pmap_bits_default[PG_PS_IDX] |
1009 		    pmap_bits_default[PG_G_IDX];
1010 	}
1011 
1012 	/*
1013 	 * Load PD addresses into the PDP pages for primary KVA space to
1014 	 * cover existing page tables.  PD's for KERNBASE are handled in
1015 	 * the next loop.
1016 	 *
1017 	 * expected to pre-populate all of its PDs.  See NKPDPE in vmparam.h.
1018 	 */
1019 	for (i = 0; i < nkpd_phys; i++) {
1020 		((pdp_entry_t *)KPDPphys)[NKPML4E * NPDPEPG - NKPDPE + i] =
1021 				KPDphys + (i << PAGE_SHIFT);
1022 		((pdp_entry_t *)KPDPphys)[NKPML4E * NPDPEPG - NKPDPE + i] |=
1023 		    pmap_bits_default[PG_RW_IDX] |
1024 		    pmap_bits_default[PG_V_IDX] |
1025 		    pmap_bits_default[PG_A_IDX];
1026 	}
1027 
1028 	/*
1029 	 * Load PDs for KERNBASE to the end
1030 	 */
1031 	i = (NKPML4E - 1) * NPDPEPG + KPDPI;
1032 	for (j = 0; j < NPDPEPG - KPDPI; ++j) {
1033 		((pdp_entry_t *)KPDPphys)[i + j] =
1034 				KPDbase + (j << PAGE_SHIFT);
1035 		((pdp_entry_t *)KPDPphys)[i + j] |=
1036 		    pmap_bits_default[PG_RW_IDX] |
1037 		    pmap_bits_default[PG_V_IDX] |
1038 		    pmap_bits_default[PG_A_IDX];
1039 	}
1040 
1041 	/*
1042 	 * Now set up the direct map space using either 2MB or 1GB pages
1043 	 * Preset PG_M and PG_A because demotion expects it.
1044 	 *
1045 	 * When filling in entries in the PD pages make sure any excess
1046 	 * entries are set to zero as we allocated enough PD pages
1047 	 *
1048 	 * Stuff for our DMAP.  Use 2MB pages even when 1GB pages
1049 	 * are available in order to allow APU code to adjust page
1050 	 * attributes on a fixed grain (see pmap_change_attr()).
1051 	 */
1052 #if 0
1053 	if ((amd_feature & AMDID_PAGE1GB) == 0)
1054 #endif
1055 	{
1056 		/*
1057 		 * Use 2MB pages
1058 		 */
1059 		for (i = 0; i < NPDEPG * ndmpdp; i++) {
1060 			((pd_entry_t *)DMPDphys)[i] = i << PDRSHIFT;
1061 			((pd_entry_t *)DMPDphys)[i] |=
1062 			    pmap_bits_default[PG_RW_IDX] |
1063 			    pmap_bits_default[PG_V_IDX] |
1064 			    pmap_bits_default[PG_PS_IDX] |
1065 			    pmap_bits_default[PG_G_IDX] |
1066 			    pmap_bits_default[PG_M_IDX] |
1067 			    pmap_bits_default[PG_A_IDX];
1068 		}
1069 
1070 		/*
1071 		 * And the direct map space's PDP
1072 		 */
1073 		for (i = 0; i < ndmpdp; i++) {
1074 			((pdp_entry_t *)DMPDPphys)[i] = DMPDphys +
1075 							(i << PAGE_SHIFT);
1076 			((pdp_entry_t *)DMPDPphys)[i] |=
1077 			    pmap_bits_default[PG_RW_IDX] |
1078 			    pmap_bits_default[PG_V_IDX] |
1079 			    pmap_bits_default[PG_A_IDX];
1080 		}
1081 	}
1082 #if 0
1083 	else {
1084 		/*
1085 		 * 1GB pages
1086 		 */
1087 		for (i = 0; i < ndmpdp; i++) {
1088 			((pdp_entry_t *)DMPDPphys)[i] =
1089 						(vm_paddr_t)i << PDPSHIFT;
1090 			((pdp_entry_t *)DMPDPphys)[i] |=
1091 			    pmap_bits_default[PG_RW_IDX] |
1092 			    pmap_bits_default[PG_V_IDX] |
1093 			    pmap_bits_default[PG_PS_IDX] |
1094 			    pmap_bits_default[PG_G_IDX] |
1095 			    pmap_bits_default[PG_M_IDX] |
1096 			    pmap_bits_default[PG_A_IDX];
1097 		}
1098 	}
1099 #endif
1100 
1101 	/* And recursively map PML4 to itself in order to get PTmap */
1102 	((pdp_entry_t *)KPML4phys)[PML4PML4I] = KPML4phys;
1103 	((pdp_entry_t *)KPML4phys)[PML4PML4I] |=
1104 	    pmap_bits_default[PG_RW_IDX] |
1105 	    pmap_bits_default[PG_V_IDX] |
1106 	    pmap_bits_default[PG_A_IDX];
1107 
1108 	/*
1109 	 * Connect the Direct Map slots up to the PML4
1110 	 */
1111 	for (j = 0; j < NDMPML4E; ++j) {
1112 		((pdp_entry_t *)KPML4phys)[DMPML4I + j] =
1113 		    (DMPDPphys + ((vm_paddr_t)j << PAGE_SHIFT)) |
1114 		    pmap_bits_default[PG_RW_IDX] |
1115 		    pmap_bits_default[PG_V_IDX] |
1116 		    pmap_bits_default[PG_A_IDX];
1117 	}
1118 
1119 	/*
1120 	 * Connect the KVA slot up to the PML4
1121 	 */
1122 	for (j = 0; j < NKPML4E; ++j) {
1123 		((pdp_entry_t *)KPML4phys)[KPML4I + j] =
1124 		    KPDPphys + ((vm_paddr_t)j << PAGE_SHIFT);
1125 		((pdp_entry_t *)KPML4phys)[KPML4I + j] |=
1126 		    pmap_bits_default[PG_RW_IDX] |
1127 		    pmap_bits_default[PG_V_IDX] |
1128 		    pmap_bits_default[PG_A_IDX];
1129 	}
1130 	cpu_mfence();
1131 	cpu_invltlb();
1132 }
1133 
1134 /*
1135  *	Bootstrap the system enough to run with virtual memory.
1136  *
1137  *	On x86_64 this is called after mapping has already been enabled
1138  *	and just syncs the pmap module with what has already been done.
1139  *	[We can't call it easily with mapping off since the kernel is not
1140  *	mapped with PA == VA, hence we would have to relocate every address
1141  *	from the linked base (virtual) address "KERNBASE" to the actual
1142  *	(physical) address starting relative to 0]
1143  */
1144 void
1145 pmap_bootstrap(vm_paddr_t *firstaddr)
1146 {
1147 	vm_offset_t va;
1148 	pt_entry_t *pte;
1149 	int i;
1150 
1151 	KvaStart = VM_MIN_KERNEL_ADDRESS;
1152 	KvaEnd = VM_MAX_KERNEL_ADDRESS;
1153 	KvaSize = KvaEnd - KvaStart;
1154 
1155 	avail_start = *firstaddr;
1156 
1157 	/*
1158 	 * Create an initial set of page tables to run the kernel in.
1159 	 */
1160 	create_pagetables(firstaddr);
1161 
1162 	virtual2_start = KvaStart;
1163 	virtual2_end = PTOV_OFFSET;
1164 
1165 	virtual_start = (vm_offset_t) PTOV_OFFSET + *firstaddr;
1166 	virtual_start = pmap_kmem_choose(virtual_start);
1167 
1168 	virtual_end = VM_MAX_KERNEL_ADDRESS;
1169 
1170 	/* XXX do %cr0 as well */
1171 	load_cr4(rcr4() | CR4_PGE | CR4_PSE);
1172 	load_cr3(KPML4phys);
1173 
1174 	/*
1175 	 * Initialize protection array.
1176 	 */
1177 	x86_64_protection_init();
1178 
1179 	/*
1180 	 * The kernel's pmap is statically allocated so we don't have to use
1181 	 * pmap_create, which is unlikely to work correctly at this part of
1182 	 * the boot sequence (XXX and which no longer exists).
1183 	 */
1184 	kernel_pmap->pm_pml4 = (pdp_entry_t *) (PTOV_OFFSET + KPML4phys);
1185 	kernel_pmap->pm_count = 1;
1186 	CPUMASK_ASSALLONES(kernel_pmap->pm_active);
1187 	RB_INIT(&kernel_pmap->pm_pvroot);
1188 	spin_init(&kernel_pmap->pm_spin, "pmapbootstrap");
1189 	for (i = 0; i < PM_PLACEMARKS; ++i)
1190 		kernel_pmap->pm_placemarks[i] = PM_NOPLACEMARK;
1191 
1192 	/*
1193 	 * Reserve some special page table entries/VA space for temporary
1194 	 * mapping of pages.
1195 	 */
1196 #define	SYSMAP(c, p, v, n)	\
1197 	v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);
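	/*
	 * i.e. each SYSMAP() use below carves 'n' pages of KVA out of the
	 * running 'va' cursor (returned in 'v') and remembers the first
	 * corresponding kernel PTE in 'p', advancing both cursors.
	 */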
1198 
1199 	va = virtual_start;
1200 	pte = vtopte(va);
1201 
1202 	/*
1203 	 * CMAP1/CMAP2 are used for zeroing and copying pages.
1204 	 */
1205 	SYSMAP(caddr_t, CMAP1, CADDR1, 1)
1206 
1207 	/*
1208 	 * Crashdump maps.
1209 	 */
1210 	SYSMAP(caddr_t, pt_crashdumpmap, crashdumpmap, MAXDUMPPGS);
1211 
1212 	/*
1213 	 * ptvmmap is used for reading arbitrary physical pages via
1214 	 * /dev/mem.
1215 	 */
1216 	SYSMAP(caddr_t, ptmmap, ptvmmap, 1)
1217 
1218 	/*
1219 	 * msgbufp is used to map the system message buffer.
1220 	 * XXX msgbufmap is not used.
1221 	 */
1222 	SYSMAP(struct msgbuf *, msgbufmap, msgbufp,
1223 	       atop(round_page(MSGBUF_SIZE)))
1224 
1225 	virtual_start = va;
1226 	virtual_start = pmap_kmem_choose(virtual_start);
1227 
1228 	*CMAP1 = 0;
1229 
1230 	/*
1231 	 * PG_G is terribly broken on SMP because we IPI invltlb's in some
1232 	 * cases rather than invlpg.  Actually, I don't even know why it
1233 	 * works under UP because self-referential page table mappings
1234 	 */
1235 //	pgeflag = 0;
1236 
1237 	cpu_invltlb();
1238 
1239 	/* Initialize the PAT MSR */
1240 	pmap_init_pat();
1241 	pmap_pinit_defaults(kernel_pmap);
1242 
1243 	TUNABLE_INT_FETCH("machdep.pmap_fast_kernel_cpusync",
1244 			  &pmap_fast_kernel_cpusync);
1245 
1246 }
1247 
1248 /*
1249  * Setup the PAT MSR.
1250  */
1251 void
1252 pmap_init_pat(void)
1253 {
1254 	uint64_t pat_msr;
1255 	u_long cr0, cr4;
1256 	int i;
1257 
1258 	/*
1259 	 * Default values mapping PATi,PCD,PWT bits at system reset.
1260 	 * The default values effectively ignore the PATi bit by
1261 	 * repeating the encodings for 0-3 in 4-7, and map the PCD
1262 	 * and PWT bit combinations to the expected PAT types.
1263 	 */
1264 	pat_msr = PAT_VALUE(0, PAT_WRITE_BACK) |	/* 000 */
1265 		  PAT_VALUE(1, PAT_WRITE_THROUGH) |	/* 001 */
1266 		  PAT_VALUE(2, PAT_UNCACHED) |		/* 010 */
1267 		  PAT_VALUE(3, PAT_UNCACHEABLE) |	/* 011 */
1268 		  PAT_VALUE(4, PAT_WRITE_BACK) |	/* 100 */
1269 		  PAT_VALUE(5, PAT_WRITE_THROUGH) |	/* 101 */
1270 		  PAT_VALUE(6, PAT_UNCACHED) |		/* 110 */
1271 		  PAT_VALUE(7, PAT_UNCACHEABLE);	/* 111 */
1272 	pat_pte_index[PAT_WRITE_BACK]	= 0;
1273 	pat_pte_index[PAT_WRITE_THROUGH]= 0         | X86_PG_NC_PWT;
1274 	pat_pte_index[PAT_UNCACHED]	= X86_PG_NC_PCD;
1275 	pat_pte_index[PAT_UNCACHEABLE]	= X86_PG_NC_PCD | X86_PG_NC_PWT;
1276 	pat_pte_index[PAT_WRITE_PROTECTED] = pat_pte_index[PAT_UNCACHEABLE];
1277 	pat_pte_index[PAT_WRITE_COMBINING] = pat_pte_index[PAT_UNCACHEABLE];
1278 
1279 	if (cpu_feature & CPUID_PAT) {
1280 		/*
1281 		 * If we support the PAT then set-up entries for
1282 		 * WRITE_PROTECTED and WRITE_COMBINING using bit patterns
1283 		 * 5 and 6.
1284 		 */
1285 		pat_msr = (pat_msr & ~PAT_MASK(5)) |
1286 			  PAT_VALUE(5, PAT_WRITE_PROTECTED);
1287 		pat_msr = (pat_msr & ~PAT_MASK(6)) |
1288 			  PAT_VALUE(6, PAT_WRITE_COMBINING);
1289 		pat_pte_index[PAT_WRITE_PROTECTED] = X86_PG_PTE_PAT | X86_PG_NC_PWT;
1290 		pat_pte_index[PAT_WRITE_COMBINING] = X86_PG_PTE_PAT | X86_PG_NC_PCD;
1291 
1292 		/*
1293 		 * Then enable the PAT
1294 		 */
1295 
1296 		/* Disable PGE. */
1297 		cr4 = rcr4();
1298 		load_cr4(cr4 & ~CR4_PGE);
1299 
1300 		/* Disable caches (CD = 1, NW = 0). */
1301 		cr0 = rcr0();
1302 		load_cr0((cr0 & ~CR0_NW) | CR0_CD);
1303 
1304 		/* Flushes caches and TLBs. */
1305 		wbinvd();
1306 		cpu_invltlb();
1307 
1308 		/* Update PAT and index table. */
1309 		wrmsr(MSR_PAT, pat_msr);
1310 
1311 		/* Flush caches and TLBs again. */
1312 		wbinvd();
1313 		cpu_invltlb();
1314 
1315 		/* Restore caches and PGE. */
1316 		load_cr0(cr0);
1317 		load_cr4(cr4);
1318 		PatMsr = pat_msr;
1319 	}
1320 
1321 	for (i = 0; i < 8; ++i) {
1322 		pt_entry_t pte;
1323 
1324 		pte = pat_pte_index[i];
1325 		if (pte & X86_PG_PTE_PAT) {
1326 			pte &= ~X86_PG_PTE_PAT;
1327 			pte |= X86_PG_PDE_PAT;
1328 		}
1329 		pat_pde_index[i] = pte;
1330 	}
1331 }
1332 
1333 /*
1334  * Set up large-page (PSE) support for mp startup
1335  */
1336 void
1337 pmap_set_opt(void)
1338 {
1339 	if (cpu_feature & CPUID_PSE) {
1340 		load_cr4(rcr4() | CR4_PSE);
1341 		if (mycpu->gd_cpuid == 0) 	/* only on BSP */
1342 			cpu_invltlb();
1343 	}
1344 
1345 	/*
1346 	 * Check for SMAP support and enable if available.  Must be done
1347 	 * after cr3 is loaded, and on all cores.
1348 	 */
1349 	if (cpu_stdext_feature & CPUID_STDEXT_SMAP) {
1350 		load_cr4(rcr4() | CR4_SMAP);
1351 	}
1352 	if (cpu_stdext_feature & CPUID_STDEXT_SMEP) {
1353 		load_cr4(rcr4() | CR4_SMEP);
1354 	}
1355 }
1356 
1357 /*
1358  * SMAP is just a processor flag, but SMEP can only be enabled
1359  * and disabled via CR4.  We still use the processor flag to
1360  * disable SMAP because the page-fault/trap code checks it, in
1361  * order to allow a page-fault to actually occur.
1362  */
1363 void
1364 smap_smep_disable(void)
1365 {
1366 	/*
1367 	 * disable SMAP.  This also bypasses a software failsafe check
1368 	 * in the trap() code.
1369 	 */
1370 	smap_open();
1371 
1372 	/*
1373 	 * Also needed to bypass a software failsafe check in the trap()
1374 	 * code and allow the userspace address fault from kernel mode
1375 	 * to proceed.
1376 	 *
1377 	 * Note that this will not reload %rip because pcb_onfault_rsp will
1378 	 * not match.  Just setting it to non-NULL is sufficient to bypass
1379 	 * the checks.
1380 	 */
1381 	curthread->td_pcb->pcb_onfault = (void *)1;
1382 
1383 	/*
1384 	 * Disable SMEP (requires modifying cr4)
1385 	 */
1386 	if (cpu_stdext_feature & CPUID_STDEXT_SMEP)
1387 		load_cr4(rcr4() & ~CR4_SMEP);
1388 }
1389 
1390 void
1391 smap_smep_enable(void)
1392 {
1393 	if (cpu_stdext_feature & CPUID_STDEXT_SMEP)
1394 		load_cr4(rcr4() | CR4_SMEP);
1395 	curthread->td_pcb->pcb_onfault = NULL;
1396 	smap_close();
1397 }
1398 
1399 /*
1400  * Early initialization of the pmap module.
1401  *
1402  * Called by vm_init, to initialize any structures that the pmap
1403  * system needs to map virtual memory.  pmap_init has been enhanced to
1404  * support in a fairly consistant way, discontiguous physical memory.
1405  */
1406 void
1407 pmap_init(void)
1408 {
1409 	vm_pindex_t initial_pvs;
1410 	vm_pindex_t i;
1411 
1412 	/*
1413 	 * Allocate memory for random pmap data structures.  Includes the
1414 	 * pv_head_table.
1415 	 */
1416 	for (i = 0; i < vm_page_array_size; i++) {
1417 		vm_page_t m;
1418 
1419 		m = &vm_page_array[i];
1420 		m->md.interlock_count = 0;
1421 	}
1422 
1423 	/*
1424 	 * init the pv free list
1425 	 */
1426 	initial_pvs = vm_page_array_size;
1427 	if (initial_pvs < MINPV)
1428 		initial_pvs = MINPV;
1429 	pvzone = &pvzone_store;
1430 	pvinit = (void *)kmem_alloc(kernel_map,
1431 				    initial_pvs * sizeof (struct pv_entry),
1432 				    VM_SUBSYS_PVENTRY);
1433 	zbootinit(pvzone, "PV ENTRY", sizeof (struct pv_entry),
1434 		  pvinit, initial_pvs);
1435 
1436 	/*
1437 	 * Now it is safe to enable pv_table recording.
1438 	 */
1439 	pmap_initialized = TRUE;
1440 }
1441 
1442 /*
1443  * Initialize the address space (zone) for the pv_entries.  Set a
1444  * high water mark so that the system can recover from excessive
1445  * numbers of pv entries.
1446  *
1447  * Also create the kernel page table template for isolated user
1448  * pmaps.
1449  */
1450 static void pmap_init_iso_range(vm_offset_t base, size_t bytes);
1451 static void pmap_init2_iso_pmap(void);
1452 #if 0
1453 static void dump_pmap(pmap_t pmap, pt_entry_t pte, int level, vm_offset_t base);
1454 #endif
1455 
1456 void
1457 pmap_init2(void)
1458 {
1459 	vm_pindex_t entry_max;
1460 
1461 	/*
1462 	 * We can significantly reduce pv_entry_max from historical
1463 	 * levels because pv_entry's are no longer used for PTEs at the
1464 	 * leaves.  This prevents excessive pcpu caching on many-core
1465 	 * boxes (even with the further '/ 16' done in zinitna()).
1466 	 *
1467 	 * Remember, however, that processes can share physical pages
1468 	 * with each process still needing the pdp/pd/pt infrastructure
1469 	 * (which still use pv_entry's).  And don't just assume that
1470 	 * every PT will be completely filled up.  So don't make it
1471 	 * too small.
1472 	 */
1473 	entry_max = maxproc * 32 + vm_page_array_size / 16;
1474 	TUNABLE_LONG_FETCH("vm.pmap.pv_entries", &entry_max);
1475 	vm_pmap_pv_entries = entry_max;
1476 
1477 	/*
1478 	 * Subtract out pages already installed in the zone (hack)
1479 	 */
1480 	if (entry_max <= MINPV)
1481 		entry_max = MINPV;
1482 
1483 	zinitna(pvzone, NULL, 0, entry_max, ZONE_INTERRUPT);
1484 
1485 	/*
1486 	 * Enable dynamic deletion of empty higher-level page table pages
1487 	 * by default only if system memory is < 8GB (use 7GB for slop).
1488 	 * This can save a little memory, but imposes significant
1489 	 * performance overhead for things like bulk builds, and for programs
1490 	 * which do a lot of memory mapping and memory unmapping.
1491 	 */
1492 #if 0
1493 	if (pmap_dynamic_delete < 0) {
1494 		if (vmstats.v_page_count < 7LL * 1024 * 1024 * 1024 / PAGE_SIZE)
1495 			pmap_dynamic_delete = 1;
1496 		else
1497 			pmap_dynamic_delete = 0;
1498 	}
1499 #endif
1500 	/*
1501 	 * Disable so vm_map_backing iterations do not race
1502 	 */
1503 	pmap_dynamic_delete = 0;
1504 
1505 	/*
1506 	 * Automatic detection of Intel meltdown bug requiring user/kernel
1507 	 * mmap isolation.
1508 	 *
1509 	 * Currently there are so many Intel cpu's impacted that it's better
1510 	 * to whitelist future Intel CPUs.  Most? AMD cpus are not impacted
1511 	 * so the default is off for AMD.
1512 	 */
1513 	if (meltdown_mitigation < 0) {
1514 		if (cpu_vendor_id == CPU_VENDOR_INTEL) {
1515 			meltdown_mitigation = 1;
1516 			if (cpu_ia32_arch_caps & IA32_ARCH_CAP_RDCL_NO)
1517 				meltdown_mitigation = 0;
1518 		} else {
1519 			meltdown_mitigation = 0;
1520 		}
1521 	}
1522 	if (meltdown_mitigation) {
1523 		kprintf("machdep.meltdown_mitigation enabled to "
1524 			"protect against (mostly Intel) meltdown bug\n");
1525 		kprintf("system call performance will be impacted\n");
1526 	}
1527 
1528 	pmap_init2_iso_pmap();
1529 }
1530 
1531 /*
1532  * Create the isolation pmap template.  Once created, the template
1533  * is static and its PML4e entries are used to populate the
1534  * kernel portion of any isolated user pmaps.
1535  *
1536  * Our isolation pmap must contain:
1537  * (1) trampoline area for all cpus
1538  * (2) common_tss area for all cpus (its part of the trampoline area now)
1539  * (3) IDT for all cpus
1540  * (4) GDT for all cpus
1541  */
1542 static void
1543 pmap_init2_iso_pmap(void)
1544 {
1545 	int n;
1546 
1547 	if (bootverbose)
1548 		kprintf("Initialize isolation pmap\n");
1549 
1550 	/*
1551 	 * Try to use our normal API calls to make this easier.  We have
1552 	 * to scrap the shadowed kernel PDPs pmap_pinit() creates for our
1553 	 * iso_pmap.
1554 	 */
1555 	pmap_pinit(&iso_pmap);
1556 	bzero(iso_pmap.pm_pml4, PAGE_SIZE);
1557 
1558 	/*
1559 	 * Install areas needed by the cpu and trampoline.
1560 	 */
1561 	for (n = 0; n < ncpus; ++n) {
1562 		struct privatespace *ps;
1563 
1564 		ps = CPU_prvspace[n];
1565 		pmap_init_iso_range((vm_offset_t)&ps->trampoline,
1566 				    sizeof(ps->trampoline));
1567 		pmap_init_iso_range((vm_offset_t)&ps->dblstack,
1568 				    sizeof(ps->dblstack));
1569 		pmap_init_iso_range((vm_offset_t)&ps->dbgstack,
1570 				    sizeof(ps->dbgstack));
1571 		pmap_init_iso_range((vm_offset_t)&ps->common_tss,
1572 				    sizeof(ps->common_tss));
1573 		pmap_init_iso_range(r_idt_arr[n].rd_base,
1574 				    r_idt_arr[n].rd_limit + 1);
1575 		pmap_init_iso_range((register_t)ps->mdglobaldata.gd_gdt,
1576 				    MAXGDT_LIMIT);
1577 	}
1578 	pmap_init_iso_range((vm_offset_t)(int *)btext,
1579 			    (vm_offset_t)(int *)etext -
1580 			     (vm_offset_t)(int *)btext);
1581 
1582 #if 0
1583 	kprintf("Dump iso_pmap:\n");
1584 	dump_pmap(&iso_pmap, vtophys(iso_pmap.pm_pml4), 0, 0);
1585 	kprintf("\nDump kernel_pmap:\n");
1586 	dump_pmap(kernel_pmap, vtophys(kernel_pmap->pm_pml4), 0, 0);
1587 #endif
1588 }
1589 
1590 /*
1591  * This adds a kernel virtual address range to the isolation pmap.
1592  */
1593 static void
1594 pmap_init_iso_range(vm_offset_t base, size_t bytes)
1595 {
1596 	pv_entry_t pv;
1597 	pv_entry_t pvp;
1598 	pt_entry_t *ptep;
1599 	pt_entry_t pte;
1600 	vm_offset_t va;
1601 
1602 	if (bootverbose) {
1603 		kprintf("isolate %016jx-%016jx (%zd)\n",
1604 			base, base + bytes, bytes);
1605 	}
1606 	va = base & ~(vm_offset_t)PAGE_MASK;
1607 	while (va < base + bytes) {
1608 		if ((va & PDRMASK) == 0 && va + NBPDR <= base + bytes &&
1609 		    (ptep = pmap_pt(kernel_pmap, va)) != NULL &&
1610 		    (*ptep & kernel_pmap->pmap_bits[PG_V_IDX]) &&
1611 		    (*ptep & kernel_pmap->pmap_bits[PG_PS_IDX])) {
1612 			/*
1613 			 * Use 2MB pages if possible
1614 			 */
1615 			pte = *ptep;
1616 			pv = pmap_allocpte(&iso_pmap, pmap_pd_pindex(va), &pvp);
1617 			ptep = pv_pte_lookup(pv, (va >> PDRSHIFT) & 511);
1618 			*ptep = pte;
1619 			va += NBPDR;
1620 		} else {
1621 			/*
1622 			 * Otherwise use 4KB pages
1623 			 */
1624 			pv = pmap_allocpte(&iso_pmap, pmap_pt_pindex(va), &pvp);
1625 			ptep = pv_pte_lookup(pv, (va >> PAGE_SHIFT) & 511);
1626 			*ptep = vtophys(va) | kernel_pmap->pmap_bits[PG_RW_IDX] |
1627 					      kernel_pmap->pmap_bits[PG_V_IDX] |
1628 					      kernel_pmap->pmap_bits[PG_A_IDX] |
1629 					      kernel_pmap->pmap_bits[PG_M_IDX];
1630 
1631 			va += PAGE_SIZE;
1632 		}
1633 		pv_put(pv);
1634 		pv_put(pvp);
1635 	}
1636 }
1637 
1638 #if 0
1639 /*
1640  * Useful debugging pmap dumper, do not remove (#if 0 when not in use)
1641  */
1642 static
1643 void
1644 dump_pmap(pmap_t pmap, pt_entry_t pte, int level, vm_offset_t base)
1645 {
1646 	pt_entry_t *ptp;
1647 	vm_offset_t incr;
1648 	int i;
1649 
1650 	switch(level) {
1651 	case 0:					/* PML4e page, 512G entries */
1652 		incr = (1LL << 48) / 512;
1653 		break;
1654 	case 1:					/* PDP page, 1G entries */
1655 		incr = (1LL << 39) / 512;
1656 		break;
1657 	case 2:					/* PD page, 2MB entries */
1658 		incr = (1LL << 30) / 512;
1659 		break;
1660 	case 3:					/* PT page, 4KB entries */
1661 		incr = (1LL << 21) / 512;
1662 		break;
1663 	default:
1664 		incr = 0;
1665 		break;
1666 	}
1667 
1668 	if (level == 0)
1669 		kprintf("cr3 %016jx @ va=%016jx\n", pte, base);
1670 	ptp = (void *)PHYS_TO_DMAP(pte & ~(pt_entry_t)PAGE_MASK);
1671 	for (i = 0; i < 512; ++i) {
1672 		if (level == 0 && i == 128)
1673 			base += 0xFFFF000000000000LLU;
1674 		if (ptp[i]) {
1675 			kprintf("%*.*s ", level * 4, level * 4, "");
1676 			if (level == 1 && (ptp[i] & 0x180) == 0x180) {
1677 				kprintf("va=%016jx %3d term %016jx (1GB)\n",
1678 					base, i, ptp[i]);
1679 			} else if (level == 2 && (ptp[i] & 0x180) == 0x180) {
1680 				kprintf("va=%016jx %3d term %016jx (2MB)\n",
1681 					base, i, ptp[i]);
1682 			} else if (level == 3) {
1683 				kprintf("va=%016jx %3d term %016jx\n",
1684 					base, i, ptp[i]);
1685 			} else {
1686 				kprintf("va=%016jx %3d deep %016jx\n",
1687 					base, i, ptp[i]);
1688 				dump_pmap(pmap, ptp[i], level + 1, base);
1689 			}
1690 		}
1691 		base += incr;
1692 	}
1693 }
1694 
1695 #endif
1696 
1697 /*
1698  * Typically used to initialize a fictitious page by vm/device_pager.c
1699  */
1700 void
1701 pmap_page_init(struct vm_page *m)
1702 {
1703 	vm_page_init(m);
1704 	m->md.interlock_count = 0;
1705 }
1706 
1707 /***************************************************
1708  * Low level helper routines.....
1709  ***************************************************/
1710 
1711 /*
1712  * Extract the physical page address associated with the map/VA pair.
1713  * The page must be wired for this to work reliably.
1714  */
1715 vm_paddr_t
1716 pmap_extract(pmap_t pmap, vm_offset_t va, void **handlep)
1717 {
1718 	vm_paddr_t rtval;
1719 	pv_entry_t pt_pv;
1720 	pt_entry_t *ptep;
1721 
1722 	rtval = 0;
1723 	if (va >= VM_MAX_USER_ADDRESS) {
1724 		/*
1725 		 * Kernel page directories might be direct-mapped and
1726 		 * there is typically no PV tracking of pte's
1727 		 */
1728 		pd_entry_t *pt;
1729 
1730 		pt = pmap_pt(pmap, va);
1731 		if (pt && (*pt & pmap->pmap_bits[PG_V_IDX])) {
1732 			if (*pt & pmap->pmap_bits[PG_PS_IDX]) {
1733 				rtval = *pt & PG_PS_FRAME;
1734 				rtval |= va & PDRMASK;
1735 			} else {
1736 				ptep = pmap_pt_to_pte(*pt, va);
1737 				if (*pt & pmap->pmap_bits[PG_V_IDX]) {
1738 					rtval = *ptep & PG_FRAME;
1739 					rtval |= va & PAGE_MASK;
1740 				}
1741 			}
1742 		}
1743 		if (handlep)
1744 			*handlep = NULL;
1745 	} else {
1746 		/*
1747 		 * User pages currently do not direct-map the page directory
1748 		 * and some pages might not use managed PVs.  But all PT's
1749 		 * will have a PV.
1750 		 */
1751 		pt_pv = pv_get(pmap, pmap_pt_pindex(va), NULL);
1752 		if (pt_pv) {
1753 			ptep = pv_pte_lookup(pt_pv, pmap_pte_index(va));
1754 			if (*ptep & pmap->pmap_bits[PG_V_IDX]) {
1755 				rtval = *ptep & PG_FRAME;
1756 				rtval |= va & PAGE_MASK;
1757 			}
1758 			if (handlep)
1759 				*handlep = pt_pv;	/* locked until done */
1760 			else
1761 				pv_put (pt_pv);
1762 		} else if (handlep) {
1763 			*handlep = NULL;
1764 		}
1765 	}
1766 	return rtval;
1767 }
1768 
1769 void
1770 pmap_extract_done(void *handle)
1771 {
1772 	if (handle)
1773 		pv_put((pv_entry_t)handle);
1774 }
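
/*
 * Illustrative usage sketch (kept under #if 0 like the debug dumper above,
 * and not referenced by the kernel).  It shows the handle form of
 * pmap_extract(): for user VAs the returned handle keeps the page table
 * pv locked until pmap_extract_done() is called; for kernel VAs the handle
 * is NULL and pmap_extract_done() is a no-op.  The function name below is
 * hypothetical.
 */
#if 0
static vm_paddr_t
example_extract_locked(pmap_t pmap, vm_offset_t va)
{
	void *handle;
	vm_paddr_t pa;

	pa = pmap_extract(pmap, va, &handle);
	if (pa) {
		/* pa is stable while the handle (if any) remains held */
	}
	pmap_extract_done(handle);	/* NULL handle is ok */
	return pa;
}
#endif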
1775 
1776 /*
1777  * Similar to extract but checks protections, SMP-friendly short-cut for
1778  * vm_fault_page[_quick]().  Can return NULL to cause the caller to
1779  * fall-through to the real fault code.  Does not work with HVM page
1780  * tables.
1781  *
1782  * if busyp is NULL the returned page, if not NULL, is held (and not busied).
1783  *
1784  * If busyp is not NULL and this function sets *busyp non-zero, the returned
1785  * page is busied (and not held).
1786  *
1787  * If busyp is not NULL and this function sets *busyp to zero, the returned
1788  * page is held (and not busied).
1789  *
1790  * If VM_PROT_WRITE is set in prot, and the pte is already writable, the
1791  * returned page will be dirtied.  If the pte is not already writable, NULL
1792  * is returned.  In other words, if the bit is set and a vm_page_t is returned,
1793  * any COW will already have happened and that page can be written by the
1794  * caller.
1795  *
1796  * WARNING! THE RETURNED PAGE IS ONLY HELD AND NOT SUITABLE FOR READING
1797  *	    OR WRITING AS-IS.
1798  */
1799 vm_page_t
1800 pmap_fault_page_quick(pmap_t pmap, vm_offset_t va, vm_prot_t prot, int *busyp)
1801 {
1802 	if (pmap &&
1803 	    va < VM_MAX_USER_ADDRESS &&
1804 	    (pmap->pm_flags & PMAP_HVM) == 0) {
1805 		pv_entry_t pt_pv;
1806 		pv_entry_t pte_pv;
1807 		pt_entry_t *ptep;
1808 		pt_entry_t req;
1809 		vm_page_t m;
1810 		int error;
1811 
1812 		req = pmap->pmap_bits[PG_V_IDX] |
1813 		      pmap->pmap_bits[PG_U_IDX];
1814 		if (prot & VM_PROT_WRITE)
1815 			req |= pmap->pmap_bits[PG_RW_IDX];
1816 
1817 		pt_pv = pv_get(pmap, pmap_pt_pindex(va), NULL);
1818 		if (pt_pv == NULL)
1819 			return (NULL);
1820 		ptep = pv_pte_lookup(pt_pv, pmap_pte_index(va));
1821 		if ((*ptep & req) != req) {
1822 			pv_put(pt_pv);
1823 			return (NULL);
1824 		}
1825 		pte_pv = pv_get_try(pmap, pmap_pte_pindex(va), NULL, &error);
1826 		if (pte_pv && error == 0) {
1827 			m = pte_pv->pv_m;
1828 			if (prot & VM_PROT_WRITE) {
1829 				/* interlocked by presence of pv_entry */
1830 				vm_page_dirty(m);
1831 			}
1832 			if (busyp) {
1833 				if (prot & VM_PROT_WRITE) {
1834 					if (vm_page_busy_try(m, TRUE))
1835 						m = NULL;
1836 					*busyp = 1;
1837 				} else {
1838 					vm_page_hold(m);
1839 					*busyp = 0;
1840 				}
1841 			} else {
1842 				vm_page_hold(m);
1843 			}
1844 			pv_put(pte_pv);
1845 		} else if (pte_pv) {
1846 			pv_drop(pte_pv);
1847 			m = NULL;
1848 		} else {
1849 			/* error, since we didn't request a placemarker */
1850 			m = NULL;
1851 		}
1852 		pv_put(pt_pv);
1853 		return(m);
1854 	} else {
1855 		return(NULL);
1856 	}
1857 }
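
/*
 * Illustrative caller sketch for pmap_fault_page_quick() (hypothetical,
 * under #if 0).  It demonstrates the disposition rules documented above:
 * with a non-NULL busyp the page comes back either busied (*busyp != 0,
 * release with vm_page_wakeup()) or held (*busyp == 0, release with
 * vm_page_unhold()); vm_page_unhold() is assumed to be the hold-release
 * counterpart of vm_page_hold().
 */
#if 0
static void
example_fault_quick(pmap_t pmap, vm_offset_t va)
{
	vm_page_t m;
	int busied;

	m = pmap_fault_page_quick(pmap, va, VM_PROT_READ, &busied);
	if (m == NULL)
		return;		/* caller falls through to the real fault */
	/* ... inspect the page ... */
	if (busied)
		vm_page_wakeup(m);	/* was returned busied */
	else
		vm_page_unhold(m);	/* was returned held */
}
#endif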
1858 
1859 /*
1860  * Extract the physical page address associated with a kernel virtual address.
1861  */
1862 vm_paddr_t
1863 pmap_kextract(vm_offset_t va)
1864 {
1865 	pd_entry_t pt;		/* pt entry in pd */
1866 	vm_paddr_t pa;
1867 
1868 	if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) {
1869 		pa = DMAP_TO_PHYS(va);
1870 	} else {
1871 		pt = *vtopt(va);
1872 		if (pt & kernel_pmap->pmap_bits[PG_PS_IDX]) {
1873 			pa = (pt & PG_PS_FRAME) | (va & PDRMASK);
1874 		} else {
1875 			/*
1876 			 * Beware of a concurrent promotion that changes the
1877 			 * PDE at this point!  For example, vtopte() must not
1878 			 * be used to access the PTE because it would use the
1879 			 * new PDE.  It is, however, safe to use the old PDE
1880 			 * because the page table page is preserved by the
1881 			 * promotion.
1882 			 */
1883 			pa = *pmap_pt_to_pte(pt, va);
1884 			pa = (pa & PG_FRAME) | (va & PAGE_MASK);
1885 		}
1886 	}
1887 	return pa;
1888 }
1889 
1890 /***************************************************
1891  * Low level mapping routines.....
1892  ***************************************************/
1893 
1894 /*
1895  * Add a wired page to the KVA and invalidate the mapping on all CPUs.
1896  */
1897 void
1898 pmap_kenter(vm_offset_t va, vm_paddr_t pa)
1899 {
1900 	pt_entry_t *ptep;
1901 	pt_entry_t npte;
1902 
1903 	npte = pa |
1904 	       kernel_pmap->pmap_bits[PG_RW_IDX] |
1905 	       kernel_pmap->pmap_bits[PG_V_IDX];
1906 //	       pgeflag;
1907 	ptep = vtopte(va);
1908 #if 1
1909 	pmap_inval_smp(kernel_pmap, va, 1, ptep, npte);
1910 #else
1911 	/* FUTURE */
1912 	if (*ptep)
1913 		pmap_inval_smp(kernel_pmap, va, ptep, npte);
1914 	else
1915 		*ptep = npte;
1916 #endif
1917 }
1918 
1919 /*
1920  * Similar to pmap_kenter(), except we only invalidate the mapping on the
1921  * current CPU.  Returns 0 if the previous pte was 0, 1 if it wasn't
1922  * (caller can conditionalize calling smp_invltlb()).
1923  */
1924 int
1925 pmap_kenter_quick(vm_offset_t va, vm_paddr_t pa)
1926 {
1927 	pt_entry_t *ptep;
1928 	pt_entry_t npte;
1929 	int res;
1930 
1931 	npte = pa | kernel_pmap->pmap_bits[PG_RW_IDX] |
1932 		    kernel_pmap->pmap_bits[PG_V_IDX];
1933 	// npte |= pgeflag;
1934 	ptep = vtopte(va);
1935 #if 1
1936 	res = 1;
1937 #else
1938 	/* FUTURE */
1939 	res = (*ptep != 0);
1940 #endif
1941 	atomic_swap_long(ptep, npte);
1942 	cpu_invlpg((void *)va);
1943 
1944 	return res;
1945 }
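
/*
 * Illustrative fragment (hypothetical, under #if 0) showing how a caller
 * could batch pmap_kenter_quick() calls and conditionalize the global TLB
 * invalidation on the return value, per the comment above.  'n', 'va' and
 * 'pa' are assumed locals; with the current #if 1 code path the function
 * always returns 1, so this is the intended future usage pattern.
 */
#if 0
	int need_invltlb = 0;
	int i;

	for (i = 0; i < n; ++i)
		need_invltlb |= pmap_kenter_quick(va + i * PAGE_SIZE, pa[i]);
	if (need_invltlb)
		smp_invltlb();
#endif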
1946 
1947 /*
1948  * Enter addresses into the kernel pmap but don't bother
1949  * doing any tlb invalidations.  Caller will do a rollup
1950  * invalidation via pmap_rollup_inval().
1951  */
1952 int
1953 pmap_kenter_noinval(vm_offset_t va, vm_paddr_t pa)
1954 {
1955 	pt_entry_t *ptep;
1956 	pt_entry_t npte;
1957 	int res;
1958 
1959 	npte = pa |
1960 	    kernel_pmap->pmap_bits[PG_RW_IDX] |
1961 	    kernel_pmap->pmap_bits[PG_V_IDX];
1962 //	    pgeflag;
1963 	ptep = vtopte(va);
1964 #if 1
1965 	res = 1;
1966 #else
1967 	/* FUTURE */
1968 	res = (*ptep != 0);
1969 #endif
1970 	atomic_swap_long(ptep, npte);
1971 	cpu_invlpg((void *)va);
1972 
1973 	return res;
1974 }
1975 
1976 /*
1977  * remove a page from the kernel pagetables
1978  */
1979 void
1980 pmap_kremove(vm_offset_t va)
1981 {
1982 	pt_entry_t *ptep;
1983 
1984 	ptep = vtopte(va);
1985 	pmap_inval_smp(kernel_pmap, va, 1, ptep, 0);
1986 }
1987 
1988 void
1989 pmap_kremove_quick(vm_offset_t va)
1990 {
1991 	pt_entry_t *ptep;
1992 
1993 	ptep = vtopte(va);
1994 	atomic_readandclear_long(ptep);
1995 	cpu_invlpg((void *)va);
1996 }
1997 
1998 /*
1999  * Remove addresses from the kernel pmap but don't bother
2000  * doing any tlb invalidations.  Caller will do a rollup
2001  * invalidation via pmap_rollup_inval().
2002  */
2003 void
2004 pmap_kremove_noinval(vm_offset_t va)
2005 {
2006 	pt_entry_t *ptep;
2007 
2008 	ptep = vtopte(va);
2009 	atomic_readandclear_long(ptep);
2010 }
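
/*
 * Illustrative sketch (hypothetical, under #if 0) of the batch pattern the
 * *_noinval routines are meant for: enter or remove a run of kernel
 * mappings with no per-page invalidation, then issue one ranged
 * invalidation at the end.  Real callers use pmap_rollup_inval() as
 * described above; this sketch uses pmap_invalidate_range(), defined below,
 * because its signature is visible in this file.
 */
#if 0
static void
example_kenter_batch(vm_offset_t beg_va, vm_paddr_t *pa, int count)
{
	vm_offset_t va = beg_va;
	int i;

	for (i = 0; i < count; ++i, va += PAGE_SIZE)
		pmap_kenter_noinval(va, pa[i]);
	pmap_invalidate_range(kernel_pmap, beg_va, va);
}
#endif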
2011 
2012 /*
2013  * XXX these need to be recoded.  They are not used in any critical path.
2014  */
2015 void
2016 pmap_kmodify_rw(vm_offset_t va)
2017 {
2018 	atomic_set_long(vtopte(va), kernel_pmap->pmap_bits[PG_RW_IDX]);
2019 	cpu_invlpg((void *)va);
2020 }
2021 
2022 /* NOT USED
2023 void
2024 pmap_kmodify_nc(vm_offset_t va)
2025 {
2026 	atomic_set_long(vtopte(va), PG_N);
2027 	cpu_invlpg((void *)va);
2028 }
2029 */
2030 
2031 /*
2032  * Used to map a range of physical addresses into kernel virtual
2033  * address space during the low level boot, typically to map the
2034  * dump bitmap, message buffer, and vm_page_array.
2035  *
2036  * These mappings are typically made at some point after the end of the
2037  * kernel text+data.
2038  *
2039  * We could return PHYS_TO_DMAP(start) here and not allocate any KVA
2040  * via (*virtp), but then kmem from userland and kernel dumps won't
2041  * have access to the related pointers.
2042  */
2043 vm_offset_t
2044 pmap_map(vm_offset_t *virtp, vm_paddr_t start, vm_paddr_t end, int prot)
2045 {
2046 	vm_offset_t va;
2047 	vm_offset_t va_start;
2048 
2049 	/*return PHYS_TO_DMAP(start);*/
2050 
2051 	va_start = *virtp;
2052 	va = va_start;
2053 
2054 	while (start < end) {
2055 		pmap_kenter_quick(va, start);
2056 		va += PAGE_SIZE;
2057 		start += PAGE_SIZE;
2058 	}
2059 	*virtp = va;
2060 	return va_start;
2061 }
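
/*
 * Illustrative early-boot fragment (hypothetical, under #if 0).  It shows
 * how pmap_map() consumes KVA via (*virtp) and returns the base of the new
 * mapping.  'virtual_start', 'start_pa' and 'end_pa' are illustrative
 * names only, not globals defined by this file.
 */
#if 0
	vm_offset_t va;

	va = pmap_map(&virtual_start, start_pa, end_pa,
		      VM_PROT_READ | VM_PROT_WRITE);
	/* virtual_start has been advanced past the new mapping */
#endif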
2062 
2063 #define PMAP_CLFLUSH_THRESHOLD  (2 * 1024 * 1024)
2064 
2065 /*
2066  * Remove the specified set of pages from the data and instruction caches.
2067  *
2068  * In contrast to pmap_invalidate_cache_range(), this function does not
2069  * rely on the CPU's self-snoop feature, because it is intended for use
2070  * when moving pages into a different cache domain.
2071  */
2072 void
2073 pmap_invalidate_cache_pages(vm_page_t *pages, int count)
2074 {
2075 	vm_offset_t daddr, eva;
2076 	int i;
2077 
2078 	if (count >= PMAP_CLFLUSH_THRESHOLD / PAGE_SIZE ||
2079 	    (cpu_feature & CPUID_CLFSH) == 0)
2080 		wbinvd();
2081 	else {
2082 		cpu_mfence();
2083 		for (i = 0; i < count; i++) {
2084 			daddr = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pages[i]));
2085 			eva = daddr + PAGE_SIZE;
2086 			for (; daddr < eva; daddr += cpu_clflush_line_size)
2087 				clflush(daddr);
2088 		}
2089 		cpu_mfence();
2090 	}
2091 }
2092 
2093 void
2094 pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
2095 {
2096 	KASSERT((sva & PAGE_MASK) == 0,
2097 	    ("pmap_invalidate_cache_range: sva not page-aligned"));
2098 	KASSERT((eva & PAGE_MASK) == 0,
2099 	    ("pmap_invalidate_cache_range: eva not page-aligned"));
2100 
2101 	if (cpu_feature & CPUID_SS) {
2102 		; /* If "Self Snoop" is supported, do nothing. */
2103 	} else {
2104 		/* Globally invalidate caches */
2105 		cpu_wbinvd_on_all_cpus();
2106 	}
2107 }
2108 
2109 /*
2110  * Invalidate the specified range of virtual memory on all cpus associated
2111  * with the pmap.
2112  */
2113 void
2114 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
2115 {
2116 	pmap_inval_smp(pmap, sva, (eva - sva) >> PAGE_SHIFT, NULL, 0);
2117 }
2118 
2119 /*
2120  * Add a list of wired pages to the kva.  This routine is used for temporary
2121  * kernel mappings such as those found in buffer cache buffers.  Page
2122  * modifications and accesses are not tracked or recorded.
2123  *
2124  * NOTE! Old mappings are simply overwritten, and we cannot assume relaxed
2125  *	 semantics as previous mappings may have been zeroed without any
2126  *	 invalidation.
2127  *
2128  * The page *must* be wired.
2129  */
2130 static __inline void
2131 _pmap_qenter(vm_offset_t beg_va, vm_page_t *m, int count, int doinval)
2132 {
2133 	vm_offset_t end_va;
2134 	vm_offset_t va;
2135 
2136 	end_va = beg_va + count * PAGE_SIZE;
2137 
2138 	for (va = beg_va; va < end_va; va += PAGE_SIZE) {
2139 		pt_entry_t pte;
2140 		pt_entry_t *ptep;
2141 
2142 		ptep = vtopte(va);
2143 		pte = VM_PAGE_TO_PHYS(*m) |
2144 			kernel_pmap->pmap_bits[PG_RW_IDX] |
2145 			kernel_pmap->pmap_bits[PG_V_IDX] |
2146 			kernel_pmap->pmap_cache_bits_pte[(*m)->pat_mode];
2147 //		pgeflag;
2148 		atomic_swap_long(ptep, pte);
2149 		m++;
2150 	}
2151 	if (doinval)
2152 		pmap_invalidate_range(kernel_pmap, beg_va, end_va);
2153 }
2154 
2155 void
2156 pmap_qenter(vm_offset_t beg_va, vm_page_t *m, int count)
2157 {
2158 	_pmap_qenter(beg_va, m, count, 1);
2159 }
2160 
2161 void
2162 pmap_qenter_noinval(vm_offset_t beg_va, vm_page_t *m, int count)
2163 {
2164 	_pmap_qenter(beg_va, m, count, 0);
2165 }
2166 
2167 /*
2168  * This routine jerks page mappings from the kernel -- it is meant only
2169  * for temporary mappings such as those found in buffer cache buffers.
2170  * No modified or accessed status is recorded.
2171  *
2172  * MPSAFE, INTERRUPT SAFE (cluster callback)
2173  */
2174 void
2175 pmap_qremove(vm_offset_t beg_va, int count)
2176 {
2177 	vm_offset_t end_va;
2178 	vm_offset_t va;
2179 
2180 	end_va = beg_va + count * PAGE_SIZE;
2181 
2182 	for (va = beg_va; va < end_va; va += PAGE_SIZE) {
2183 		pt_entry_t *pte;
2184 
2185 		pte = vtopte(va);
2186 		atomic_readandclear_long(pte);
2187 		cpu_invlpg((void *)va);
2188 	}
2189 	pmap_invalidate_range(kernel_pmap, beg_va, end_va);
2190 }
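
/*
 * Illustrative sketch (hypothetical, under #if 0) of the typical
 * buffer-cache style pairing of pmap_qenter()/pmap_qremove() on a wired,
 * preallocated KVA reservation.
 */
#if 0
static void
example_qmap(vm_offset_t kva, vm_page_t *mlist, int count)
{
	pmap_qenter(kva, mlist, count);		/* map + invalidate */
	/* ... access the pages through kva ... */
	pmap_qremove(kva, count);		/* unmap + invalidate */
}
#endif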
2191 
2192 /*
2193  * This routine removes temporary kernel mappings, only invalidating them
2194  * on the current cpu.  It should only be used under carefully controlled
2195  * conditions.
2196  */
2197 void
2198 pmap_qremove_quick(vm_offset_t beg_va, int count)
2199 {
2200 	vm_offset_t end_va;
2201 	vm_offset_t va;
2202 
2203 	end_va = beg_va + count * PAGE_SIZE;
2204 
2205 	for (va = beg_va; va < end_va; va += PAGE_SIZE) {
2206 		pt_entry_t *pte;
2207 
2208 		pte = vtopte(va);
2209 		atomic_readandclear_long(pte);
2210 		cpu_invlpg((void *)va);
2211 	}
2212 }
2213 
2214 /*
2215  * This routine removes temporary kernel mappings *without* invalidating
2216  * the TLB.  It can only be used on permanent kva reservations such as those
2217  * found in buffer cache buffers, under carefully controlled circumstances.
2218  *
2219  * NOTE: Repopulating these KVAs requires unconditional invalidation.
2220  *	 (pmap_qenter() does unconditional invalidation).
2221  */
2222 void
2223 pmap_qremove_noinval(vm_offset_t beg_va, int count)
2224 {
2225 	vm_offset_t end_va;
2226 	vm_offset_t va;
2227 
2228 	end_va = beg_va + count * PAGE_SIZE;
2229 
2230 	for (va = beg_va; va < end_va; va += PAGE_SIZE) {
2231 		pt_entry_t *pte;
2232 
2233 		pte = vtopte(va);
2234 		atomic_readandclear_long(pte);
2235 	}
2236 }
2237 
2238 /*
2239  * Create a new thread and optionally associate it with a (new) process.
2240  * NOTE! the new thread's cpu may not equal the current cpu.
2241  */
2242 void
2243 pmap_init_thread(thread_t td)
2244 {
2245 	/* enforce pcb placement & alignment */
2246 	td->td_pcb = (struct pcb *)(td->td_kstack + td->td_kstack_size) - 1;
2247 	td->td_pcb = (struct pcb *)((intptr_t)td->td_pcb & ~(intptr_t)0xF);
2248 	td->td_savefpu = &td->td_pcb->pcb_save;
2249 	td->td_sp = (char *)td->td_pcb;	/* no -16 */
2250 }
2251 
2252 /*
2253  * This routine directly affects the fork performance of a process.
2254  */
2255 void
2256 pmap_init_proc(struct proc *p)
2257 {
2258 }
2259 
2260 static void
2261 pmap_pinit_defaults(struct pmap *pmap)
2262 {
2263 	bcopy(pmap_bits_default, pmap->pmap_bits,
2264 	      sizeof(pmap_bits_default));
2265 	bcopy(protection_codes, pmap->protection_codes,
2266 	      sizeof(protection_codes));
2267 	bcopy(pat_pte_index, pmap->pmap_cache_bits_pte,
2268 	      sizeof(pat_pte_index));
2269 	bcopy(pat_pde_index, pmap->pmap_cache_bits_pde,
2270 	      sizeof(pat_pde_index));
2271 	pmap->pmap_cache_mask_pte = X86_PG_NC_PWT | X86_PG_NC_PCD | X86_PG_PTE_PAT;
2272 	pmap->pmap_cache_mask_pde = X86_PG_NC_PWT | X86_PG_NC_PCD | X86_PG_PDE_PAT;
2273 	pmap->copyinstr = std_copyinstr;
2274 	pmap->copyin = std_copyin;
2275 	pmap->copyout = std_copyout;
2276 	pmap->fubyte = std_fubyte;
2277 	pmap->subyte = std_subyte;
2278 	pmap->fuword32 = std_fuword32;
2279 	pmap->fuword64 = std_fuword64;
2280 	pmap->suword32 = std_suword32;
2281 	pmap->suword64 = std_suword64;
2282 	pmap->swapu32 = std_swapu32;
2283 	pmap->swapu64 = std_swapu64;
2284 	pmap->fuwordadd32 = std_fuwordadd32;
2285 	pmap->fuwordadd64 = std_fuwordadd64;
2286 }
2287 
2288 /*
2289  * Initialize pmap0/vmspace0.
2290  *
2291  * On architectures where the kernel pmap is not integrated into the user
2292  * process pmap, this pmap represents the process pmap, not the kernel pmap.
2293  * kernel_pmap should be used to directly access the kernel pmap.
2294  */
2295 void
2296 pmap_pinit0(struct pmap *pmap)
2297 {
2298 	int i;
2299 
2300 	pmap->pm_pml4 = (pml4_entry_t *)(PTOV_OFFSET + KPML4phys);
2301 	pmap->pm_count = 1;
2302 	CPUMASK_ASSZERO(pmap->pm_active);
2303 	pmap->pm_pvhint_pt = NULL;
2304 	pmap->pm_pvhint_unused = NULL;
2305 	RB_INIT(&pmap->pm_pvroot);
2306 	spin_init(&pmap->pm_spin, "pmapinit0");
2307 	for (i = 0; i < PM_PLACEMARKS; ++i)
2308 		pmap->pm_placemarks[i] = PM_NOPLACEMARK;
2309 	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
2310 	pmap_pinit_defaults(pmap);
2311 }
2312 
2313 /*
2314  * Initialize a preallocated and zeroed pmap structure,
2315  * such as one in a vmspace structure.
2316  */
2317 static void
2318 pmap_pinit_simple(struct pmap *pmap)
2319 {
2320 	int i;
2321 
2322 	/*
2323 	 * Misc initialization
2324 	 */
2325 	pmap->pm_count = 1;
2326 	CPUMASK_ASSZERO(pmap->pm_active);
2327 	pmap->pm_pvhint_pt = NULL;
2328 	pmap->pm_pvhint_unused = NULL;
2329 	pmap->pm_flags = PMAP_FLAG_SIMPLE;
2330 
2331 	pmap_pinit_defaults(pmap);
2332 
2333 	/*
2334 	 * Don't blow up locks/tokens on re-use (XXX fix/use drop code
2335 	 * for this).
2336 	 */
2337 	if (pmap->pm_pmlpv == NULL) {
2338 		RB_INIT(&pmap->pm_pvroot);
2339 		bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
2340 		spin_init(&pmap->pm_spin, "pmapinitsimple");
2341 		for (i = 0; i < PM_PLACEMARKS; ++i)
2342 			pmap->pm_placemarks[i] = PM_NOPLACEMARK;
2343 	}
2344 }
2345 
2346 void
2347 pmap_pinit(struct pmap *pmap)
2348 {
2349 	pv_entry_t pv;
2350 	int j;
2351 
2352 	if (pmap->pm_pmlpv) {
2353 		/* Completely clear the cached pmap if not REGULAR_PMAP. */
2354 		if (pmap->pmap_bits[TYPE_IDX] != REGULAR_PMAP) {
2355 			pmap_puninit(pmap);
2356 		}
2357 	}
2358 
2359 	pmap_pinit_simple(pmap);
2360 	pmap->pm_flags &= ~PMAP_FLAG_SIMPLE;
2361 
2362 	/*
2363 	 * No need to allocate page table space yet but we do need a valid
2364 	 * page directory table.
2365 	 */
2366 	if (pmap->pm_pml4 == NULL) {
2367 		pmap->pm_pml4 =
2368 		    (pml4_entry_t *)kmem_alloc_pageable(kernel_map,
2369 							PAGE_SIZE * 2,
2370 							VM_SUBSYS_PML4);
2371 		pmap->pm_pml4_iso = (void *)((char *)pmap->pm_pml4 + PAGE_SIZE);
2372 	}
2373 
2374 	/*
2375 	 * Allocate the PML4e table, which wires it even though it isn't
2376 	 * being entered into some higher level page table (it being the
2377 	 * highest level).  If one is already cached we don't have to do
2378 	 * anything.
2379 	 */
2380 	if ((pv = pmap->pm_pmlpv) == NULL) {
2381 		pv = pmap_allocpte(pmap, pmap_pml4_pindex(), NULL);
2382 		pmap->pm_pmlpv = pv;
2383 		pmap_kenter((vm_offset_t)pmap->pm_pml4,
2384 			    VM_PAGE_TO_PHYS(pv->pv_m));
2385 		pv_put(pv);
2386 
2387 		/*
2388 		 * Install DMAP and KMAP.
2389 		 */
2390 		for (j = 0; j < NDMPML4E; ++j) {
2391 			pmap->pm_pml4[DMPML4I + j] =
2392 			    (DMPDPphys + ((vm_paddr_t)j << PAGE_SHIFT)) |
2393 			    pmap->pmap_bits[PG_RW_IDX] |
2394 			    pmap->pmap_bits[PG_V_IDX] |
2395 			    pmap->pmap_bits[PG_A_IDX];
2396 		}
2397 		for (j = 0; j < NKPML4E; ++j) {
2398 			pmap->pm_pml4[KPML4I + j] =
2399 			    (KPDPphys + ((vm_paddr_t)j << PAGE_SHIFT)) |
2400 			    pmap->pmap_bits[PG_RW_IDX] |
2401 			    pmap->pmap_bits[PG_V_IDX] |
2402 			    pmap->pmap_bits[PG_A_IDX];
2403 		}
2404 
2405 		/*
2406 		 * install self-referential address mapping entry
2407 		 */
2408 		pmap->pm_pml4[PML4PML4I] = VM_PAGE_TO_PHYS(pv->pv_m) |
2409 		    pmap->pmap_bits[PG_V_IDX] |
2410 		    pmap->pmap_bits[PG_RW_IDX] |
2411 		    pmap->pmap_bits[PG_A_IDX];
2412 	} else {
2413 		KKASSERT(pv->pv_m->flags & PG_MAPPED);
2414 		KKASSERT(pv->pv_m->flags & PG_WRITEABLE);
2415 	}
2416 	KKASSERT(pmap->pm_pml4[255] == 0);
2417 
2418 	/*
2419 	 * When implementing an isolated userland pmap, a second PML4e table
2420 	 * is needed.  We use pmap_pml4_pindex() + 1 for convenience, but
2421 	 * note that we do not operate on this table using our API functions
2422 	 * so handling of the + 1 case is mostly just to prevent implosions.
2423 	 *
2424 	 * We install an isolated version of the kernel PDPs into this
2425 	 * second PML4e table.  The pmap code will mirror all user PDPs
2426 	 * between the primary and secondary PML4e table.
2427 	 */
2428 	if ((pv = pmap->pm_pmlpv_iso) == NULL && meltdown_mitigation &&
2429 	    pmap != &iso_pmap) {
2430 		pv = pmap_allocpte(pmap, pmap_pml4_pindex() + 1, NULL);
2431 		pmap->pm_pmlpv_iso = pv;
2432 		pmap_kenter((vm_offset_t)pmap->pm_pml4_iso,
2433 			    VM_PAGE_TO_PHYS(pv->pv_m));
2434 		pv_put(pv);
2435 
2436 		/*
2437 		 * Install an isolated version of the kernel pmap for
2438 		 * user consumption, using PDPs constructed in iso_pmap.
2439 		 */
2440 		for (j = 0; j < NKPML4E; ++j) {
2441 			pmap->pm_pml4_iso[KPML4I + j] =
2442 				iso_pmap.pm_pml4[KPML4I + j];
2443 		}
2444 	} else if (pv) {
2445 		KKASSERT(pv->pv_m->flags & PG_MAPPED);
2446 		KKASSERT(pv->pv_m->flags & PG_WRITEABLE);
2447 	}
2448 }
2449 
2450 /*
2451  * Clean up a pmap structure so it can be physically freed.  This routine
2452  * is called by the vmspace dtor function.  A great deal of pmap data is
2453  * left passively mapped to improve vmspace management so we have a bit
2454  * of cleanup work to do here.
2455  */
2456 void
2457 pmap_puninit(pmap_t pmap)
2458 {
2459 	pv_entry_t pv;
2460 	vm_page_t p;
2461 
2462 	KKASSERT(CPUMASK_TESTZERO(pmap->pm_active));
2463 	if ((pv = pmap->pm_pmlpv) != NULL) {
2464 		if (pv_hold_try(pv) == 0)
2465 			pv_lock(pv);
2466 		KKASSERT(pv == pmap->pm_pmlpv);
2467 		p = pmap_remove_pv_page(pv, 1);
2468 		pv_free(pv, NULL);
2469 		pv = NULL;	/* safety */
2470 		pmap_kremove((vm_offset_t)pmap->pm_pml4);
2471 		vm_page_busy_wait(p, FALSE, "pgpun");
2472 		KKASSERT(p->flags & PG_UNQUEUED);
2473 		vm_page_unwire(p, 0);
2474 		vm_page_flag_clear(p, PG_MAPPED | PG_WRITEABLE);
2475 		vm_page_free(p);
2476 		pmap->pm_pmlpv = NULL;
2477 	}
2478 	if ((pv = pmap->pm_pmlpv_iso) != NULL) {
2479 		if (pv_hold_try(pv) == 0)
2480 			pv_lock(pv);
2481 		KKASSERT(pv == pmap->pm_pmlpv_iso);
2482 		p = pmap_remove_pv_page(pv, 1);
2483 		pv_free(pv, NULL);
2484 		pv = NULL;	/* safety */
2485 		pmap_kremove((vm_offset_t)pmap->pm_pml4_iso);
2486 		vm_page_busy_wait(p, FALSE, "pgpun");
2487 		KKASSERT(p->flags & PG_UNQUEUED);
2488 		vm_page_unwire(p, 0);
2489 		vm_page_flag_clear(p, PG_MAPPED | PG_WRITEABLE);
2490 		vm_page_free(p);
2491 		pmap->pm_pmlpv_iso = NULL;
2492 	}
2493 	if (pmap->pm_pml4) {
2494 		KKASSERT(pmap->pm_pml4 != (void *)(PTOV_OFFSET + KPML4phys));
2495 		kmem_free(kernel_map,
2496 			  (vm_offset_t)pmap->pm_pml4, PAGE_SIZE * 2);
2497 		pmap->pm_pml4 = NULL;
2498 		pmap->pm_pml4_iso = NULL;
2499 	}
2500 	KKASSERT(pmap->pm_stats.resident_count == 0);
2501 	KKASSERT(pmap->pm_stats.wired_count == 0);
2502 }
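
/*
 * Illustrative lifecycle sketch (hypothetical, under #if 0) showing the
 * order in which the vmspace code is expected to drive these entry points.
 * vmspace_pmap() is assumed to be the usual accessor for the embedded
 * pmap; the structure is preallocated and zeroed before pmap_pinit().
 */
#if 0
static void
example_pmap_lifecycle(struct vmspace *vm)
{
	pmap_pinit(vmspace_pmap(vm));	/* allocate PML4, install DMAP/KMAP */
	/* ... mappings are entered and removed during the process life ... */
	pmap_release(vmspace_pmap(vm));	/* map must contain no valid mappings */
	pmap_puninit(vmspace_pmap(vm));	/* dtor: free the cached PML4 page(s) */
}
#endif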
2503 
2504 /*
2505  * This function is now unused (used to add the pmap to the pmap_list)
2506  */
2507 void
2508 pmap_pinit2(struct pmap *pmap)
2509 {
2510 }
2511 
2512 /*
2513  * Transform an initialized pmap for Intel EPT.
2514  */
2515 void
2516 pmap_ept_transform(pmap_t pmap, int flags)
2517 {
2518 	uint64_t pmap_bits_ept[PG_BITS_SIZE] = {
2519 		[TYPE_IDX]	= EPT_PMAP,
2520 		[PG_V_IDX]	= EPT_PG_READ | EPT_PG_EXECUTE,
2521 		[PG_RW_IDX]	= EPT_PG_WRITE,
2522 		[PG_U_IDX]	= 0, /* no support in EPT */
2523 		[PG_A_IDX]	= EPT_PG_A,
2524 		[PG_M_IDX]	= EPT_PG_M,
2525 		[PG_PS_IDX]	= EPT_PG_PS,
2526 		[PG_G_IDX]	= 0, /* no support in EPT */
2527 		[PG_W_IDX]	= EPT_PG_AVAIL1,
2528 		[PG_MANAGED_IDX] = EPT_PG_AVAIL2,
2529 		[PG_N_IDX]	= EPT_PG_IGNORE_PAT | EPT_MEM_TYPE_UC,
2530 		[PG_NX_IDX]	= 0, /* no support in EPT */
2531 	};
2532 	uint64_t protection_codes_ept[PROTECTION_CODES_SIZE] = {
2533 		[VM_PROT_NONE | VM_PROT_NONE  | VM_PROT_NONE   ] = 0,
2534 		[VM_PROT_READ | VM_PROT_NONE  | VM_PROT_NONE   ] = 0,
2535 		[VM_PROT_READ | VM_PROT_NONE  | VM_PROT_EXECUTE] = 0,
2536 		[VM_PROT_NONE | VM_PROT_NONE  | VM_PROT_EXECUTE] = 0,
2537 		[VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE   ] =
2538 			pmap_bits_ept[PG_RW_IDX],
2539 		[VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE] =
2540 			pmap_bits_ept[PG_RW_IDX],
2541 		[VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE   ] =
2542 			pmap_bits_ept[PG_RW_IDX],
2543 		[VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE] =
2544 			pmap_bits_ept[PG_RW_IDX],
2545 	};
2546 	pt_entry_t pmap_cache_bits_ept[PAT_INDEX_SIZE] = {
2547 		[PAT_UNCACHEABLE]	= EPT_PG_IGNORE_PAT | EPT_MEM_TYPE_UC,
2548 		[PAT_WRITE_COMBINING]	= EPT_PG_IGNORE_PAT | EPT_MEM_TYPE_WC,
2549 		[PAT_WRITE_THROUGH]	= EPT_PG_IGNORE_PAT | EPT_MEM_TYPE_WT,
2550 		[PAT_WRITE_PROTECTED]	= EPT_PG_IGNORE_PAT | EPT_MEM_TYPE_WP,
2551 		[PAT_WRITE_BACK]	= EPT_PG_IGNORE_PAT | EPT_MEM_TYPE_WB,
2552 		[PAT_UNCACHED]		= EPT_PG_IGNORE_PAT | EPT_MEM_TYPE_UC,
2553 	};
2554 	pt_entry_t pmap_cache_mask_ept = EPT_PG_IGNORE_PAT | EPT_MEM_TYPE_MASK;
2555 
2556 	pmap->pm_flags |= (flags | PMAP_HVM);
2557 	bcopy(pmap_bits_ept, pmap->pmap_bits, sizeof(pmap_bits_ept));
2558 	bcopy(protection_codes_ept, pmap->protection_codes,
2559 	      sizeof(protection_codes_ept));
2560 	bcopy(pmap_cache_bits_ept, pmap->pmap_cache_bits_pte,
2561 	      sizeof(pmap_cache_bits_ept));
2562 	bcopy(pmap_cache_bits_ept, pmap->pmap_cache_bits_pde,
2563 	      sizeof(pmap_cache_bits_ept));
2564 	pmap->pmap_cache_mask_pte = pmap_cache_mask_ept;
2565 	pmap->pmap_cache_mask_pde = pmap_cache_mask_ept;
2566 
2567 	/*
2568 	 * Zero out page directories.  These are only used by the VM.  Note
2569 	 * that the valid area is two pages if there is a pm_pmlpv_iso PTE
2570 	 * installed, otherwise it is only one page.  The ISO page isn't used
2571 	 * either way but clean it out anyway if it exists.
2572 	 */
2573 	if (pmap->pm_pmlpv_iso != NULL)
2574 		bzero(pmap->pm_pml4, PAGE_SIZE * 2);
2575 	else
2576 		bzero(pmap->pm_pml4, PAGE_SIZE);
2577 }
2578 
2579 /*
2580  * Transform an initialized pmap for AMD NPT/RVI.
2581  */
2582 void
2583 pmap_npt_transform(pmap_t pmap, int flags)
2584 {
2585 	uint64_t protection_codes_npt[PROTECTION_CODES_SIZE] = {
2586 		[VM_PROT_NONE | VM_PROT_NONE  | VM_PROT_NONE   ] = 0,
2587 		[VM_PROT_READ | VM_PROT_NONE  | VM_PROT_NONE   ] = 0,
2588 		[VM_PROT_READ | VM_PROT_NONE  | VM_PROT_EXECUTE] = 0,
2589 		[VM_PROT_NONE | VM_PROT_NONE  | VM_PROT_EXECUTE] = 0,
2590 		[VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE   ] =
2591 			pmap_bits_default[PG_RW_IDX],
2592 		[VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE] =
2593 			pmap_bits_default[PG_RW_IDX],
2594 		[VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE   ] =
2595 			pmap_bits_default[PG_RW_IDX],
2596 		[VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE] =
2597 			pmap_bits_default[PG_RW_IDX],
2598 	};
2599 
2600 	pmap->pm_flags |= (flags | PMAP_HVM);
2601 	pmap->pmap_bits[TYPE_IDX] = NPT_PMAP;
2602 	/* Set PG_G and PG_NX bits to 0, similar to the EPT case above. */
2603 	pmap->pmap_bits[PG_G_IDX] = 0;
2604 	pmap->pmap_bits[PG_NX_IDX] = 0;
2605 
2606 	bcopy(protection_codes_npt, pmap->protection_codes,
2607 	      sizeof(protection_codes_npt));
2608 
2609 	if (pmap->pm_pmlpv_iso != NULL)
2610 		bzero(pmap->pm_pml4, PAGE_SIZE * 2);
2611 	else
2612 		bzero(pmap->pm_pml4, PAGE_SIZE);
2613 }
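
/*
 * Illustrative HVM setup sketch (hypothetical, under #if 0): a guest pmap
 * is initialized normally and then transformed into the hardware-assisted
 * format.  'gpmap' and the vendor predicate are illustrative only; the
 * real selection logic lives in the virtualization code.
 */
#if 0
	pmap_pinit(gpmap);
	if (vendor_is_intel)
		pmap_ept_transform(gpmap, 0);	/* Intel EPT */
	else
		pmap_npt_transform(gpmap, 0);	/* AMD NPT/RVI */
#endif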
2614 
2615 /*
2616  * This routine is called when various levels in the page table need to
2617  * be populated.  This routine cannot fail.
2618  *
2619  * This function returns two locked pv_entry's, one representing the
2620  * requested pv and one representing the requested pv's parent pv.  If
2621  * an intermediate page table does not exist it will be created, mapped,
2622  * wired, and the parent page table will be given an additional hold
2623  * count representing the presence of the child pv_entry.
2624  */
2625 static
2626 pv_entry_t
2627 pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, pv_entry_t *pvpp)
2628 {
2629 	pt_entry_t *ptep;
2630 	pt_entry_t *ptep_iso;
2631 	pv_entry_t pv;
2632 	pv_entry_t pvp;
2633 	pt_entry_t v;
2634 	vm_page_t m;
2635 	int isnew;
2636 	int ispt;
2637 
2638 	/*
2639 	 * If the pv already exists and we aren't being asked for the
2640 	 * parent page table page we can just return it.  A locked+held pv
2641 	 * is returned.  The pv will also have a second hold related to the
2642 	 * pmap association that we don't have to worry about.
2643 	 */
2644 	ispt = 0;
2645 	pv = pv_alloc(pmap, ptepindex, &isnew);
2646 	if (isnew == 0 && pvpp == NULL)
2647 		return(pv);
2648 
2649 	/*
2650 	 * DragonFly doesn't use PV's to represent terminal PTEs any more.
2651 	 * The index range is still used for placemarkers, but not for
2652 	 * actual pv_entry's.
2653 	 */
2654 	KKASSERT(ptepindex >= pmap_pt_pindex(0));
2655 
2656 	/*
2657 	 * Note that pt_pv's are only returned for user VAs. We assert that
2658 	 * a pt_pv is not being requested for kernel VAs.  The kernel
2659 	 * pre-wires all higher-level page tables so don't overload managed
2660 	 * higher-level page tables on top of it!
2661 	 *
2662 	 * However, it's convenient for us to allow the case when creating
2663 	 * iso_pmap.  This is a bit of a hack but it simplifies iso_pmap
2664 	 * a lot.
2665 	 */
2666 
2667 	/*
2668 	 * The kernel never uses managed PT/PD/PDP pages.
2669 	 */
2670 	KKASSERT(pmap != kernel_pmap);
2671 
2672 	/*
2673 	 * Non-terminal PVs allocate a VM page to represent the page table,
2674 	 * so we have to resolve pvp and calculate ptepindex for the pvp
2675 	 * and then for the page table entry index in the pvp for
2676 	 * fall-through.
2677 	 */
2678 	if (ptepindex < pmap_pd_pindex(0)) {
2679 		/*
2680 		 * pv is PT, pvp is PD
2681 		 */
2682 		ptepindex = (ptepindex - pmap_pt_pindex(0)) >> NPDEPGSHIFT;
2683 		ptepindex += NUPTE_TOTAL + NUPT_TOTAL;
2684 		pvp = pmap_allocpte(pmap, ptepindex, NULL);
2685 
2686 		/*
2687 		 * PT index in PD
2688 		 */
2689 		ptepindex = pv->pv_pindex - pmap_pt_pindex(0);
2690 		ptepindex &= ((1ul << NPDEPGSHIFT) - 1);
2691 		ispt = 1;
2692 	} else if (ptepindex < pmap_pdp_pindex(0)) {
2693 		/*
2694 		 * pv is PD, pvp is PDP
2695 		 *
2696 		 * SIMPLE PMAP NOTE: Simple pmaps do not allocate above
2697 		 *		     the PD.
2698 		 */
2699 		ptepindex = (ptepindex - pmap_pd_pindex(0)) >> NPDPEPGSHIFT;
2700 		ptepindex += NUPTE_TOTAL + NUPT_TOTAL + NUPD_TOTAL;
2701 
2702 		if (pmap->pm_flags & PMAP_FLAG_SIMPLE) {
2703 			KKASSERT(pvpp == NULL);
2704 			pvp = NULL;
2705 		} else {
2706 			pvp = pmap_allocpte(pmap, ptepindex, NULL);
2707 		}
2708 
2709 		/*
2710 		 * PD index in PDP
2711 		 */
2712 		ptepindex = pv->pv_pindex - pmap_pd_pindex(0);
2713 		ptepindex &= ((1ul << NPDPEPGSHIFT) - 1);
2714 	} else if (ptepindex < pmap_pml4_pindex()) {
2715 		/*
2716 		 * pv is PDP, pvp is the root pml4 table
2717 		 */
2718 		pvp = pmap_allocpte(pmap, pmap_pml4_pindex(), NULL);
2719 
2720 		/*
2721 		 * PDP index in PML4
2722 		 */
2723 		ptepindex = pv->pv_pindex - pmap_pdp_pindex(0);
2724 		ptepindex &= ((1ul << NPML4EPGSHIFT) - 1);
2725 	} else {
2726 		/*
2727 		 * pv represents the top-level PML4, there is no parent.
2728 		 */
2729 		pvp = NULL;
2730 	}
2731 
2732 	if (isnew == 0)
2733 		goto notnew;
2734 
2735 	/*
2736 	 * (isnew) is TRUE.
2737 	 *
2738 	 * (1) Add a wire count to the parent page table (pvp).
2739 	 * (2) Allocate a VM page for the page table.
2740 	 * (3) Enter the VM page into the parent page table.
2741 	 *
2742 	 * Page table pages are marked PG_WRITEABLE and PG_MAPPED.
2743 	 */
2744 	if (pvp)
2745 		vm_page_wire_quick(pvp->pv_m);
2746 
2747 	for (;;) {
2748 		m = vm_page_alloc(NULL, pv->pv_pindex,
2749 				  VM_ALLOC_NORMAL | VM_ALLOC_SYSTEM |
2750 				  VM_ALLOC_INTERRUPT);
2751 		if (m)
2752 			break;
2753 		vm_wait(0);
2754 	}
2755 	vm_page_wire(m);	/* wire for mapping in parent */
2756 	pmap_zero_page(VM_PAGE_TO_PHYS(m));
2757 	m->valid = VM_PAGE_BITS_ALL;
2758 	vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE | PG_UNQUEUED);
2759 	KKASSERT(m->queue == PQ_NONE);
2760 
2761 	pv->pv_m = m;
2762 
2763 	/*
2764 	 * (isnew) is TRUE.
2765 	 *
2766 	 * Wire the page into pvp.  Bump the resident_count for the pmap.
2767 	 * There is no pvp for the top level, address the pm_pml4[] array
2768 	 * directly.
2769 	 *
2770 	 * If the caller wants the parent we return it, otherwise
2771 	 * we just put it away.
2772 	 *
2773 	 * No interlock is needed for pte 0 -> non-zero.
2774 	 *
2775 	 * In the situation where *ptep is valid we might have an unmanaged
2776 	 * page table page shared from another page table which we need to
2777 	 * unshare before installing our private page table page.
2778 	 */
2779 	if (pvp) {
2780 		v = VM_PAGE_TO_PHYS(m) |
2781 		    (pmap->pmap_bits[PG_RW_IDX] |
2782 		     pmap->pmap_bits[PG_V_IDX] |
2783 		     pmap->pmap_bits[PG_A_IDX]);
2784 		if (ptepindex < NUPTE_USER)
2785 			v |= pmap->pmap_bits[PG_U_IDX];
2786 		if (ptepindex < pmap_pt_pindex(0))
2787 			v |= pmap->pmap_bits[PG_M_IDX];
2788 
2789 		ptep = pv_pte_lookup(pvp, ptepindex);
2790 		if (pvp == pmap->pm_pmlpv && pmap->pm_pmlpv_iso)
2791 			ptep_iso = pv_pte_lookup(pmap->pm_pmlpv_iso, ptepindex);
2792 		else
2793 			ptep_iso  = NULL;
2794 		if (*ptep & pmap->pmap_bits[PG_V_IDX]) {
2795 			panic("pmap_allocpte: ptpte present without pv_entry!");
2796 		} else {
2797 			pt_entry_t pte;
2798 
2799 			pte = atomic_swap_long(ptep, v);
2800 			if (ptep_iso)
2801 				atomic_swap_long(ptep_iso, v);
2802 			if (pte != 0) {
2803 				kprintf("install pgtbl mixup 0x%016jx "
2804 					"old/new 0x%016jx/0x%016jx\n",
2805 					(intmax_t)ptepindex, pte, v);
2806 			}
2807 		}
2808 	}
2809 	vm_page_wakeup(m);
2810 
2811 notnew:
2812 	/*
2813 	 * (isnew) may be TRUE or FALSE.
2814 	 */
2815 	if (pvp) {
2816 		KKASSERT(pvp->pv_m != NULL);
2817 		ptep = pv_pte_lookup(pvp, ptepindex);
2818 		v = VM_PAGE_TO_PHYS(pv->pv_m) |
2819 		    (pmap->pmap_bits[PG_RW_IDX] |
2820 		     pmap->pmap_bits[PG_V_IDX] |
2821 		     pmap->pmap_bits[PG_A_IDX]);
2822 		if (ptepindex < NUPTE_USER)
2823 			v |= pmap->pmap_bits[PG_U_IDX];
2824 		if (ptepindex < pmap_pt_pindex(0))
2825 			v |= pmap->pmap_bits[PG_M_IDX];
2826 		if (*ptep != v) {
2827 			kprintf("mismatched upper level pt %016jx/%016jx\n",
2828 				*ptep, v);
2829 		}
2830 	}
2831 	if (pvpp)
2832 		*pvpp = pvp;
2833 	else if (pvp)
2834 		pv_put(pvp);
2835 	return (pv);
2836 }
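
/*
 * Illustrative fragment (hypothetical, under #if 0) of the contract
 * described above: pmap_allocpte() returns the requested pv locked and
 * held, and when pvpp is supplied the parent pv is returned locked as
 * well.  Both must be released with pv_put() when the caller is done.
 */
#if 0
	pv_entry_t pt_pv;
	pv_entry_t pd_pv;

	pt_pv = pmap_allocpte(pmap, pmap_pt_pindex(va), &pd_pv);
	/* ... e.g. install a terminal pte via pv_pte_lookup(pt_pv, ...) ... */
	pv_put(pd_pv);
	pv_put(pt_pv);
#endif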
2837 
2838 /*
2839  * Release any resources held by the given physical map.
2840  *
2841  * Called when a pmap initialized by pmap_pinit is being released.  Should
2842  * only be called if the map contains no valid mappings.
2843  */
2844 struct pmap_release_info {
2845 	pmap_t	pmap;
2846 	int	retry;
2847 	pv_entry_t pvp;
2848 };
2849 
2850 static int pmap_release_callback(pv_entry_t pv, void *data);
2851 
2852 void
2853 pmap_release(struct pmap *pmap)
2854 {
2855 	struct pmap_release_info info;
2856 
2857 	KASSERT(CPUMASK_TESTZERO(pmap->pm_active),
2858 		("pmap still active! %016jx",
2859 		(uintmax_t)CPUMASK_LOWMASK(pmap->pm_active)));
2860 
2861 	/*
2862 	 * There is no longer a pmap_list; if there were, we would remove the
2863 	 * pmap from it here.
2864 	 */
2865 
2866 	/*
2867 	 * Pull pv's off the RB tree in order from low to high and release
2868 	 * each page.
2869 	 */
2870 	info.pmap = pmap;
2871 	do {
2872 		info.retry = 0;
2873 		info.pvp = NULL;
2874 
2875 		spin_lock(&pmap->pm_spin);
2876 		RB_SCAN(pv_entry_rb_tree, &pmap->pm_pvroot, NULL,
2877 			pmap_release_callback, &info);
2878 		spin_unlock(&pmap->pm_spin);
2879 
2880 		if (info.pvp)
2881 			pv_put(info.pvp);
2882 	} while (info.retry);
2883 
2884 
2885 	/*
2886 	 * One resident page (the pml4 page) should remain.  Two if
2887 	 * the pmap has implemented an isolated userland PML4E table.
2888 	 * No wired pages should remain.
2889 	 */
2890 	int expected_res = 0;
2891 
2892 	if ((pmap->pm_flags & PMAP_FLAG_SIMPLE) == 0)
2893 		++expected_res;
2894 	if (pmap->pm_pmlpv_iso)
2895 		++expected_res;
2896 
2897 #if 1
2898 	if (pmap->pm_stats.resident_count != expected_res ||
2899 	    pmap->pm_stats.wired_count != 0) {
2900 		kprintf("fatal pmap problem - pmap %p flags %08x "
2901 			"rescnt=%jd wirecnt=%jd\n",
2902 			pmap,
2903 			pmap->pm_flags,
2904 			pmap->pm_stats.resident_count,
2905 			pmap->pm_stats.wired_count);
2906 		tsleep(pmap, 0, "DEAD", 0);
2907 	}
2908 #else
2909 	KKASSERT(pmap->pm_stats.resident_count == expected_res);
2910 	KKASSERT(pmap->pm_stats.wired_count == 0);
2911 #endif
2912 }
2913 
2914 /*
2915  * Called from low to high.  We must cache the proper parent pv so we
2916  * can adjust its wired count.
2917  */
2918 static int
2919 pmap_release_callback(pv_entry_t pv, void *data)
2920 {
2921 	struct pmap_release_info *info = data;
2922 	pmap_t pmap = info->pmap;
2923 	vm_pindex_t pindex;
2924 	int r;
2925 
2926 	/*
2927 	 * Acquire a held and locked pv, check for release race
2928 	 */
2929 	pindex = pv->pv_pindex;
2930 	if (info->pvp == pv) {
2931 		spin_unlock(&pmap->pm_spin);
2932 		info->pvp = NULL;
2933 	} else if (pv_hold_try(pv)) {
2934 		spin_unlock(&pmap->pm_spin);
2935 	} else {
2936 		spin_unlock(&pmap->pm_spin);
2937 		pv_lock(pv);
2938 		pv_put(pv);
2939 		info->retry = 1;
2940 		spin_lock(&pmap->pm_spin);
2941 
2942 		return -1;
2943 	}
2944 	KKASSERT(pv->pv_pmap == pmap && pindex == pv->pv_pindex);
2945 
2946 	if (pv->pv_pindex < pmap_pt_pindex(0)) {
2947 		/*
2948 		 * I am PTE, parent is PT
2949 		 */
2950 		pindex = pv->pv_pindex >> NPTEPGSHIFT;
2951 		pindex += NUPTE_TOTAL;
2952 	} else if (pv->pv_pindex < pmap_pd_pindex(0)) {
2953 		/*
2954 		 * I am PT, parent is PD
2955 		 */
2956 		pindex = (pv->pv_pindex - NUPTE_TOTAL) >> NPDEPGSHIFT;
2957 		pindex += NUPTE_TOTAL + NUPT_TOTAL;
2958 	} else if (pv->pv_pindex < pmap_pdp_pindex(0)) {
2959 		/*
2960 		 * I am PD, parent is PDP
2961 		 */
2962 		pindex = (pv->pv_pindex - NUPTE_TOTAL - NUPT_TOTAL) >>
2963 			 NPDPEPGSHIFT;
2964 		pindex += NUPTE_TOTAL + NUPT_TOTAL + NUPD_TOTAL;
2965 	} else if (pv->pv_pindex < pmap_pml4_pindex()) {
2966 		/*
2967 		 * I am PDP, parent is PML4.  We always calculate the
2968 		 * normal PML4 here, not the isolated PML4.
2969 		 */
2970 		pindex = pmap_pml4_pindex();
2971 	} else {
2972 		/*
2973 		 * parent is NULL
2974 		 */
2975 		if (info->pvp) {
2976 			pv_put(info->pvp);
2977 			info->pvp = NULL;
2978 		}
2979 		pindex = 0;
2980 	}
2981 	if (pindex) {
2982 		if (info->pvp && info->pvp->pv_pindex != pindex) {
2983 			pv_put(info->pvp);
2984 			info->pvp = NULL;
2985 		}
2986 		if (info->pvp == NULL)
2987 			info->pvp = pv_get(pmap, pindex, NULL);
2988 	} else {
2989 		if (info->pvp) {
2990 			pv_put(info->pvp);
2991 			info->pvp = NULL;
2992 		}
2993 	}
2994 	r = pmap_release_pv(pv, info->pvp, NULL);
2995 	spin_lock(&pmap->pm_spin);
2996 
2997 	return(r);
2998 }
2999 
3000 /*
3001  * Called with held (i.e. also locked) pv.  This function will dispose of
3002  * the lock along with the pv.
3003  *
3004  * If the caller already holds the locked parent page table for pv it
3005  * must pass it as pvp, allowing us to avoid a deadlock, else it can
3006  * pass NULL for pvp.
3007  */
3008 static int
3009 pmap_release_pv(pv_entry_t pv, pv_entry_t pvp, pmap_inval_bulk_t *bulk)
3010 {
3011 	vm_page_t p;
3012 
3013 	/*
3014 	 * The pmap is currently not spinlocked, pv is held+locked.
3015 	 * Remove the pv's page from its parent's page table.  The
3016 	 * parent's page table page's wire_count will be decremented.
3017 	 *
3018 	 * This will clean out the pte at any level of the page table.
3019 	 * If smp != 0 all cpus are affected.
3020 	 *
3021 	 * Do not tear down recursively; it's faster to just let the
3022 	 * release run its course.
3023 	 */
3024 	pmap_remove_pv_pte(pv, pvp, bulk, 0);
3025 
3026 	/*
3027 	 * Terminal pvs are unhooked from their vm_pages.  Because
3028 	 * terminal pages aren't page table pages they aren't wired
3029 	 * by us, so we have to be sure not to unwire them either.
3030 	 *
3031 	 * XXX It is unclear if this code ever gets called because we
3032 	 *     no longer use pv's to track terminal pages.
3033 	 */
3034 	if (pv->pv_pindex < pmap_pt_pindex(0)) {
3035 		pmap_remove_pv_page(pv, 0);
3036 		goto skip;
3037 	}
3038 
3039 	/*
3040 	 * We leave the top-level page table page cached, wired, and
3041 	 * mapped in the pmap until the dtor function (pmap_puninit())
3042 	 * gets called.
3043 	 *
3044 	 * Since we are leaving the top-level pv intact we need
3045 	 * to break out of what would otherwise be an infinite loop.
3046 	 *
3047 	 * This covers both the normal and the isolated PML4 page.
3048 	 */
3049 	if (pv->pv_pindex >= pmap_pml4_pindex()) {
3050 		pv_put(pv);
3051 		return(-1);
3052 	}
3053 
3054 	/*
3055 	 * For page table pages (other than the top-level page),
3056 	 * remove and free the vm_page.  The representative mapping
3057 	 * removed above by pmap_remove_pv_pte() did not undo the
3058 	 * last wire_count so we have to do that as well.
3059 	 */
3060 	p = pmap_remove_pv_page(pv, 1);
3061 	vm_page_busy_wait(p, FALSE, "pmaprl");
3062 	if (p->wire_count != 1) {
3063 		const char *tstr;
3064 
3065 		if (pv->pv_pindex >= pmap_pdp_pindex(0))
3066 			tstr = "PDP";
3067 		else if (pv->pv_pindex >= pmap_pd_pindex(0))
3068 			tstr = "PD";
3069 		else if (pv->pv_pindex >= pmap_pt_pindex(0))
3070 			tstr = "PT";
3071 		else
3072 			tstr = "PTE";
3073 
3074 		kprintf("p(%s) p->wire_count was %016lx %d\n",
3075 			tstr, pv->pv_pindex, p->wire_count);
3076 	}
3077 	KKASSERT(p->wire_count == 1);
3078 	KKASSERT(p->flags & PG_UNQUEUED);
3079 
3080 	vm_page_unwire(p, 0);
3081 	KKASSERT(p->wire_count == 0);
3082 
3083 	vm_page_free(p);
3084 skip:
3085 	pv_free(pv, pvp);
3086 
3087 	return 0;
3088 }
3089 
3090 /*
3091  * This function will remove the pte associated with a pv from its parent.
3092  * Terminal pv's are supported.  All cpus specified by (bulk) are properly
3093  * invalidated.
3094  *
3095  * The wire count will be dropped on the parent page table.  The wire
3096  * count on the page being removed (pv->pv_m) from the parent page table
3097  * is NOT touched.  Note that terminal pages will not have any additional
3098  * wire counts while page table pages will have at least one representing
3099  * the mapping, plus others representing sub-mappings.
3100  *
3101  * NOTE: Cannot be called on kernel page table pages, only KVM terminal
3102  *	 pages and user page table and terminal pages.
3103  *
3104  * NOTE: The pte being removed might be unmanaged, and the pv supplied might
3105  *	 be freshly allocated and not imply that the pte is managed.  In this
3106  *	 case pv->pv_m should be NULL.
3107  *
3108  * The pv must be locked.  The pvp, if supplied, must be locked.  All
3109  * supplied pv's will remain locked on return.
3110  *
3111  * XXX must lock parent pv's if they exist to remove pte XXX
3112  */
3113 static
3114 void
3115 pmap_remove_pv_pte(pv_entry_t pv, pv_entry_t pvp, pmap_inval_bulk_t *bulk,
3116 		   int destroy)
3117 {
3118 	vm_pindex_t ptepindex = pv->pv_pindex;
3119 	pmap_t pmap = pv->pv_pmap;
3120 	vm_page_t p;
3121 	int gotpvp = 0;
3122 
3123 	KKASSERT(pmap);
3124 
3125 	if (ptepindex >= pmap_pml4_pindex()) {
3126 		/*
3127 		 * We are the top level PML4E table, there is no parent.
3128 		 *
3129 		 * This is either the normal or isolated PML4E table.
3130 		 * Only the normal is used in regular operation, the isolated
3131 		 * is only passed in when breaking down the whole pmap.
3132 		 */
3133 		p = pmap->pm_pmlpv->pv_m;
3134 		KKASSERT(pv->pv_m == p);	/* debugging */
3135 	} else if (ptepindex >= pmap_pdp_pindex(0)) {
3136 		/*
3137 		 * Remove a PDP page from the PML4E.  This can only occur
3138 		 * with user page tables.  We do not have to lock the
3139 		 * pml4 PV so just ignore pvp.
3140 		 */
3141 		vm_pindex_t pml4_pindex;
3142 		vm_pindex_t pdp_index;
3143 		pml4_entry_t *pdp;
3144 		pml4_entry_t *pdp_iso;
3145 
3146 		pdp_index = ptepindex - pmap_pdp_pindex(0);
3147 		if (pvp == NULL) {
3148 			pml4_pindex = pmap_pml4_pindex();
3149 			pvp = pv_get(pv->pv_pmap, pml4_pindex, NULL);
3150 			KKASSERT(pvp);
3151 			gotpvp = 1;
3152 		}
3153 
3154 		pdp = &pmap->pm_pml4[pdp_index & ((1ul << NPML4EPGSHIFT) - 1)];
3155 		KKASSERT((*pdp & pmap->pmap_bits[PG_V_IDX]) != 0);
3156 		p = PHYS_TO_VM_PAGE(*pdp & PG_FRAME);
3157 		pmap_inval_bulk(bulk, (vm_offset_t)-1, pdp, 0);
3158 
3159 		/*
3160 		 * Also remove the PDP from the isolated PML4E if the
3161 		 * process uses one.
3162 		 */
3163 		if (pvp == pmap->pm_pmlpv && pmap->pm_pmlpv_iso) {
3164 			pdp_iso = &pmap->pm_pml4_iso[pdp_index &
3165 						((1ul << NPML4EPGSHIFT) - 1)];
3166 			pmap_inval_bulk(bulk, (vm_offset_t)-1, pdp_iso, 0);
3167 		}
3168 		KKASSERT(pv->pv_m == p);	/* debugging */
3169 	} else if (ptepindex >= pmap_pd_pindex(0)) {
3170 		/*
3171 		 * Remove a PD page from the PDP
3172 		 *
3173 		 * SIMPLE PMAP NOTE: Non-existent pvp's are ok in the case
3174 		 *		     of a simple pmap because it stops at
3175 		 *		     the PD page.
3176 		 */
3177 		vm_pindex_t pdp_pindex;
3178 		vm_pindex_t pd_index;
3179 		pdp_entry_t *pd;
3180 
3181 		pd_index = ptepindex - pmap_pd_pindex(0);
3182 
3183 		if (pvp == NULL) {
3184 			pdp_pindex = NUPTE_TOTAL + NUPT_TOTAL + NUPD_TOTAL +
3185 				     (pd_index >> NPML4EPGSHIFT);
3186 			pvp = pv_get(pv->pv_pmap, pdp_pindex, NULL);
3187 			gotpvp = 1;
3188 		}
3189 
3190 		if (pvp) {
3191 			pd = pv_pte_lookup(pvp, pd_index &
3192 						((1ul << NPDPEPGSHIFT) - 1));
3193 			KKASSERT((*pd & pmap->pmap_bits[PG_V_IDX]) != 0);
3194 			p = PHYS_TO_VM_PAGE(*pd & PG_FRAME);
3195 			pmap_inval_bulk(bulk, (vm_offset_t)-1, pd, 0);
3196 		} else {
3197 			KKASSERT(pmap->pm_flags & PMAP_FLAG_SIMPLE);
3198 			p = pv->pv_m;		/* degenerate test later */
3199 		}
3200 		KKASSERT(pv->pv_m == p);	/* debugging */
3201 	} else if (ptepindex >= pmap_pt_pindex(0)) {
3202 		/*
3203 		 *  Remove a PT page from the PD
3204 		 */
3205 		vm_pindex_t pd_pindex;
3206 		vm_pindex_t pt_index;
3207 		pd_entry_t *pt;
3208 
3209 		pt_index = ptepindex - pmap_pt_pindex(0);
3210 
3211 		if (pvp == NULL) {
3212 			pd_pindex = NUPTE_TOTAL + NUPT_TOTAL +
3213 				    (pt_index >> NPDPEPGSHIFT);
3214 			pvp = pv_get(pv->pv_pmap, pd_pindex, NULL);
3215 			KKASSERT(pvp);
3216 			gotpvp = 1;
3217 		}
3218 
3219 		pt = pv_pte_lookup(pvp, pt_index & ((1ul << NPDPEPGSHIFT) - 1));
3220 #if 0
3221 		KASSERT((*pt & pmap->pmap_bits[PG_V_IDX]) != 0,
3222 			("*pt unexpectedly invalid %016jx "
3223 			 "gotpvp=%d ptepindex=%ld ptindex=%ld pv=%p pvp=%p",
3224 			*pt, gotpvp, ptepindex, pt_index, pv, pvp));
3225 		p = PHYS_TO_VM_PAGE(*pt & PG_FRAME);
3226 #else
3227 		if ((*pt & pmap->pmap_bits[PG_V_IDX]) == 0) {
3228 			kprintf("*pt unexpectedly invalid %016jx "
3229 			        "gotpvp=%d ptepindex=%ld ptindex=%ld "
3230 				"pv=%p pvp=%p\n",
3231 				*pt, gotpvp, ptepindex, pt_index, pv, pvp);
3232 			tsleep(pt, 0, "DEAD", 0);
3233 			p = pv->pv_m;
3234 		} else {
3235 			p = PHYS_TO_VM_PAGE(*pt & PG_FRAME);
3236 		}
3237 #endif
3238 		pmap_inval_bulk(bulk, (vm_offset_t)-1, pt, 0);
3239 		KKASSERT(pv->pv_m == p);	/* debugging */
3240 	} else {
3241 		KKASSERT(0);
3242 	}
3243 
3244 	/*
3245 	 * If requested, scrap the underlying pv->pv_m and the underlying
3246 	 * pv.  If this is a page-table-page we must also free the page.
3247 	 *
3248 	 * pvp must be returned locked.
3249 	 */
3250 	if (destroy == 1) {
3251 		/*
3252 		 * page table page (PT, PD, PDP, PML4), caller was responsible
3253 		 * for testing wired_count.
3254 		 */
3255 		KKASSERT(pv->pv_m->wire_count == 1);
3256 		p = pmap_remove_pv_page(pv, 1);
3257 		pv_free(pv, pvp);
3258 		pv = NULL;
3259 
3260 		vm_page_busy_wait(p, FALSE, "pgpun");
3261 		vm_page_unwire(p, 0);
3262 		vm_page_flag_clear(p, PG_MAPPED | PG_WRITEABLE);
3263 		vm_page_free(p);
3264 	}
3265 
3266 	/*
3267 	 * If we acquired pvp ourselves then we are responsible for
3268 	 * recursively deleting it.
3269 	 */
3270 	if (pvp && gotpvp) {
3271 		/*
3272 		 * Recursively destroy higher-level page tables.
3273 		 *
3274 		 * This is optional.  If we do not, they will still
3275 		 * be destroyed when the process exits.
3276 		 *
3277 		 * NOTE: Do not destroy pv_entry's with extra hold refs,
3278 		 *	 a caller may have unlocked it and intends to
3279 		 *	 continue to use it.
3280 		 */
3281 		if (pmap_dynamic_delete &&
3282 		    pvp->pv_m &&
3283 		    pvp->pv_m->wire_count == 1 &&
3284 		    (pvp->pv_hold & PV_HOLD_MASK) == 2 &&
3285 		    pvp->pv_pindex < pmap_pml4_pindex()) {
3286 			if (pmap != kernel_pmap) {
3287 				pmap_remove_pv_pte(pvp, NULL, bulk, 1);
3288 				pvp = NULL;	/* safety */
3289 			} else {
3290 				kprintf("Attempt to remove kernel_pmap pindex "
3291 					"%jd\n", pvp->pv_pindex);
3292 				pv_put(pvp);
3293 			}
3294 		} else {
3295 			pv_put(pvp);
3296 		}
3297 	}
3298 }
3299 
3300 /*
3301  * Remove the vm_page association to a pv.  The pv must be locked.
3302  */
3303 static
3304 vm_page_t
3305 pmap_remove_pv_page(pv_entry_t pv, int clrpgbits)
3306 {
3307 	vm_page_t m;
3308 
3309 	m = pv->pv_m;
3310 	pv->pv_m = NULL;
3311 	if (clrpgbits)
3312 		vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
3313 
3314 	return(m);
3315 }
3316 
3317 /*
3318  * Grow the number of kernel page table entries, if needed.
3319  *
3320  * This routine is always called to validate any address space
3321  * beyond KERNBASE (for kldloads).  kernel_vm_end only governs the address
3322  * space below KERNBASE.
3323  *
3324  * kernel_map must be locked exclusively by the caller.
3325  */
3326 void
3327 pmap_growkernel(vm_offset_t kstart, vm_offset_t kend)
3328 {
3329 	vm_paddr_t paddr;
3330 	vm_offset_t ptppaddr;
3331 	vm_page_t nkpg;
3332 	pd_entry_t *pt, newpt;
3333 	pdp_entry_t *pd, newpd;
3334 	int update_kernel_vm_end;
3335 
3336 	/*
3337 	 * bootstrap kernel_vm_end on first real VM use
3338 	 */
3339 	if (kernel_vm_end == 0) {
3340 		kernel_vm_end = VM_MIN_KERNEL_ADDRESS;
3341 
3342 		for (;;) {
3343 			pt = pmap_pt(kernel_pmap, kernel_vm_end);
3344 			if (pt == NULL)
3345 				break;
3346 			if ((*pt & kernel_pmap->pmap_bits[PG_V_IDX]) == 0)
3347 				break;
3348 			kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) &
3349 					~(vm_offset_t)(PAGE_SIZE * NPTEPG - 1);
3350 			if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
3351 				kernel_vm_end = vm_map_max(kernel_map);
3352 				break;
3353 			}
3354 		}
3355 	}
3356 
3357 	/*
3358 	 * Fill in the gaps.  kernel_vm_end is only adjusted for ranges
3359 	 * below KERNBASE.  Ranges above KERNBASE are kldloaded and we
3360 	 * do not want to force-fill 128G worth of page tables.
3361 	 */
3362 	if (kstart < KERNBASE) {
3363 		if (kstart > kernel_vm_end)
3364 			kstart = kernel_vm_end;
3365 		KKASSERT(kend <= KERNBASE);
3366 		update_kernel_vm_end = 1;
3367 	} else {
3368 		update_kernel_vm_end = 0;
3369 	}
3370 
3371 	kstart = rounddown2(kstart, (vm_offset_t)(PAGE_SIZE * NPTEPG));
3372 	kend = roundup2(kend, (vm_offset_t)(PAGE_SIZE * NPTEPG));
3373 
3374 	if (kend - 1 >= vm_map_max(kernel_map))
3375 		kend = vm_map_max(kernel_map);
3376 
3377 	while (kstart < kend) {
3378 		pt = pmap_pt(kernel_pmap, kstart);
3379 		if (pt == NULL) {
3380 			/*
3381 			 * We need a new PD entry
3382 			 */
3383 			nkpg = vm_page_alloc(NULL, mycpu->gd_rand_incr++,
3384 			                     VM_ALLOC_NORMAL |
3385 					     VM_ALLOC_SYSTEM |
3386 					     VM_ALLOC_INTERRUPT);
3387 			if (nkpg == NULL) {
3388 				panic("pmap_growkernel: no memory to grow "
3389 				      "kernel");
3390 			}
3391 			paddr = VM_PAGE_TO_PHYS(nkpg);
3392 			pmap_zero_page(paddr);
3393 			pd = pmap_pd(kernel_pmap, kstart);
3394 
3395 			newpd = (pdp_entry_t)
3396 			    (paddr |
3397 			    kernel_pmap->pmap_bits[PG_V_IDX] |
3398 			    kernel_pmap->pmap_bits[PG_RW_IDX] |
3399 			    kernel_pmap->pmap_bits[PG_A_IDX]);
3400 			atomic_swap_long(pd, newpd);
3401 
3402 #if 0
3403 			kprintf("NEWPD pd=%p pde=%016jx phys=%016jx\n",
3404 				pd, newpd, paddr);
3405 #endif
3406 
3407 			continue; /* try again */
3408 		}
3409 
3410 		if ((*pt & kernel_pmap->pmap_bits[PG_V_IDX]) != 0) {
3411 			kstart = (kstart + PAGE_SIZE * NPTEPG) &
3412 				 ~(vm_offset_t)(PAGE_SIZE * NPTEPG - 1);
3413 			if (kstart - 1 >= vm_map_max(kernel_map)) {
3414 				kstart = vm_map_max(kernel_map);
3415 				break;
3416 			}
3417 			continue;
3418 		}
3419 
3420 		/*
3421 		 * We need a new PT
3422 		 *
3423 		 * This index is bogus, but out of the way
3424 		 */
3425 		nkpg = vm_page_alloc(NULL, mycpu->gd_rand_incr++,
3426 				     VM_ALLOC_NORMAL |
3427 				     VM_ALLOC_SYSTEM |
3428 				     VM_ALLOC_INTERRUPT);
3429 		if (nkpg == NULL)
3430 			panic("pmap_growkernel: no memory to grow kernel");
3431 
3432 		vm_page_wire(nkpg);
3433 		ptppaddr = VM_PAGE_TO_PHYS(nkpg);
3434 		pmap_zero_page(ptppaddr);
3435 		newpt = (pd_entry_t)(ptppaddr |
3436 				     kernel_pmap->pmap_bits[PG_V_IDX] |
3437 				     kernel_pmap->pmap_bits[PG_RW_IDX] |
3438 				     kernel_pmap->pmap_bits[PG_A_IDX]);
3439 		atomic_swap_long(pt, newpt);
3440 
3441 		kstart = (kstart + PAGE_SIZE * NPTEPG) &
3442 			  ~(vm_offset_t)(PAGE_SIZE * NPTEPG - 1);
3443 
3444 		if (kstart - 1 >= vm_map_max(kernel_map)) {
3445 			kstart = vm_map_max(kernel_map);
3446 			break;
3447 		}
3448 	}
3449 
3450 	/*
3451 	 * Only update kernel_vm_end for areas below KERNBASE.
3452 	 */
3453 	if (update_kernel_vm_end && kernel_vm_end < kstart)
3454 		kernel_vm_end = kstart;
3455 }
3456 
3457 /*
3458  *	Add a reference to the specified pmap.
3459  */
3460 void
3461 pmap_reference(pmap_t pmap)
3462 {
3463 	if (pmap != NULL)
3464 		atomic_add_int(&pmap->pm_count, 1);
3465 }
3466 
3467 void
3468 pmap_maybethreaded(pmap_t pmap)
3469 {
3470 	atomic_set_int(&pmap->pm_flags, PMAP_MULTI);
3471 }
3472 
3473 /*
3474  * Called while page is hard-busied to clear the PG_MAPPED and PG_WRITEABLE
3475  * flags if able.  This can happen when the pmap code is unable to clear
3476  * the bits in prior actions due to not holding the page hard-busied at
3477  * the time.
3478  *
3479  * The clearing of PG_MAPPED/WRITEABLE is an optional optimization done
3480  * when the pte is removed and only if the pte has not been multiply-mapped.
3481  * The caller may have to call vm_page_protect() if the bits are still set
3482  * here.
3483  *
3484  * This function is expected to be quick.
3485  */
3486 int
3487 pmap_mapped_sync(vm_page_t m)
3488 {
3489 	return (m->flags);
3490 }
3491 
3492 /***************************************************
3493  * page management routines.
3494  ***************************************************/
3495 
3496 /*
3497  * Hold a pv without locking it
3498  */
3499 #if 0
3500 static void
3501 pv_hold(pv_entry_t pv)
3502 {
3503 	atomic_add_int(&pv->pv_hold, 1);
3504 }
3505 #endif
3506 
3507 /*
3508  * Hold a pv_entry, preventing its destruction.  TRUE is returned if the pv
3509  * was successfully locked, FALSE if it wasn't.  The caller must dispose of
3510  * the pv properly.
3511  *
3512  * Either the pmap->pm_spin or the related vm_page_spin (if traversing a
3513  * pv list via its page) must be held by the caller in order to stabilize
3514  * the pv.
3515  */
3516 static int
3517 _pv_hold_try(pv_entry_t pv PMAP_DEBUG_DECL)
3518 {
3519 	u_int count;
3520 
3521 	/*
3522 	 * Critical path shortcut expects pv to already have one ref
3523 	 * (for the pv->pv_pmap).
3524 	 */
3525 	count = pv->pv_hold;
3526 	cpu_ccfence();
3527 	for (;;) {
3528 		if ((count & PV_HOLD_LOCKED) == 0) {
3529 			if (atomic_fcmpset_int(&pv->pv_hold, &count,
3530 					      (count + 1) | PV_HOLD_LOCKED)) {
3531 #ifdef PMAP_DEBUG
3532 				pv->pv_func = func;
3533 				pv->pv_line = lineno;
3534 #endif
3535 				return TRUE;
3536 			}
3537 		} else {
3538 			if (atomic_fcmpset_int(&pv->pv_hold, &count, count + 1))
3539 				return FALSE;
3540 		}
3541 		/* retry */
3542 	}
3543 }
3544 
3545 /*
3546  * Drop a previously held pv_entry which could not be locked, allowing its
3547  * destruction.
3548  *
3549  * Must not be called with a spinlock held as we might zfree() the pv if it
3550  * is no longer associated with a pmap and this was the last hold count.
3551  */
3552 static void
3553 pv_drop(pv_entry_t pv)
3554 {
3555 	u_int count;
3556 
3557 	for (;;) {
3558 		count = pv->pv_hold;
3559 		cpu_ccfence();
3560 		KKASSERT((count & PV_HOLD_MASK) > 0);
3561 		KKASSERT((count & (PV_HOLD_LOCKED | PV_HOLD_MASK)) !=
3562 			 (PV_HOLD_LOCKED | 1));
3563 		if (atomic_cmpset_int(&pv->pv_hold, count, count - 1)) {
3564 			if ((count & PV_HOLD_MASK) == 1) {
3565 #ifdef PMAP_DEBUG2
3566 				if (pmap_enter_debug > 0) {
3567 					--pmap_enter_debug;
3568 					kprintf("pv_drop: free pv %p\n", pv);
3569 				}
3570 #endif
3571 				KKASSERT(count == 1);
3572 				KKASSERT(pv->pv_pmap == NULL);
3573 				zfree(pvzone, pv);
3574 			}
3575 			return;
3576 		}
3577 		/* retry */
3578 	}
3579 }
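
/*
 * Illustrative fragment (hypothetical, under #if 0) of the canonical
 * lock-or-block pattern used with pv_hold_try(): the pv is found while the
 * pmap spinlock stabilizes it, and if the trylock fails the caller drops
 * the spinlock, blocks in pv_lock(), releases, and retries its lookup.
 * This mirrors pmap_release_callback() above.
 */
#if 0
	if (pv_hold_try(pv)) {
		spin_unlock(&pmap->pm_spin);
		/* pv is now held and locked */
	} else {
		spin_unlock(&pmap->pm_spin);
		pv_lock(pv);		/* may block on the current holder */
		pv_put(pv);		/* caller must re-lookup and retry */
	}
#endif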
3580 
3581 /*
3582  * Find or allocate the requested PV entry, returning a locked, held pv.
3583  *
3584  * If (*isnew) is non-zero, the returned pv will have two hold counts, one
3585  * for the caller and one representing the pmap and vm_page association.
3586  *
3587  * If (*isnew) is zero, the returned pv will have only one hold count.
3588  *
3589  * Since both associations can only be adjusted while the pv is locked,
3590  * together they represent just one additional hold.
3591  */
3592 static
3593 pv_entry_t
3594 _pv_alloc(pmap_t pmap, vm_pindex_t pindex, int *isnew PMAP_DEBUG_DECL)
3595 {
3596 	struct mdglobaldata *md = mdcpu;
3597 	pv_entry_t pv;
3598 	pv_entry_t pnew;
3599 	int pmap_excl = 0;
3600 
3601 	pnew = NULL;
3602 	if (md->gd_newpv) {
3603 #if 1
3604 		pnew = atomic_swap_ptr((void *)&md->gd_newpv, NULL);
3605 #else
3606 		crit_enter();
3607 		pnew = md->gd_newpv;	/* might race NULL */
3608 		md->gd_newpv = NULL;
3609 		crit_exit();
3610 #endif
3611 	}
3612 	if (pnew == NULL)
3613 		pnew = zalloc(pvzone);
3614 
3615 	spin_lock_shared(&pmap->pm_spin);
3616 	for (;;) {
3617 		/*
3618 		 * Shortcut cache
3619 		 */
3620 		pv = pv_entry_lookup(pmap, pindex);
3621 		if (pv == NULL) {
3622 			vm_pindex_t *pmark;
3623 
3624 			/*
3625 			 * Requires exclusive pmap spinlock
3626 			 */
3627 			if (pmap_excl == 0) {
3628 				pmap_excl = 1;
3629 				if (!spin_lock_upgrade_try(&pmap->pm_spin)) {
3630 					spin_unlock_shared(&pmap->pm_spin);
3631 					spin_lock(&pmap->pm_spin);
3632 					continue;
3633 				}
3634 			}
3635 
3636 			/*
3637 			 * We need to block if someone is holding our
3638 			 * placemarker.  As long as we determine the
3639 			 * placemarker has not been acquired we do not
3640 			 * need to get it, as acquisition also requires
3641 			 * the pmap spin lock.
3642 			 *
3643 			 * However, we can race the wakeup.
3644 			 */
3645 			pmark = pmap_placemarker_hash(pmap, pindex);
3646 
3647 			if (((*pmark ^ pindex) & ~PM_PLACEMARK_WAKEUP) == 0) {
3648 				tsleep_interlock(pmark, 0);
3649 				atomic_set_long(pmark, PM_PLACEMARK_WAKEUP);
3650 				if (((*pmark ^ pindex) &
3651 				     ~PM_PLACEMARK_WAKEUP) == 0) {
3652 					spin_unlock(&pmap->pm_spin);
3653 					tsleep(pmark, PINTERLOCKED, "pvplc", 0);
3654 					spin_lock(&pmap->pm_spin);
3655 				}
3656 				continue;
3657 			}
3658 
3659 			/*
3660 			 * Setup the new entry
3661 			 */
3662 			pnew->pv_pmap = pmap;
3663 			pnew->pv_pindex = pindex;
3664 			pnew->pv_hold = PV_HOLD_LOCKED | 2;
3665 			pnew->pv_flags = 0;
3666 #ifdef PMAP_DEBUG
3667 			pnew->pv_func = func;
3668 			pnew->pv_line = lineno;
3669 			if (pnew->pv_line_lastfree > 0) {
3670 				pnew->pv_line_lastfree =
3671 						-pnew->pv_line_lastfree;
3672 			}
3673 #endif
3674 			pv = pv_entry_rb_tree_RB_INSERT(&pmap->pm_pvroot, pnew);
3675 			atomic_add_long(&pmap->pm_stats.resident_count, 1);
3676 			spin_unlock(&pmap->pm_spin);
3677 			*isnew = 1;
3678 
3679 			KASSERT(pv == NULL, ("pv insert failed %p->%p", pnew, pv));
3680 			return(pnew);
3681 		}
3682 
3683 		/*
3684 		 * We already have an entry, cleanup the staged pnew if
3685 		 * we can get the lock, otherwise block and retry.
3686 		 */
3687 		if (__predict_true(_pv_hold_try(pv PMAP_DEBUG_COPY))) {
3688 			if (pmap_excl)
3689 				spin_unlock(&pmap->pm_spin);
3690 			else
3691 				spin_unlock_shared(&pmap->pm_spin);
3692 #if 1
3693 			pnew = atomic_swap_ptr((void *)&md->gd_newpv, pnew);
3694 			if (pnew)
3695 				zfree(pvzone, pnew);
3696 #else
3697 			crit_enter();
3698 			if (md->gd_newpv == NULL)
3699 				md->gd_newpv = pnew;
3700 			else
3701 				zfree(pvzone, pnew);
3702 			crit_exit();
3703 #endif
3704 			KKASSERT(pv->pv_pmap == pmap &&
3705 				 pv->pv_pindex == pindex);
3706 			*isnew = 0;
3707 			return(pv);
3708 		}
3709 		if (pmap_excl) {
3710 			spin_unlock(&pmap->pm_spin);
3711 			_pv_lock(pv PMAP_DEBUG_COPY);
3712 			pv_put(pv);
3713 			spin_lock(&pmap->pm_spin);
3714 		} else {
3715 			spin_unlock_shared(&pmap->pm_spin);
3716 			_pv_lock(pv PMAP_DEBUG_COPY);
3717 			pv_put(pv);
3718 			spin_lock_shared(&pmap->pm_spin);
3719 		}
3720 	}
3721 	/* NOT REACHED */
3722 }
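
/*
 * Illustrative sketch (hypothetical, assumes the usual pv_alloc() wrapper
 * for _pv_alloc()): find-or-create usage.  On a new entry the second hold
 * represents the pmap/vm_page association; the caller initializes it and
 * then releases its own lock+ref with pv_put().
 */
#if 0
static void
example_associate(pmap_t pmap, vm_pindex_t pindex, vm_page_t m)
{
	pv_entry_t pv;
	int isnew;

	pv = pv_alloc(pmap, pindex, &isnew);	/* returned locked + held */
	if (isnew)
		pv->pv_m = m;			/* establish the association */
	/* ... install or adjust the pte while the pv is locked ... */
	pv_put(pv);				/* unlock, drop caller's ref */
}
#endif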
3723 
3724 /*
3725  * Find the requested PV entry, returning a locked+held pv or NULL
3726  */
3727 static
3728 pv_entry_t
3729 _pv_get(pmap_t pmap, vm_pindex_t pindex, vm_pindex_t **pmarkp PMAP_DEBUG_DECL)
3730 {
3731 	pv_entry_t pv;
3732 	int pmap_excl = 0;
3733 
3734 	spin_lock_shared(&pmap->pm_spin);
3735 	for (;;) {
3736 		/*
3737 		 * Shortcut cache
3738 		 */
3739 		pv = pv_entry_lookup(pmap, pindex);
3740 		if (pv == NULL) {
3741 			/*
3742 			 * Block if there is ANY placemarker.  If we are to
3743 			 * return it, we must also acquire the spot, so we
3744 			 * have to block even if the placemarker is held on
3745 			 * a different address.
3746 			 *
3747 			 * OPTIMIZATION: If pmarkp is passed as NULL the
3748 			 * caller is just probing (or looking for a real
3749 			 * pv_entry), and in this case we only need to check
3750 			 * to see if the placemarker matches pindex.
3751 			 */
3752 			vm_pindex_t *pmark;
3753 
3754 			/*
3755 			 * Requires exclusive pmap spinlock
3756 			 */
3757 			if (pmap_excl == 0) {
3758 				pmap_excl = 1;
3759 				if (!spin_lock_upgrade_try(&pmap->pm_spin)) {
3760 					spin_unlock_shared(&pmap->pm_spin);
3761 					spin_lock(&pmap->pm_spin);
3762 					continue;
3763 				}
3764 			}
3765 
3766 			pmark = pmap_placemarker_hash(pmap, pindex);
3767 
3768 			if ((pmarkp && *pmark != PM_NOPLACEMARK) ||
3769 			    ((*pmark ^ pindex) & ~PM_PLACEMARK_WAKEUP) == 0) {
3770 				tsleep_interlock(pmark, 0);
3771 				atomic_set_long(pmark, PM_PLACEMARK_WAKEUP);
3772 				if ((pmarkp && *pmark != PM_NOPLACEMARK) ||
3773 				    ((*pmark ^ pindex) &
3774 				     ~PM_PLACEMARK_WAKEUP) == 0) {
3775 					spin_unlock(&pmap->pm_spin);
3776 					tsleep(pmark, PINTERLOCKED, "pvpld", 0);
3777 					spin_lock(&pmap->pm_spin);
3778 				}
3779 				continue;
3780 			}
3781 			if (pmarkp) {
3782 				if (atomic_swap_long(pmark, pindex) !=
3783 				    PM_NOPLACEMARK) {
3784 					panic("_pv_get: pmark race");
3785 				}
3786 				*pmarkp = pmark;
3787 			}
3788 			spin_unlock(&pmap->pm_spin);
3789 			return NULL;
3790 		}
3791 		if (_pv_hold_try(pv PMAP_DEBUG_COPY)) {
3792 			if (pmap_excl)
3793 				spin_unlock(&pmap->pm_spin);
3794 			else
3795 				spin_unlock_shared(&pmap->pm_spin);
3796 			KKASSERT(pv->pv_pmap == pmap &&
3797 				 pv->pv_pindex == pindex);
3798 			return(pv);
3799 		}
3800 		if (pmap_excl) {
3801 			spin_unlock(&pmap->pm_spin);
3802 			_pv_lock(pv PMAP_DEBUG_COPY);
3803 			pv_put(pv);
3804 			spin_lock(&pmap->pm_spin);
3805 		} else {
3806 			spin_unlock_shared(&pmap->pm_spin);
3807 			_pv_lock(pv PMAP_DEBUG_COPY);
3808 			pv_put(pv);
3809 			spin_lock_shared(&pmap->pm_spin);
3810 		}
3811 	}
3812 }
3813 
3814 /*
3815  * Lookup, hold, and attempt to lock (pmap,pindex).
3816  *
3817  * If the entry does not exist NULL is returned and *errorp is set to 0
3818  *
3819  * If the entry exists and could be successfully locked it is returned and
3820  * errorp is set to 0.
3821  *
3822  * If the entry exists but could NOT be successfully locked it is returned
3823  * held and *errorp is set to 1.
3824  *
3825  * If the entry is placemarked by someone else NULL is returned and *errorp
3826  * is set to 1.
3827  */
3828 static
3829 pv_entry_t
3830 pv_get_try(pmap_t pmap, vm_pindex_t pindex, vm_pindex_t **pmarkp, int *errorp)
3831 {
3832 	pv_entry_t pv;
3833 
3834 	spin_lock_shared(&pmap->pm_spin);
3835 
3836 	pv = pv_entry_lookup(pmap, pindex);
3837 	if (pv == NULL) {
3838 		vm_pindex_t *pmark;
3839 
3840 		pmark = pmap_placemarker_hash(pmap, pindex);
3841 
3842 		if (((*pmark ^ pindex) & ~PM_PLACEMARK_WAKEUP) == 0) {
3843 			*errorp = 1;
3844 		} else if (pmarkp &&
3845 			   atomic_cmpset_long(pmark, PM_NOPLACEMARK, pindex)) {
3846 			*errorp = 0;
3847 		} else {
3848 			/*
3849 			 * Can't set a placemark with a NULL pmarkp, or if
3850 			 * pmarkp is non-NULL but we failed to set our
3851 			 * placemark.
3852 			 */
3853 			*errorp = 1;
3854 		}
3855 		if (pmarkp)
3856 			*pmarkp = pmark;
3857 		spin_unlock_shared(&pmap->pm_spin);
3858 
3859 		return NULL;
3860 	}
3861 
3862 	/*
3863 	 * XXX This has problems if the lock is shared, why?
3864 	 */
3865 	if (pv_hold_try(pv)) {
3866 		spin_unlock_shared(&pmap->pm_spin);
3867 		*errorp = 0;
3868 		KKASSERT(pv->pv_pmap == pmap && pv->pv_pindex == pindex);
3869 		return(pv);	/* lock succeeded */
3870 	}
3871 	spin_unlock_shared(&pmap->pm_spin);
3872 	*errorp = 1;
3873 
3874 	return (pv);		/* lock failed */
3875 }
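
/*
 * Illustrative fragment (hypothetical, variable names assumed): how the
 * four documented outcomes of pv_get_try() are typically consumed.  This
 * mirrors the error handling in pmap_scan_callback() below.
 */
#if 0
	vm_pindex_t *pmark;
	pv_entry_t pv;
	int error;

	pv = pv_get_try(pmap, pindex, &pmark, &error);
	if (error) {
		if (pv) {
			pv_lock(pv);		/* wait out the owner */
			pv_put(pv);
		} else {
			pv_placemarker_wait(pmap, pmark);
		}
		/* retry from the top of the caller's loop */
	} else if (pv) {
		/* locked + held pv; use it, then pv_put(pv) */
	} else {
		/* no pv; our placemark is set and must be woken later */
		pv_placemarker_wakeup(pmap, pmark);
	}
#endif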
3876 
3877 /*
3878  * Lock a held pv, keeping the hold count
3879  */
3880 static
3881 void
3882 _pv_lock(pv_entry_t pv PMAP_DEBUG_DECL)
3883 {
3884 	u_int count;
3885 
3886 	for (;;) {
3887 		count = pv->pv_hold;
3888 		cpu_ccfence();
3889 		if ((count & PV_HOLD_LOCKED) == 0) {
3890 			if (atomic_cmpset_int(&pv->pv_hold, count,
3891 					      count | PV_HOLD_LOCKED)) {
3892 #ifdef PMAP_DEBUG
3893 				pv->pv_func = func;
3894 				pv->pv_line = lineno;
3895 #endif
3896 				return;
3897 			}
3898 			continue;
3899 		}
3900 		tsleep_interlock(pv, 0);
3901 		if (atomic_cmpset_int(&pv->pv_hold, count,
3902 				      count | PV_HOLD_WAITING)) {
3903 #ifdef PMAP_DEBUG2
3904 			if (pmap_enter_debug > 0) {
3905 				--pmap_enter_debug;
3906 				kprintf("pv waiting on %s:%d\n",
3907 					pv->pv_func, pv->pv_line);
3908 			}
3909 #endif
3910 			tsleep(pv, PINTERLOCKED, "pvwait", hz);
3911 		}
3912 		/* retry */
3913 	}
3914 }
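
/*
 * Illustrative sketch (hypothetical helper): the lost-wakeup-free wait
 * used above, in generic form.  tsleep_interlock() is issued before the
 * WAITING bit is set, so a pv_unlock()-style release that clears the bit
 * and calls wakeup() cannot slip in between the test and the sleep.  The
 * releasing side must wakeup(flagsp) whenever it observed waitbit set.
 */
#if 0
static void
example_wait_for_clear(u_int *flagsp, u_int busybit, u_int waitbit)
{
	u_int v;

	for (;;) {
		v = *flagsp;
		cpu_ccfence();
		if ((v & busybit) == 0)
			return;
		tsleep_interlock(flagsp, 0);
		if (atomic_cmpset_int(flagsp, v, v | waitbit))
			tsleep(flagsp, PINTERLOCKED, "exwait", hz);
		/* retry */
	}
}
#endif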
3915 
3916 /*
3917  * Unlock a held and locked pv, keeping the hold count.
3918  */
3919 static
3920 void
3921 pv_unlock(pv_entry_t pv)
3922 {
3923 	u_int count;
3924 
3925 	for (;;) {
3926 		count = pv->pv_hold;
3927 		cpu_ccfence();
3928 		KKASSERT((count & (PV_HOLD_LOCKED | PV_HOLD_MASK)) >=
3929 			 (PV_HOLD_LOCKED | 1));
3930 		if (atomic_cmpset_int(&pv->pv_hold, count,
3931 				      count &
3932 				      ~(PV_HOLD_LOCKED | PV_HOLD_WAITING))) {
3933 			if (count & PV_HOLD_WAITING)
3934 				wakeup(pv);
3935 			break;
3936 		}
3937 	}
3938 }
3939 
3940 /*
3941  * Unlock and drop a pv.  If the pv is no longer associated with a pmap
3942  * and the hold count drops to zero we will free it.
3943  *
3944  * Caller should not hold any spin locks.  We are protected from hold races
3945  * by virtue of holds occurring only with a pmap_spin or vm_page_spin
3946  * lock held.  A pv cannot be located otherwise.
3947  */
3948 static
3949 void
3950 pv_put(pv_entry_t pv)
3951 {
3952 #ifdef PMAP_DEBUG2
3953 	if (pmap_enter_debug > 0) {
3954 		--pmap_enter_debug;
3955 		kprintf("pv_put pv=%p hold=%08x\n", pv, pv->pv_hold);
3956 	}
3957 #endif
3958 
3959 	/*
3960 	 * Normal put-aways must have a pv_m associated with the pv,
3961 	 * but allow the case where the pv has been destructed due
3962 	 * to pmap_dynamic_delete.
3963 	 */
3964 	KKASSERT(pv->pv_pmap == NULL || pv->pv_m != NULL);
3965 
3966 	/*
3967 	 * Fast - shortcut most common condition
3968 	 */
3969 	if (atomic_cmpset_int(&pv->pv_hold, PV_HOLD_LOCKED | 2, 1))
3970 		return;
3971 
3972 	/*
3973 	 * Slow
3974 	 */
3975 	pv_unlock(pv);
3976 	pv_drop(pv);
3977 }
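
/*
 * Worked example of the fast path above: a pv that is locked and holds
 * exactly two refs (the caller's and the pmap association's) has
 * pv_hold == (PV_HOLD_LOCKED | 2).  The single compare-and-set to 1
 * clears the lock bit and drops the caller's ref in one atomic step,
 * leaving only the ref representing the pmap association.
 */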
3978 
3979 /*
3980  * Remove the pmap association from a pv, require that pv_m already be removed,
3981  * then unlock and drop the pv.  Any pte operations must have already been
3982  * completed.  This call may result in a last-drop which will physically free
3983  * the pv.
3984  *
3985  * Removing the pmap association entails an additional drop.
3986  *
3987  * pv must be exclusively locked on call and will be disposed of on return.
3988  */
3989 static
3990 void
3991 _pv_free(pv_entry_t pv, pv_entry_t pvp PMAP_DEBUG_DECL)
3992 {
3993 	pmap_t pmap;
3994 
3995 #ifdef PMAP_DEBUG
3996 	pv->pv_func_lastfree = func;
3997 	pv->pv_line_lastfree = lineno;
3998 #endif
3999 	KKASSERT(pv->pv_m == NULL);
4000 	KKASSERT((pv->pv_hold & (PV_HOLD_LOCKED|PV_HOLD_MASK)) >=
4001 		  (PV_HOLD_LOCKED|1));
4002 	if ((pmap = pv->pv_pmap) != NULL) {
4003 		spin_lock(&pmap->pm_spin);
4004 		KKASSERT(pv->pv_pmap == pmap);
4005 		if (pmap->pm_pvhint_pt == pv)
4006 			pmap->pm_pvhint_pt = NULL;
4007 		if (pmap->pm_pvhint_unused == pv)
4008 			pmap->pm_pvhint_unused = NULL;
4009 		pv_entry_rb_tree_RB_REMOVE(&pmap->pm_pvroot, pv);
4010 		atomic_add_long(&pmap->pm_stats.resident_count, -1);
4011 		pv->pv_pmap = NULL;
4012 		pv->pv_pindex = 0;
4013 		spin_unlock(&pmap->pm_spin);
4014 
4015 		/*
4016 		 * Try to shortcut three atomic ops, otherwise fall through
4017 		 * and do it normally.  Drop two refs and the lock all in
4018 		 * one go.
4019 		 */
4020 		if (pvp) {
4021 			if (vm_page_unwire_quick(pvp->pv_m))
4022 				panic("_pv_free: bad wirecount on pvp");
4023 		}
4024 		if (atomic_cmpset_int(&pv->pv_hold, PV_HOLD_LOCKED | 2, 0)) {
4025 #ifdef PMAP_DEBUG2
4026 			if (pmap_enter_debug > 0) {
4027 				--pmap_enter_debug;
4028 				kprintf("pv_free: free pv %p\n", pv);
4029 			}
4030 #endif
4031 			zfree(pvzone, pv);
4032 			return;
4033 		}
4034 		pv_drop(pv);	/* ref for pv_pmap */
4035 	}
4036 	pv_unlock(pv);
4037 	pv_drop(pv);
4038 }
4039 
4040 /*
4041  * This routine is very drastic, but can save the system
4042  * in a pinch.
4043  */
4044 void
4045 pmap_collect(void)
4046 {
4047 	int i;
4048 	vm_page_t m;
4049 	static int warningdone=0;
4050 
4051 	if (pmap_pagedaemon_waken == 0)
4052 		return;
4053 	pmap_pagedaemon_waken = 0;
4054 	if (warningdone < 5) {
4055 		kprintf("pmap_collect: pv_entries exhausted -- "
4056 			"suggest increasing vm.pmap_pv_entries above %ld\n",
4057 			vm_pmap_pv_entries);
4058 		warningdone++;
4059 	}
4060 
4061 	for (i = 0; i < vm_page_array_size; i++) {
4062 		m = &vm_page_array[i];
4063 		if (m->wire_count || m->hold_count)
4064 			continue;
4065 		if (vm_page_busy_try(m, TRUE) == 0) {
4066 			if (m->wire_count == 0 && m->hold_count == 0) {
4067 				pmap_remove_all(m);
4068 			}
4069 			vm_page_wakeup(m);
4070 		}
4071 	}
4072 }
4073 
4074 /*
4075  * Scan the pmap for active page table entries and issue a callback.
4076  * The callback must dispose of pte_pv, whose PTE entry is at *ptep in
4077  * its parent page table.
4078  *
4079  * pte_pv will be NULL if the page or page table is unmanaged.
4080  * pt_pv will point to the page table page containing the pte for the page.
4081  *
4082  * NOTE! If we come across an unmanaged page TABLE (versus an unmanaged page),
4083  *	 we pass a NULL pte_pv and we pass a pt_pv pointing to the passed
4084  *	 process pmap's PD and page to the callback function.  This can be
4085  *	 confusing because the pt_pv is really a pd_pv, and the target page
4086  *	 table page is simply aliased by the pmap and not owned by it.
4087  *
4088  * It is assumed that the start and end are properly rounded to the page size.
4089  *
4090  * It is assumed that PD pages and above are managed and thus in the RB tree,
4091  * allowing us to use RB_SCAN from the PD pages down for ranged scans.
4092  */
4093 struct pmap_scan_info {
4094 	struct pmap *pmap;
4095 	vm_offset_t sva;
4096 	vm_offset_t eva;
4097 	vm_pindex_t sva_pd_pindex;
4098 	vm_pindex_t eva_pd_pindex;
4099 	void (*func)(pmap_t, struct pmap_scan_info *,
4100 		     vm_pindex_t *, pv_entry_t, vm_offset_t,
4101 		     pt_entry_t *, void *);
4102 	void *arg;
4103 	pmap_inval_bulk_t bulk_core;
4104 	pmap_inval_bulk_t *bulk;
4105 	int count;
4106 	int stop;
4107 };
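
/*
 * Illustrative sketch (hypothetical callback and wrapper): a minimal use
 * of the scan framework.  The caller fills in the range, callback and
 * argument and invokes pmap_scan(); the callback must consume the
 * pte_placemark it is handed.  pmap_remove() and pmap_protect() below
 * follow exactly this shape.
 */
#if 0
static void
example_count_callback(pmap_t pmap, struct pmap_scan_info *info,
		       vm_pindex_t *pte_placemark, pv_entry_t pt_pv,
		       vm_offset_t va, pt_entry_t *ptep, void *arg)
{
	long *counter = arg;

	if (*ptep & pmap->pmap_bits[PG_V_IDX])
		++*counter;
	pv_placemarker_wakeup(pmap, pte_placemark);	/* consume the lock */
}

static long
example_count_valid(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
	struct pmap_scan_info info;
	long counter = 0;

	info.pmap = pmap;
	info.sva = sva;
	info.eva = eva;
	info.func = example_count_callback;
	info.arg = &counter;
	pmap_scan(&info, 0);		/* no SMP invalidation required */
	return counter;
}
#endif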
4108 
4109 static int pmap_scan_cmp(pv_entry_t pv, void *data);
4110 static int pmap_scan_callback(pv_entry_t pv, void *data);
4111 
4112 static void
4113 pmap_scan(struct pmap_scan_info *info, int smp_inval)
4114 {
4115 	struct pmap *pmap = info->pmap;
4116 	pv_entry_t pt_pv;	/* A page table PV */
4117 	pv_entry_t pte_pv;	/* A page table entry PV */
4118 	vm_pindex_t *pte_placemark;
4119 	vm_pindex_t *pt_placemark;
4120 	pt_entry_t *ptep;
4121 	pt_entry_t oldpte;
4122 	struct pv_entry dummy_pv;
4123 
4124 	info->stop = 0;
4125 	if (pmap == NULL)
4126 		return;
4127 	if (info->sva == info->eva)
4128 		return;
4129 	if (smp_inval) {
4130 		info->bulk = &info->bulk_core;
4131 		pmap_inval_bulk_init(&info->bulk_core, pmap);
4132 	} else {
4133 		info->bulk = NULL;
4134 	}
4135 
4136 	/*
4137 	 * Hold the token for stability; if the pmap is empty we have nothing
4138 	 * to do.
4139 	 */
4140 #if 0
4141 	if (pmap->pm_stats.resident_count == 0) {
4142 		return;
4143 	}
4144 #endif
4145 
4146 	info->count = 0;
4147 
4148 	/*
4149 	 * Special handling for scanning one page, which is a very common
4150 	 * operation (it is?).
4151 	 *
4152 	 * NOTE: Locks must be ordered bottom-up. pte,pt,pd,pdp,pml4
4153 	 */
4154 	if (info->sva + PAGE_SIZE == info->eva) {
4155 		if (info->sva >= VM_MAX_USER_ADDRESS) {
4156 			/*
4157 			 * Kernel mappings do not track wire counts on
4158 			 * page table pages and only maintain pd_pv and
4159 			 * pte_pv levels so pmap_scan() works.
4160 			 */
4161 			pt_pv = NULL;
4162 			pte_pv = pv_get(pmap, pmap_pte_pindex(info->sva),
4163 					&pte_placemark);
4164 			KKASSERT(pte_pv == NULL);
4165 			ptep = vtopte(info->sva);
4166 		} else {
4167 			/*
4168 			 * We hold pte_placemark across the operation for
4169 			 * unmanaged pages.
4170 			 *
4171 			 * WARNING!  We must hold pt_placemark across the
4172 			 *	     *ptep test to prevent misinterpreting
4173 			 *	     a non-zero *ptep as a shared page
4174 			 *	     table page.  Hold it across the function
4175 			 *	     callback as well for SMP safety.
4176 			 */
4177 			pte_pv = pv_get(pmap, pmap_pte_pindex(info->sva),
4178 					&pte_placemark);
4179 			KKASSERT(pte_pv == NULL);
4180 			pt_pv = pv_get(pmap, pmap_pt_pindex(info->sva),
4181 				       &pt_placemark);
4182 			if (pt_pv == NULL) {
4183 #if 0
4184 				KKASSERT(0);
4185 				pd_pv = pv_get(pmap,
4186 					       pmap_pd_pindex(info->sva),
4187 					       NULL);
4188 				if (pd_pv) {
4189 					ptep = pv_pte_lookup(pd_pv,
4190 						    pmap_pt_index(info->sva));
4191 					if (*ptep) {
4192 						info->func(pmap, info,
4193 						     pt_placemark, pd_pv,
4194 						     info->sva, ptep,
4195 						     info->arg);
4196 					} else {
4197 						pv_placemarker_wakeup(pmap,
4198 								  pt_placemark);
4199 					}
4200 					pv_put(pd_pv);
4201 				} else {
4202 					pv_placemarker_wakeup(pmap,
4203 							      pt_placemark);
4204 				}
4205 #else
4206 				pv_placemarker_wakeup(pmap, pt_placemark);
4207 #endif
4208 				pv_placemarker_wakeup(pmap, pte_placemark);
4209 				goto fast_skip;
4210 			}
4211 			ptep = pv_pte_lookup(pt_pv, pmap_pte_index(info->sva));
4212 		}
4213 
4214 		/*
4215 		 * NOTE: *ptep can't be ripped out from under us if we hold
4216 		 *	 pte_pv (or pte_placemark) locked, but bits can
4217 		 *	 change.
4218 		 */
4219 		oldpte = *ptep;
4220 		cpu_ccfence();
4221 		if (oldpte == 0) {
4222 			KKASSERT(pte_pv == NULL);
4223 			pv_placemarker_wakeup(pmap, pte_placemark);
4224 		} else {
4225 			KASSERT((oldpte & pmap->pmap_bits[PG_V_IDX]) ==
4226 				pmap->pmap_bits[PG_V_IDX],
4227 			    ("badB *ptep %016lx/%016lx sva %016lx pte_pv NULL",
4228 			    *ptep, oldpte, info->sva));
4229 			info->func(pmap, info, pte_placemark, pt_pv,
4230 				   info->sva, ptep, info->arg);
4231 		}
4232 		if (pt_pv)
4233 			pv_put(pt_pv);
4234 fast_skip:
4235 		pmap_inval_bulk_flush(info->bulk);
4236 		return;
4237 	}
4238 
4239 	/*
4240 	 * Nominal scan case, RB_SCAN() for PD pages and iterate from
4241 	 * there.
4242 	 *
4243 	 * WARNING! eva can overflow our standard ((N + mask) >> bits)
4244 	 *	    bounds, resulting in a pd_pindex of 0.  To solve the
4245 	 *	    problem we use an inclusive range.
4246 	 */
4247 	info->sva_pd_pindex = pmap_pd_pindex(info->sva);
4248 	info->eva_pd_pindex = pmap_pd_pindex(info->eva - PAGE_SIZE);
4249 
4250 	if (info->sva >= VM_MAX_USER_ADDRESS) {
4251 		/*
4252 		 * The kernel does not currently maintain any pv_entry's for
4253 		 * higher-level page tables.
4254 		 */
4255 		bzero(&dummy_pv, sizeof(dummy_pv));
4256 		dummy_pv.pv_pindex = info->sva_pd_pindex;
4257 		spin_lock(&pmap->pm_spin);
4258 		while (dummy_pv.pv_pindex <= info->eva_pd_pindex) {
4259 			pmap_scan_callback(&dummy_pv, info);
4260 			++dummy_pv.pv_pindex;
4261 			if (dummy_pv.pv_pindex < info->sva_pd_pindex) /*wrap*/
4262 				break;
4263 		}
4264 		spin_unlock(&pmap->pm_spin);
4265 	} else {
4266 		/*
4267 		 * User page tables maintain local PML4, PDP, PD, and PT
4268 		 * pv_entry's.  pv_entry's are not used for PTEs.
4269 		 */
4270 		spin_lock(&pmap->pm_spin);
4271 		pv_entry_rb_tree_RB_SCAN(&pmap->pm_pvroot, pmap_scan_cmp,
4272 					 pmap_scan_callback, info);
4273 		spin_unlock(&pmap->pm_spin);
4274 	}
4275 	pmap_inval_bulk_flush(info->bulk);
4276 }
4277 
4278 /*
4279  * WARNING! pmap->pm_spin held
4280  *
4281  * WARNING! eva can overflow our standard ((N + mask) >> bits)
4282  *	    bounds, resulting in a pd_pindex of 0.  To solve the
4283  *	    problem we use an inclusive range.
4284  */
4285 static int
4286 pmap_scan_cmp(pv_entry_t pv, void *data)
4287 {
4288 	struct pmap_scan_info *info = data;
4289 	if (pv->pv_pindex < info->sva_pd_pindex)
4290 		return(-1);
4291 	if (pv->pv_pindex > info->eva_pd_pindex)
4292 		return(1);
4293 	return(0);
4294 }
4295 
4296 /*
4297  * pmap_scan() by PDs
4298  *
4299  * WARNING! pmap->pm_spin held
4300  */
4301 static int
4302 pmap_scan_callback(pv_entry_t pv, void *data)
4303 {
4304 	struct pmap_scan_info *info = data;
4305 	struct pmap *pmap = info->pmap;
4306 	pv_entry_t pd_pv;	/* A page directory PV */
4307 	pv_entry_t pt_pv;	/* A page table PV */
4308 	vm_pindex_t *pt_placemark;
4309 	pt_entry_t *ptep;
4310 	pt_entry_t oldpte;
4311 	vm_offset_t sva;
4312 	vm_offset_t eva;
4313 	vm_offset_t va_next;
4314 	vm_pindex_t pd_pindex;
4315 	int error;
4316 
4317 	/*
4318 	 * Stop if requested
4319 	 */
4320 	if (info->stop)
4321 		return -1;
4322 
4323 	/*
4324 	 * Pull the PD pindex from the pv before releasing the spinlock.
4325 	 *
4326 	 * WARNING: pv is faked for kernel pmap scans.
4327 	 */
4328 	pd_pindex = pv->pv_pindex;
4329 	spin_unlock(&pmap->pm_spin);
4330 	pv = NULL;	/* invalid after spinlock unlocked */
4331 
4332 	/*
4333 	 * Calculate the page range within the PD.  SIMPLE pmaps are
4334 	 * direct-mapped for the entire 2^64 address space.  Normal pmaps
4335 	 * reflect the user and kernel address space which requires
4336 	 * canonicalization with regard to converting pd_pindex's back
4337 	 * into addresses.
4338 	 */
4339 	sva = (pd_pindex - pmap_pd_pindex(0)) << PDPSHIFT;
4340 	if ((pmap->pm_flags & PMAP_FLAG_SIMPLE) == 0 &&
4341 	    (sva & PML4_SIGNMASK)) {
4342 		sva |= PML4_SIGNMASK;
4343 	}
4344 	eva = sva + NBPDP;	/* can overflow */
4345 	if (sva < info->sva)
4346 		sva = info->sva;
4347 	if (eva < info->sva || eva > info->eva)
4348 		eva = info->eva;
4349 
4350 	/*
4351 	 * NOTE: kernel mappings do not track page table pages, only
4352 	 * 	 terminal pages.
4353 	 *
4354 	 * NOTE: Locks must be ordered bottom-up. pte,pt,pd,pdp,pml4.
4355 	 *	 However, for the scan to be efficient we try to
4356 	 *	 cache items top-down.
4357 	 */
4358 	pd_pv = NULL;
4359 	pt_pv = NULL;
4360 
4361 	for (; sva < eva; sva = va_next) {
4362 		if (info->stop)
4363 			break;
4364 		if (sva >= VM_MAX_USER_ADDRESS) {
4365 			if (pt_pv) {
4366 				pv_put(pt_pv);
4367 				pt_pv = NULL;
4368 			}
4369 			goto kernel_skip;
4370 		}
4371 
4372 		/*
4373 		 * PD cache, scan shortcut if it doesn't exist.
4374 		 */
4375 		if (pd_pv == NULL) {
4376 			pd_pv = pv_get(pmap, pmap_pd_pindex(sva), NULL);
4377 		} else if (pd_pv->pv_pmap != pmap ||
4378 			   pd_pv->pv_pindex != pmap_pd_pindex(sva)) {
4379 			pv_put(pd_pv);
4380 			pd_pv = pv_get(pmap, pmap_pd_pindex(sva), NULL);
4381 		}
4382 		if (pd_pv == NULL) {
4383 			va_next = (sva + NBPDP) & ~PDPMASK;
4384 			if (va_next < sva)
4385 				va_next = eva;
4386 			continue;
4387 		}
4388 
4389 		/*
4390 		 * PT cache
4391 		 *
4392 		 * NOTE: The cached pt_pv can be removed from the pmap when
4393 		 *	 pmap_dynamic_delete is enabled.
4394 		 */
4395 		if (pt_pv && (pt_pv->pv_pmap != pmap ||
4396 			      pt_pv->pv_pindex != pmap_pt_pindex(sva))) {
4397 			pv_put(pt_pv);
4398 			pt_pv = NULL;
4399 		}
4400 		if (pt_pv == NULL) {
4401 			pt_pv = pv_get_try(pmap, pmap_pt_pindex(sva),
4402 					   &pt_placemark, &error);
4403 			if (error) {
4404 				pv_put(pd_pv);	/* lock order */
4405 				pd_pv = NULL;
4406 				if (pt_pv) {
4407 					pv_lock(pt_pv);
4408 					pv_put(pt_pv);
4409 					pt_pv = NULL;
4410 				} else {
4411 					pv_placemarker_wait(pmap, pt_placemark);
4412 				}
4413 				va_next = sva;
4414 				continue;
4415 			}
4416 			/* may have to re-check later if pt_pv is NULL here */
4417 		}
4418 
4419 		/*
4420 		 * If pt_pv is NULL we either have a shared page table
4421 		 * page (NOT IMPLEMENTED XXX) and must issue a callback
4422 		 * specific to that case, or there is no page table page.
4423 		 *
4424 		 * Either way we can skip the page table page.
4425 		 *
4426 		 * WARNING! pt_pv can also be NULL due to a pv creation
4427 		 *	    race where we find it to be NULL and then
4428 		 *	    later see a pte_pv.  But it's possible the pt_pv
4429 		 *	    got created in between the two operations, so
4430 		 *	    we must check.
4431 		 *
4432 		 *	    XXX This should no longer be the case because
4433 		 *	    we have pt_placemark.
4434 		 */
4435 		if (pt_pv == NULL) {
4436 #if 0
4437 			/* XXX REMOVED */
4438 			/*
4439 			 * Possible unmanaged (shared from another pmap)
4440 			 * page table page.
4441 			 *
4442 			 * WARNING!  We must hold pt_placemark across the
4443 			 *	     *ptep test to prevent misinterpreting
4444 			 *	     a non-zero *ptep as a shared page
4445 			 *	     table page.  Hold it across the function
4446 			 *	     callback as well for SMP safety.
4447 			 */
4448 			KKASSERT(0);
4449 			ptep = pv_pte_lookup(pd_pv, pmap_pt_index(sva));
4450 			if (*ptep & pmap->pmap_bits[PG_V_IDX]) {
4451 				info->func(pmap, info, pt_placemark, pd_pv,
4452 					   sva, ptep, info->arg);
4453 			} else {
4454 				pv_placemarker_wakeup(pmap, pt_placemark);
4455 			}
4456 #else
4457 			pv_placemarker_wakeup(pmap, pt_placemark);
4458 #endif
4459 
4460 			/*
4461 			 * Done, move to next page table page.
4462 			 */
4463 			va_next = (sva + NBPDR) & ~PDRMASK;
4464 			if (va_next < sva)
4465 				va_next = eva;
4466 			continue;
4467 		}
4468 
4469 		/*
4470 		 * From this point in the loop testing pt_pv for non-NULL
4471 		 * means we are in UVM, else if it is NULL we are in KVM.
4472 		 *
4473 		 * Limit our scan to either the end of the va represented
4474 		 * by the current page table page, or to the end of the
4475 		 * range being removed.
4476 		 */
4477 kernel_skip:
4478 		va_next = (sva + NBPDR) & ~PDRMASK;
4479 		if (va_next < sva)
4480 			va_next = eva;
4481 		if (va_next > eva)
4482 			va_next = eva;
4483 
4484 		/*
4485 		 * Scan the page table for pages.  Some pages may not be
4486 		 * managed (might not have a pv_entry).
4487 		 *
4488 		 * There is no page table management for kernel pages so
4489 		 * pt_pv will be NULL in that case, but otherwise pt_pv
4490 		 * is non-NULL, locked, and referenced.
4491 		 */
4492 
4493 		/*
4494 		 * At this point a non-NULL pt_pv means a UVA, and a NULL
4495 		 * pt_pv means a KVA.
4496 		 */
4497 		if (pt_pv)
4498 			ptep = pv_pte_lookup(pt_pv, pmap_pte_index(sva));
4499 		else
4500 			ptep = vtopte(sva);
4501 
4502 		while (sva < va_next) {
4503 			vm_pindex_t *pte_placemark;
4504 			pv_entry_t pte_pv;
4505 
4506 			/*
4507 			 * Yield every 64 pages, stop if requested.
4508 			 */
4509 			if ((++info->count & 63) == 0)
4510 				lwkt_user_yield();
4511 			if (info->stop)
4512 				break;
4513 
4514 			/*
4515 			 * We can shortcut our scan if *ptep == 0.  This is
4516 			 * an unlocked check.
4517 			 */
4518 			if (*ptep == 0) {
4519 				sva += PAGE_SIZE;
4520 				++ptep;
4521 				continue;
4522 			}
4523 			cpu_ccfence();
4524 
4525 			/*
4526 			 * Acquire the pte_placemark.  pte_pv's won't exist
4527 			 * for leaf pages.
4528 			 *
4529 			 * A multitude of races are possible here so if we
4530 			 * cannot lock definite state we clean out our cache
4531 			 * and break the inner while() loop to force a loop
4532 			 * up to the top of the for().
4533 			 *
4534 			 * XXX unlock/relock pd_pv, pt_pv, and re-test their
4535 			 *     validity instead of looping up?
4536 			 */
4537 			pte_pv = pv_get_try(pmap, pmap_pte_pindex(sva),
4538 					    &pte_placemark, &error);
4539 			KKASSERT(pte_pv == NULL);
4540 			if (error) {
4541 				if (pd_pv) {
4542 					pv_put(pd_pv);	/* lock order */
4543 					pd_pv = NULL;
4544 				}
4545 				if (pt_pv) {
4546 					pv_put(pt_pv);	/* lock order */
4547 					pt_pv = NULL;
4548 				}
4549 				pv_placemarker_wait(pmap, pte_placemark);
4550 				va_next = sva;		/* retry */
4551 				break;
4552 			}
4553 
4554 			/*
4555 			 * Reload *ptep after successfully locking the
4556 			 * pindex.
4557 			 */
4558 			cpu_ccfence();
4559 			oldpte = *ptep;
4560 			if (oldpte == 0) {
4561 				pv_placemarker_wakeup(pmap, pte_placemark);
4562 				sva += PAGE_SIZE;
4563 				++ptep;
4564 				continue;
4565 			}
4566 
4567 			/*
4568 			 * We can't hold pd_pv across the callback (because
4569 			 * we don't pass it to the callback and the callback
4570 			 * might deadlock)
4571 			 */
4572 			if (pd_pv) {
4573 				vm_page_wire_quick(pd_pv->pv_m);
4574 				pv_unlock(pd_pv);
4575 			}
4576 
4577 			/*
4578 			 * Ready for the callback.  The locked placemarker
4579 			 * is consumed by the callback.
4580 			 */
4581 			if (oldpte & pmap->pmap_bits[PG_MANAGED_IDX]) {
4582 				/*
4583 				 * Managed pte
4584 				 */
4585 				KASSERT((oldpte & pmap->pmap_bits[PG_V_IDX]),
4586 				    ("badC *ptep %016lx/%016lx sva %016lx",
4587 				    *ptep, oldpte, sva));
4588 				/*
4589 				 * We must unlock pd_pv across the callback
4590 				 * to avoid deadlocks on any recursive
4591 				 * disposal.  Re-check that it still exists
4592 				 * after re-locking.
4593 				 *
4594 				 * Call target disposes of pte_placemark
4595 				 * and may destroy but will not dispose
4596 				 * of pt_pv.
4597 				 */
4598 				info->func(pmap, info, pte_placemark, pt_pv,
4599 					   sva, ptep, info->arg);
4600 			} else {
4601 				/*
4602 				 * Unmanaged pte
4603 				 *
4604 				 * We must unlock pd_pv across the callback
4605 				 * to avoid deadlocks on any recursive
4606 				 * disposal.  Re-check that it still exists
4607 				 * after re-locking.
4608 				 *
4609 				 * Call target disposes of pte_placemark
4610 				 * and may destroy but will not dispose
4611 				 * of pt_pv.
4612 				 */
4613 				KASSERT((oldpte & pmap->pmap_bits[PG_V_IDX]),
4614 				    ("badD *ptep %016lx/%016lx sva %016lx ",
4615 				     *ptep, oldpte, sva));
4616 				info->func(pmap, info, pte_placemark, pt_pv,
4617 					   sva, ptep, info->arg);
4618 			}
4619 			if (pd_pv) {
4620 				pv_lock(pd_pv);
4621 				if (vm_page_unwire_quick(pd_pv->pv_m)) {
4622 					panic("pmap_scan_callback: "
4623 					      "bad wirecount on pd_pv");
4624 				}
4625 				if (pd_pv->pv_pmap == NULL) {
4626 					va_next = sva;		/* retry */
4627 					break;
4628 				}
4629 			}
4630 
4631 			/*
4632 			 * NOTE: The cached pt_pv can be removed from the
4633 			 *	 pmap when pmap_dynamic_delete is enabled,
4634 			 *	 which will cause ptep to become stale.
4635 			 *
4636 			 *	 This also means that no pages remain under
4637 			 *	 the PT, so we can just break out of the inner
4638 			 *	 loop and let the outer loop clean everything
4639 			 *	 up.
4640 			 */
4641 			if (pt_pv && pt_pv->pv_pmap != pmap)
4642 				break;
4643 			sva += PAGE_SIZE;
4644 			++ptep;
4645 		}
4646 	}
4647 	if (pd_pv) {
4648 		pv_put(pd_pv);
4649 		pd_pv = NULL;
4650 	}
4651 	if (pt_pv) {
4652 		pv_put(pt_pv);
4653 		pt_pv = NULL;
4654 	}
4655 	if ((++info->count & 7) == 0)
4656 		lwkt_user_yield();
4657 
4658 	/*
4659 	 * Relock before returning.
4660 	 */
4661 	spin_lock(&pmap->pm_spin);
4662 	return (0);
4663 }
4664 
4665 void
4666 pmap_remove(struct pmap *pmap, vm_offset_t sva, vm_offset_t eva)
4667 {
4668 	struct pmap_scan_info info;
4669 
4670 	info.pmap = pmap;
4671 	info.sva = sva;
4672 	info.eva = eva;
4673 	info.func = pmap_remove_callback;
4674 	info.arg = NULL;
4675 	pmap_scan(&info, 1);
4676 #if 0
4677 	cpu_invltlb();
4678 	if (eva - sva < 1024*1024) {
4679 		while (sva < eva) {
4680 			cpu_invlpg((void *)sva);
4681 			sva += PAGE_SIZE;
4682 		}
4683 	}
4684 #endif
4685 }
4686 
4687 static void
4688 pmap_remove_noinval(struct pmap *pmap, vm_offset_t sva, vm_offset_t eva)
4689 {
4690 	struct pmap_scan_info info;
4691 
4692 	info.pmap = pmap;
4693 	info.sva = sva;
4694 	info.eva = eva;
4695 	info.func = pmap_remove_callback;
4696 	info.arg = NULL;
4697 	pmap_scan(&info, 0);
4698 }
4699 
4700 static void
4701 pmap_remove_callback(pmap_t pmap, struct pmap_scan_info *info,
4702 		     vm_pindex_t *pte_placemark, pv_entry_t pt_pv,
4703 		     vm_offset_t va, pt_entry_t *ptep, void *arg __unused)
4704 {
4705 	pt_entry_t pte;
4706 	vm_page_t oldm;
4707 
4708 	/*
4709 	 * Managed or unmanaged pte (pte_placemark is non-NULL)
4710 	 *
4711 	 * pt_pv's wire_count is still bumped by unmanaged pages
4712 	 * so we must decrement it manually.
4713 	 *
4714 	 * We have to unwire the target page table page.
4715 	 */
4716 	pte = *ptep;
4717 	if (pte & pmap->pmap_bits[PG_MANAGED_IDX]) {
4718 		oldm = PHYS_TO_VM_PAGE(pte & PG_FRAME);
4719 		atomic_add_long(&oldm->md.interlock_count, 1);
4720 	} else {
4721 		oldm = NULL;
4722 	}
4723 
4724 	pte = pmap_inval_bulk(info->bulk, va, ptep, 0);
4725 	if (pte & pmap->pmap_bits[PG_MANAGED_IDX]) {
4726 		vm_page_t p;
4727 
4728 		p = PHYS_TO_VM_PAGE(pte & PG_FRAME);
4729 		KKASSERT(pte & pmap->pmap_bits[PG_V_IDX]);
4730 		if (pte & pmap->pmap_bits[PG_M_IDX])
4731 			vm_page_dirty(p);
4732 		if (pte & pmap->pmap_bits[PG_A_IDX])
4733 			vm_page_flag_set(p, PG_REFERENCED);
4734 
4735 		/*
4736 		 * (p) is not hard-busied.
4737 		 *
4738 		 * We can safely clear PG_MAPPED and PG_WRITEABLE only
4739 		 * if PG_MAPPEDMULTI is not set, atomically.
4740 		 */
4741 		pmap_removed_pte(p, pte);
4742 	}
4743 	if (pte & pmap->pmap_bits[PG_V_IDX]) {
4744 		atomic_add_long(&pmap->pm_stats.resident_count, -1);
4745 		if (pt_pv && vm_page_unwire_quick(pt_pv->pv_m))
4746 			panic("pmap_remove: insufficient wirecount");
4747 	}
4748 	if (pte & pmap->pmap_bits[PG_W_IDX])
4749 		atomic_add_long(&pmap->pm_stats.wired_count, -1);
4750 	if (pte & pmap->pmap_bits[PG_G_IDX])
4751 		cpu_invlpg((void *)va);
4752 	pv_placemarker_wakeup(pmap, pte_placemark);
4753 	if (oldm) {
4754 		if ((atomic_fetchadd_long(&oldm->md.interlock_count, -1) &
4755 		     0x7FFFFFFFFFFFFFFFLU) == 0x4000000000000001LU) {
4756 			atomic_clear_long(&oldm->md.interlock_count,
4757 					  0x4000000000000000LU);
4758 			wakeup(&oldm->md.interlock_count);
4759 		}
4760 	}
4761 }
4762 
4763 /*
4764  * Removes this physical page from all physical maps in which it resides.
4765  * Reflects back modify bits to the pager.
4766  *
4767  * This routine may not be called from an interrupt.
4768  *
4769  * The page must be busied by its caller, preventing new ptes from being
4770  * installed.  This allows us to assert that pmap_count is zero and safely
4771  * clear the MAPPED and WRITEABLE bits upon completion.
4772  */
4773 static
4774 void
4775 pmap_remove_all(vm_page_t m)
4776 {
4777 	long icount;
4778 	int retry;
4779 
4780 	if (__predict_false(!pmap_initialized))
4781 		return;
4782 
4783 	/*
4784 	 * pmap_count doesn't cover fictitious pages, but PG_MAPPED does
4785 	 * (albeit without certain race protections).
4786 	 */
4787 #if 0
4788 	if (m->md.pmap_count == 0)
4789 		return;
4790 #endif
4791 	if ((m->flags & PG_MAPPED) == 0)
4792 		return;
4793 
4794 	retry = ticks + hz * 60;
4795 again:
4796 	PMAP_PAGE_BACKING_SCAN(m, NULL, ipmap, iptep, ipte, iva) {
4797 		if (!pmap_inval_smp_cmpset(ipmap, iva, iptep, ipte, 0))
4798 			PMAP_PAGE_BACKING_RETRY;
4799 		if (ipte & ipmap->pmap_bits[PG_MANAGED_IDX]) {
4800 			if (ipte & ipmap->pmap_bits[PG_M_IDX])
4801 				vm_page_dirty(m);
4802 			if (ipte & ipmap->pmap_bits[PG_A_IDX])
4803 				vm_page_flag_set(m, PG_REFERENCED);
4804 
4805 			/*
4806 			 * NOTE: m is not hard-busied so it is not safe to
4807 			 *	 clear PG_MAPPED and PG_WRITEABLE on the 1->0
4808 			 *	 transition against them being set in
4809 			 *	 pmap_enter().
4810 			 */
4811 			pmap_removed_pte(m, ipte);
4812 		}
4813 
4814 		/*
4815 		 * Cleanup various tracking counters.  pt_pv can't go away
4816 		 * due to our wired ref.
4817 		 */
4818 		if (ipmap != kernel_pmap) {
4819 			pv_entry_t pt_pv;
4820 
4821 			spin_lock_shared(&ipmap->pm_spin);
4822 			pt_pv = pv_entry_lookup(ipmap, pmap_pt_pindex(iva));
4823 			spin_unlock_shared(&ipmap->pm_spin);
4824 
4825 			if (pt_pv) {
4826 				if (vm_page_unwire_quick(pt_pv->pv_m)) {
4827 					panic("pmap_remove_all: bad "
4828 					      "wire_count on pt_pv");
4829 				}
4830 				atomic_add_long(
4831 					&ipmap->pm_stats.resident_count, -1);
4832 			}
4833 		}
4834 		if (ipte & ipmap->pmap_bits[PG_W_IDX])
4835 			atomic_add_long(&ipmap->pm_stats.wired_count, -1);
4836 		if (ipte & ipmap->pmap_bits[PG_G_IDX])
4837 			cpu_invlpg((void *)iva);
4838 	} PMAP_PAGE_BACKING_DONE;
4839 
4840 	/*
4841 	 * If our scan lost a pte swap race oldm->md.interlock_count might
4842 	 * be set from the pmap_enter() code.  If so, sleep a little and try
4843 	 * again.
4844 	 */
4845 	icount = atomic_fetchadd_long(&m->md.interlock_count,
4846 				      0x8000000000000000LU) +
4847 		 0x8000000000000000LU;
4848 	cpu_ccfence();
4849 	while (icount & 0x3FFFFFFFFFFFFFFFLU) {
4850 		tsleep_interlock(&m->md.interlock_count, 0);
4851 		if (atomic_fcmpset_long(&m->md.interlock_count, &icount,
4852 					icount | 0x4000000000000000LU)) {
4853 			tsleep(&m->md.interlock_count, PINTERLOCKED,
4854 			       "pgunm", 1);
4855 			icount = m->md.interlock_count;
4856 			if (retry - ticks > 0)
4857 				goto again;
4858 			panic("pmap_remove_all: cannot return interlock_count "
4859 			      "to 0 (%p, %ld)",
4860 			      m, m->md.interlock_count);
4861 		}
4862 	}
4863 	vm_page_flag_clear(m, PG_MAPPED | PG_MAPPEDMULTI | PG_WRITEABLE);
4864 }
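
/*
 * The interlock_count encodings used above, spelled out with hypothetical
 * names (this is an interpretation of the masks, not definitions taken
 * from the original source): the low bits count in-flight pte interlocks
 * taken by pmap_enter()/pmap_remove_callback(), bit 62 records that a
 * waiter is sleeping on the count, and bit 63 is the marker added by
 * pmap_remove_all() which the waiter check masks off.
 */
#if 0
#define EXAMPLE_ILOCK_COUNT_MASK	0x3FFFFFFFFFFFFFFFLU
#define EXAMPLE_ILOCK_WAITING		0x4000000000000000LU
#define EXAMPLE_ILOCK_REMOVE_MARKER	0x8000000000000000LU
#endif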
4865 
4866 /*
4867  * Removes the page from a particular pmap.
4868  *
4869  * The page must be busied by the caller.
4870  */
4871 void
4872 pmap_remove_specific(pmap_t pmap_match, vm_page_t m)
4873 {
4874 	if (__predict_false(!pmap_initialized))
4875 		return;
4876 
4877 	/*
4878 	 * PG_MAPPED test works for both non-fictitious and fictitious pages.
4879 	 */
4880 	if ((m->flags & PG_MAPPED) == 0)
4881 		return;
4882 
4883 	PMAP_PAGE_BACKING_SCAN(m, pmap_match, ipmap, iptep, ipte, iva) {
4884 		if (!pmap_inval_smp_cmpset(ipmap, iva, iptep, ipte, 0))
4885 			PMAP_PAGE_BACKING_RETRY;
4886 		if (ipte & ipmap->pmap_bits[PG_MANAGED_IDX]) {
4887 			if (ipte & ipmap->pmap_bits[PG_M_IDX])
4888 				vm_page_dirty(m);
4889 			if (ipte & ipmap->pmap_bits[PG_A_IDX])
4890 				vm_page_flag_set(m, PG_REFERENCED);
4891 
4892 			/*
4893 			 * NOTE: m is not hard-busied so it is not safe to
4894 			 *	 clear PG_MAPPED and PG_WRITEABLE on the 1->0
4895 			 *	 transition against them being set in
4896 			 *	 pmap_enter().
4897 			 */
4898 			pmap_removed_pte(m, ipte);
4899 		}
4900 
4901 		/*
4902 		 * Cleanup various tracking counters.  pt_pv can't go away
4903 		 * due to our wired ref.
4904 		 */
4905 		if (ipmap != kernel_pmap) {
4906 			pv_entry_t pt_pv;
4907 
4908 			spin_lock_shared(&ipmap->pm_spin);
4909 			pt_pv = pv_entry_lookup(ipmap, pmap_pt_pindex(iva));
4910 			spin_unlock_shared(&ipmap->pm_spin);
4911 
4912 			if (pt_pv) {
4913 				atomic_add_long(
4914 					&ipmap->pm_stats.resident_count, -1);
4915 				if (vm_page_unwire_quick(pt_pv->pv_m)) {
4916 					panic("pmap_remove_specific: bad "
4917 					      "wire_count on pt_pv");
4918 				}
4919 			}
4920 		}
4921 		if (ipte & ipmap->pmap_bits[PG_W_IDX])
4922 			atomic_add_long(&ipmap->pm_stats.wired_count, -1);
4923 		if (ipte & ipmap->pmap_bits[PG_G_IDX])
4924 			cpu_invlpg((void *)iva);
4925 	} PMAP_PAGE_BACKING_DONE;
4926 }
4927 
4928 /*
4929  * Set the physical protection on the specified range of this map
4930  * as requested.  This function is typically only used for debug watchpoints
4931  * and COW pages.
4932  *
4933  * This function may not be called from an interrupt if the map is
4934  * not the kernel_pmap.
4935  *
4936  * NOTE!  For shared page table pages we just unmap the page.
4937  */
4938 void
4939 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
4940 {
4941 	struct pmap_scan_info info;
4942 	/* JG review for NX */
4943 
4944 	if (pmap == NULL)
4945 		return;
4946 	if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == VM_PROT_NONE) {
4947 		pmap_remove(pmap, sva, eva);
4948 		return;
4949 	}
4950 	if (prot & VM_PROT_WRITE)
4951 		return;
4952 	info.pmap = pmap;
4953 	info.sva = sva;
4954 	info.eva = eva;
4955 	info.func = pmap_protect_callback;
4956 	info.arg = &prot;
4957 	pmap_scan(&info, 1);
4958 }
4959 
4960 static
4961 void
4962 pmap_protect_callback(pmap_t pmap, struct pmap_scan_info *info,
4963 		      vm_pindex_t *pte_placemark,
4964 		      pv_entry_t pt_pv, vm_offset_t va,
4965 		      pt_entry_t *ptep, void *arg __unused)
4966 {
4967 	pt_entry_t pbits;
4968 	pt_entry_t cbits;
4969 	vm_page_t m;
4970 
4971 again:
4972 	pbits = *ptep;
4973 	cpu_ccfence();
4974 	cbits = pbits;
4975 	if (pbits & pmap->pmap_bits[PG_MANAGED_IDX]) {
4976 		cbits &= ~pmap->pmap_bits[PG_A_IDX];
4977 		cbits &= ~pmap->pmap_bits[PG_M_IDX];
4978 	}
4979 	/* else unmanaged page, adjust bits, no wire changes */
4980 
4981 	if (ptep) {
4982 		cbits &= ~pmap->pmap_bits[PG_RW_IDX];
4983 #ifdef PMAP_DEBUG2
4984 		if (pmap_enter_debug > 0) {
4985 			--pmap_enter_debug;
4986 			kprintf("pmap_protect va=%lx ptep=%p "
4987 				"pt_pv=%p cbits=%08lx\n",
4988 				va, ptep, pt_pv, cbits
4989 			);
4990 		}
4991 #endif
4992 		if (pbits != cbits) {
4993 			if (!pmap_inval_smp_cmpset(pmap, va,
4994 						   ptep, pbits, cbits)) {
4995 				goto again;
4996 			}
4997 		}
4998 		if (pbits & pmap->pmap_bits[PG_MANAGED_IDX]) {
4999 			m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
5000 			if (pbits & pmap->pmap_bits[PG_A_IDX])
5001 				vm_page_flag_set(m, PG_REFERENCED);
5002 			if (pbits & pmap->pmap_bits[PG_M_IDX])
5003 				vm_page_dirty(m);
5004 		}
5005 	}
5006 	pv_placemarker_wakeup(pmap, pte_placemark);
5007 }
5008 
5009 /*
5010  * Insert the vm_page (m) at the virtual address (va), replacing any prior
5011  * mapping at that address.  Set protection and wiring as requested.
5012  *
5013  * If entry is non-NULL we check to see if the SEG_SIZE optimization is
5014  * possible.  If it is we enter the page into the appropriate shared pmap
5015  * hanging off the related VM object instead of the passed pmap, then we
5016  * share the page table page from the VM object's pmap into the current pmap.
5017  *
5018  * NOTE: This routine MUST insert the page into the pmap now, it cannot
5019  *	 lazy-evaluate.
5020  */
5021 void
5022 pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
5023 	   boolean_t wired, vm_map_entry_t entry)
5024 {
5025 	pv_entry_t pt_pv;	/* page table */
5026 	pv_entry_t pte_pv;	/* page table entry */
5027 	vm_pindex_t *pte_placemark;
5028 	pt_entry_t *ptep;
5029 	pt_entry_t origpte;
5030 	vm_paddr_t opa;
5031 	vm_page_t oldm;
5032 	pt_entry_t newpte;
5033 	vm_paddr_t pa;
5034 	int flags;
5035 	int nflags;
5036 
5037 	if (pmap == NULL)
5038 		return;
5039 	va = trunc_page(va);
5040 #ifdef PMAP_DIAGNOSTIC
5041 	if (va >= KvaEnd)
5042 		panic("pmap_enter: toobig");
5043 	if ((va >= UPT_MIN_ADDRESS) && (va < UPT_MAX_ADDRESS))
5044 		panic("pmap_enter: invalid to pmap_enter page table "
5045 		      "pages (va: 0x%lx)", va);
5046 #endif
5047 	if (va < UPT_MAX_ADDRESS && pmap == kernel_pmap) {
5048 		kprintf("Warning: pmap_enter called on UVA with "
5049 			"kernel_pmap\n");
5050 #ifdef DDB
5051 		db_print_backtrace();
5052 #endif
5053 	}
5054 	if (va >= UPT_MAX_ADDRESS && pmap != kernel_pmap) {
5055 		kprintf("Warning: pmap_enter called on KVA without "
5056 			"kernel_pmap\n");
5057 #ifdef DDB
5058 		db_print_backtrace();
5059 #endif
5060 	}
5061 
5062 	/*
5063 	 * Get the locked page table page (pt_pv) for our new page table
5064 	 * entry, allocating it if necessary.
5065 	 *
5066 	 * There is no pte_pv for a terminal pte so the terminal pte will
5067 	 * be locked via pte_placemark.
5068 	 *
5069 	 * Only MMU actions by the CPU itself can modify the ptep out from
5070 	 * under us.
5071 	 *
5072 	 * If the pmap is still being initialized we assume existing
5073 	 * page tables.
5074 	 *
5075 	 * NOTE: Kernel mappings do not track page table pages
5076 	 *	 (i.e. there is no pt_pv structure).
5077 	 *
5078 	 * NOTE: origpte here is 'tentative', used only to check for
5079 	 *	 the degenerate case where the entry already exists and
5080 	 *	 matches.
5081 	 */
5082 	if (__predict_false(pmap_initialized == FALSE)) {
5083 		pte_pv = NULL;
5084 		pt_pv = NULL;
5085 		pte_placemark = NULL;
5086 		ptep = vtopte(va);
5087 		origpte = *ptep;
5088 	} else {
5089 		pte_pv = pv_get(pmap, pmap_pte_pindex(va), &pte_placemark);
5090 		KKASSERT(pte_pv == NULL);
5091 		if (va >= VM_MAX_USER_ADDRESS) {
5092 			pt_pv = NULL;
5093 			ptep = vtopte(va);
5094 		} else {
5095 			pt_pv = pmap_allocpte(pmap, pmap_pt_pindex(va), NULL);
5096 			ptep = pv_pte_lookup(pt_pv, pmap_pte_index(va));
5097 		}
5098 		origpte = *ptep;
5099 		cpu_ccfence();
5100 	}
5101 
5102 	pa = VM_PAGE_TO_PHYS(m);
5103 
5104 	/*
5105 	 * Calculate the new PTE.
5106 	 */
5107 	newpte = (pt_entry_t)(pa | pte_prot(pmap, prot) |
5108 		 pmap->pmap_bits[PG_V_IDX] | pmap->pmap_bits[PG_A_IDX]);
5109 	if (wired)
5110 		newpte |= pmap->pmap_bits[PG_W_IDX];
5111 	if (va < VM_MAX_USER_ADDRESS)
5112 		newpte |= pmap->pmap_bits[PG_U_IDX];
5113 	if ((m->flags & PG_FICTITIOUS) == 0)
5114 		newpte |= pmap->pmap_bits[PG_MANAGED_IDX];
5115 //	if (pmap == kernel_pmap)
5116 //		newpte |= pgeflag;
5117 	newpte |= pmap->pmap_cache_bits_pte[m->pat_mode];
5118 
5119 	/*
5120 	 * It is possible for multiple faults to occur in threaded
5121 	 * environments, the existing pte might be correct.
5122 	 */
5123 	if (((origpte ^ newpte) &
5124 	    ~(pt_entry_t)(pmap->pmap_bits[PG_M_IDX] |
5125 			  pmap->pmap_bits[PG_A_IDX])) == 0) {
5126 		goto done;
5127 	}
5128 
5129 	/*
5130 	 * Adjust page flags.  The page is soft-busied or hard-busied, we
5131 	 * should be able to safely set PG_* flag bits even with the (shared)
5132 	 * soft-busy.
5133 	 *
5134 	 * The pmap_count and writeable_count are only tracked for
5135 	 * non-fictitious pages.  As a bit of a safety, bump pmap_count
5136 	 * and set the PG_* bits before mapping the page.  If another part
5137 	 * of the system does not properly hard-busy the page (against our
5138 	 * soft-busy or hard-busy) in order to remove mappings it might not
5139 	 * see the pte that we are about to add and thus will not be able to
5140 	 * drop pmap_count to 0.
5141 	 *
5142 	 * The PG_MAPPED and PG_WRITEABLE flags are set for any type of page.
5143 	 *
5144 	 * NOTE! PG_MAPPED and PG_WRITEABLE can only be cleared when
5145 	 *	 the page is hard-busied AND pmap_count is 0.  This
5146 	 *	 interlocks our setting of the flags here.
5147 	 */
5148 	/*vm_page_spin_lock(m);*/
5149 
5150 	/*
5151 	 * In advanced mode we keep track of single mappings versus
5152 	 * multiple mappings in order to avoid unnecessary vm_page_protect()
5153 	 * calls (particularly on the kernel_map).
5154 	 *
5155 	 * If non-advanced mode we track the mapping count for similar effect.
5156 	 *
5157 	 * Avoid modifying the vm_page as much as possible, conditionalize
5158 	 * updates to reduce cache line ping-ponging.
5159 	 */
5160 	flags = m->flags;
5161 	cpu_ccfence();
5162 	for (;;) {
5163 		nflags = PG_MAPPED;
5164 		if (newpte & pmap->pmap_bits[PG_RW_IDX])
5165 			nflags |= PG_WRITEABLE;
5166 		if (flags & PG_MAPPED)
5167 			nflags |= PG_MAPPEDMULTI;
5168 		if (flags == (flags | nflags))
5169 			break;
5170 		if (atomic_fcmpset_int(&m->flags, &flags, flags | nflags))
5171 			break;
5172 	}
5173 	/*vm_page_spin_unlock(m);*/
5174 
5175 	/*
5176 	 * A race can develop when replacing an existing mapping.  The new
5177 	 * page has been busied and the pte is placemark-locked, but the
5178 	 * old page could be ripped out from under us at any time by
5179 	 * a backing scan.
5180 	 *
5181 	 * If we do nothing, a concurrent backing scan may clear
5182 	 * PG_WRITEABLE and PG_MAPPED before we can act on oldm.
5183 	 */
5184 	opa = origpte & PG_FRAME;
5185 	if (opa && (origpte & pmap->pmap_bits[PG_MANAGED_IDX])) {
5186 		oldm = PHYS_TO_VM_PAGE(opa);
5187 		KKASSERT(opa == oldm->phys_addr);
5188 		KKASSERT(entry != NULL);
5189 		atomic_add_long(&oldm->md.interlock_count, 1);
5190 	} else {
5191 		oldm = NULL;
5192 	}
5193 
5194 	/*
5195 	 * Swap the new and old PTEs and perform any necessary SMP
5196 	 * synchronization.
5197 	 */
5198 	if ((prot & VM_PROT_NOSYNC) || (opa == 0 && pt_pv != NULL)) {
5199 		/*
5200 		 * Explicitly permitted to avoid pmap cpu mask synchronization
5201 		 * or the prior content of a non-kernel-related pmap was
5202 		 * invalid.
5203 		 */
5204 		origpte = atomic_swap_long(ptep, newpte);
5205 		if (opa)
5206 			cpu_invlpg((void *)va);
5207 	} else {
5208 		/*
5209 		 * Not permitted to avoid pmap cpu mask synchronization
5210 		 * or the prior content is being replaced, or this is a
5211 		 * kernel-related pmap.
5212 		 *
5213 		 * Due to other kernel optimizations, we cannot assume a
5214 		 * 0->non_zero transition of *ptep can be done with a swap.
5215 		 */
5216 		origpte = pmap_inval_smp(pmap, va, 1, ptep, newpte);
5217 	}
5218 	opa = origpte & PG_FRAME;
5219 
5220 #ifdef PMAP_DEBUG2
5221 	if (pmap_enter_debug > 0) {
5222 		--pmap_enter_debug;
5223 		kprintf("pmap_enter: va=%lx m=%p origpte=%lx newpte=%lx ptep=%p"
5224 			" pte_pv=%p pt_pv=%p opa=%lx prot=%02x\n",
5225 			va, m,
5226 			origpte, newpte, ptep,
5227 			pte_pv, pt_pv, opa, prot);
5228 	}
5229 #endif
5230 
5231 	/*
5232 	 * Account for the changes in the pt_pv and pmap.
5233 	 *
5234 	 * Retain the same wiring count due to replacing an existing page,
5235 	 * or bump the wiring count for a new page.
5236 	 */
5237 	if (pt_pv && opa == 0) {
5238 		vm_page_wire_quick(pt_pv->pv_m);
5239 		atomic_add_long(&pt_pv->pv_pmap->pm_stats.resident_count, 1);
5240 	}
5241 	if (wired && (origpte & pmap->pmap_bits[PG_W_IDX]) == 0)
5242 		atomic_add_long(&pmap->pm_stats.wired_count, 1);
5243 
5244 	/*
5245 	 * Account for the removal of the old page.  pmap and pt_pv stats
5246 	 * have already been fully adjusted for both.
5247 	 *
5248 	 * WARNING! oldm is not soft or hard-busied.  The pte at worst can
5249 	 *	    only be removed out from under us since we hold the
5250 	 *	    placemarker.  So if it is still there, it must not have
5251 	 *	    changed.
5252 	 *
5253 	 * WARNING! A backing scan can clear PG_WRITEABLE and/or PG_MAPPED
5254 	 *	    and rip oldm away from us, possibly even freeing or
5255 	 *	    paging it, and not setting our dirtying below.
5256 	 *
5257 	 *	    To deal with this, oldm->md.interlock_count is bumped
5258 	 *	    to indicate that we might (only might) have won the pte
5259 	 *	    swap race, and then released below.
5260 	 */
5261 	if (opa && (origpte & pmap->pmap_bits[PG_MANAGED_IDX])) {
5262 		KKASSERT(oldm == PHYS_TO_VM_PAGE(opa));
5263 		if (origpte & pmap->pmap_bits[PG_M_IDX])
5264 			vm_page_dirty(oldm);
5265 		if (origpte & pmap->pmap_bits[PG_A_IDX])
5266 			vm_page_flag_set(oldm, PG_REFERENCED);
5267 
5268 		/*
5269 		 * NOTE: oldm is not hard-busied so it is not safe to
5270 		 *	 clear PG_MAPPED and PG_WRITEABLE on the 1->0
5271 		 *	 transition against them being set in
5272 		 *	 pmap_enter().
5273 		 */
5274 		pmap_removed_pte(oldm, origpte);
5275 	}
5276 	if (oldm) {
5277 		if ((atomic_fetchadd_long(&oldm->md.interlock_count, -1) &
5278 		     0x7FFFFFFFFFFFFFFFLU) == 0x4000000000000001LU) {
5279 			atomic_clear_long(&oldm->md.interlock_count,
5280 					  0x4000000000000000LU);
5281 			wakeup(&oldm->md.interlock_count);
5282 		}
5283 	}
5284 
5285 done:
5286 	KKASSERT((newpte & pmap->pmap_bits[PG_MANAGED_IDX]) == 0 ||
5287 		 (m->flags & PG_MAPPED));
5288 
5289 	/*
5290 	 * Cleanup the pv entry, allowing other accessors.  If the new page
5291 	 * is not managed but we have a pte_pv (which was locking our
5292 	 * operation), we can free it now.  pte_pv->pv_m should be NULL.
5293 	 */
5294 	if (pte_placemark)
5295 		pv_placemarker_wakeup(pmap, pte_placemark);
5296 	if (pt_pv)
5297 		pv_put(pt_pv);
5298 }
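
/*
 * Illustrative fragment (hypothetical; 'va', 'm' and 'entry' are assumed
 * to be in scope): how a fault path typically enters a busied page into
 * the faulting process' pmap, read-only and unwired.
 */
#if 0
	pmap_enter(vmspace_pmap(curthread->td_lwp->lwp_vmspace),
		   va, m, VM_PROT_READ, FALSE, entry);
#endif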
5299 
5300 /*
5301  * Make a temporary mapping for a physical address.  This is only intended
5302  * to be used for panic dumps.
5303  *
5304  * The caller is responsible for calling smp_invltlb().
5305  */
5306 void *
5307 pmap_kenter_temporary(vm_paddr_t pa, long i)
5308 {
5309 	pmap_kenter_quick((vm_offset_t)crashdumpmap + (i * PAGE_SIZE), pa);
5310 	return ((void *)crashdumpmap);
5311 }
5312 
5313 #if 0
5314 #define MAX_INIT_PT (96)
5315 
5316 /*
5317  * This routine preloads the ptes for a given object into the specified pmap.
5318  * This eliminates the blast of soft faults on process startup and
5319  * immediately after an mmap.
5320  */
5321 static int pmap_object_init_pt_callback(vm_page_t p, void *data);
5322 #endif
5323 
5324 void
5325 pmap_object_init_pt(pmap_t pmap, vm_map_entry_t entry,
5326 		    vm_offset_t addr, vm_size_t size, int limit)
5327 {
5328 #if 0
5329 	vm_prot_t prot = entry->protection;
5330 	vm_object_t object = entry->ba.object;
5331 	vm_pindex_t pindex = atop(entry->ba.offset + (addr - entry->ba.start));
5332 	struct rb_vm_page_scan_info info;
5333 	struct lwp *lp;
5334 	vm_size_t psize;
5335 
5336 	/*
5337 	 * We can't preinit if read access isn't set or there is no pmap
5338 	 * or object.
5339 	 */
5340 	if ((prot & VM_PROT_READ) == 0 || pmap == NULL || object == NULL)
5341 		return;
5342 
5343 	/*
5344 	 * We can't preinit if the pmap is not the current pmap
5345 	 */
5346 	lp = curthread->td_lwp;
5347 	if (lp == NULL || pmap != vmspace_pmap(lp->lwp_vmspace))
5348 		return;
5349 
5350 	/*
5351 	 * Misc additional checks
5352 	 */
5353 	psize = x86_64_btop(size);
5354 
5355 	if ((object->type != OBJT_VNODE) ||
5356 		((limit & MAP_PREFAULT_PARTIAL) && (psize > MAX_INIT_PT) &&
5357 			(object->resident_page_count > MAX_INIT_PT))) {
5358 		return;
5359 	}
5360 
5361 	if (pindex + psize > object->size) {
5362 		if (object->size < pindex)
5363 			return;
5364 		psize = object->size - pindex;
5365 	}
5366 
5367 	if (psize == 0)
5368 		return;
5369 
5370 	/*
5371 	 * If everything is segment-aligned do not pre-init here.  Instead
5372 	 * allow the normal vm_fault path to pass a segment hint to
5373 	 * pmap_enter() which will then use an object-referenced shared
5374 	 * page table page.
5375 	 */
5376 	if ((addr & SEG_MASK) == 0 &&
5377 	    (ctob(psize) & SEG_MASK) == 0 &&
5378 	    (ctob(pindex) & SEG_MASK) == 0) {
5379 		return;
5380 	}
5381 
5382 	/*
5383 	 * Use a red-black scan to traverse the requested range and load
5384 	 * any valid pages found into the pmap.
5385 	 *
5386 	 * We cannot safely scan the object's memq without holding the
5387 	 * object token.
5388 	 */
5389 	info.start_pindex = pindex;
5390 	info.end_pindex = pindex + psize - 1;
5391 	info.limit = limit;
5392 	info.mpte = NULL;
5393 	info.addr = addr;
5394 	info.pmap = pmap;
5395 	info.object = object;
5396 	info.entry = entry;
5397 
5398 	/*
5399 	 * By using the NOLK scan, the callback function must be sure
5400 	 * to return -1 if the VM page falls out of the object.
5401 	 */
5402 	vm_object_hold_shared(object);
5403 	vm_page_rb_tree_RB_SCAN_NOLK(&object->rb_memq, rb_vm_page_scancmp,
5404 				     pmap_object_init_pt_callback, &info);
5405 	vm_object_drop(object);
5406 #endif
5407 }
5408 
5409 #if 0
5410 
5411 static
5412 int
5413 pmap_object_init_pt_callback(vm_page_t p, void *data)
5414 {
5415 	struct rb_vm_page_scan_info *info = data;
5416 	vm_pindex_t rel_index;
5417 	int hard_busy;
5418 
5419 	/*
5420 	 * don't allow an madvise to blow away our really
5421 	 * free pages allocating pv entries.
5422 	 */
5423 	if ((info->limit & MAP_PREFAULT_MADVISE) &&
5424 		vmstats.v_free_count < vmstats.v_free_reserved) {
5425 		    return(-1);
5426 	}
5427 
5428 	/*
5429 	 * Ignore list markers and ignore pages we cannot instantly
5430 	 * busy (while holding the object token).
5431 	 */
5432 	if (p->flags & PG_MARKER)
5433 		return 0;
5434 	hard_busy = 0;
5435 again:
5436 	if (hard_busy) {
5437 		if (vm_page_busy_try(p, TRUE))
5438 			return 0;
5439 	} else {
5440 		if (vm_page_sbusy_try(p))
5441 			return 0;
5442 	}
5443 	if (((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
5444 	    (p->flags & PG_FICTITIOUS) == 0) {
5445 		if ((p->queue - p->pc) == PQ_CACHE) {
5446 			if (hard_busy == 0) {
5447 				vm_page_sbusy_drop(p);
5448 				hard_busy = 1;
5449 				goto again;
5450 			}
5451 			vm_page_deactivate(p);
5452 		}
5453 		rel_index = p->pindex - info->start_pindex;
5454 		pmap_enter(info->pmap, info->addr + x86_64_ptob(rel_index), p,
5455 			   VM_PROT_READ, FALSE, info->entry);
5456 	}
5457 	if (hard_busy)
5458 		vm_page_wakeup(p);
5459 	else
5460 		vm_page_sbusy_drop(p);
5461 
5462 	/*
5463 	 * We are using an unlocked scan (that is, the scan expects its
5464 	 * current element to remain in the tree on return), so we have to
5465 	 * check here and abort the scan if the page has left the object.
5466 	 */
5467 	if (p->object != info->object)
5468 		return -1;
5469 	lwkt_yield();
5470 	return(0);
5471 }
5472 
5473 #endif
5474 
5475 /*
5476  * Return TRUE if the pmap is in shape to trivially pre-fault the specified
5477  * address.
5478  *
5479  * Returns FALSE if it would be non-trivial or if a pte is already loaded
5480  * into the slot.
5481  *
5482  * The address must reside within a vm_map mapped range to ensure that the
5483  * page table doesn't get ripped out from under us.
5484  *
5485  * XXX This is safe only because page table pages are not freed.
5486  */
5487 int
5488 pmap_prefault_ok(pmap_t pmap, vm_offset_t addr)
5489 {
5490 	pt_entry_t *pte;
5491 
5492 	/*spin_lock(&pmap->pm_spin);*/
5493 	if ((pte = pmap_pte(pmap, addr)) != NULL) {
5494 		if (*pte & pmap->pmap_bits[PG_V_IDX]) {
5495 			/*spin_unlock(&pmap->pm_spin);*/
5496 			return FALSE;
5497 		}
5498 	}
5499 	/*spin_unlock(&pmap->pm_spin);*/
5500 	return TRUE;
5501 }
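
/*
 * Illustrative sketch (not compiled in): a hypothetical prefault helper
 * that consults pmap_prefault_ok() before instantiating a nearby page
 * with pmap_enter().  The helper name and its arguments are assumptions
 * for the example; only pmap_prefault_ok() and pmap_enter() are real.
 */
#if 0
static void
example_prefault_one(pmap_t pmap, vm_offset_t va, vm_page_t m,
		     vm_map_entry_t entry)
{
	/*
	 * Skip the work if the slot already has a pte loaded or the
	 * page table is not in a trivially usable state.
	 */
	if (!pmap_prefault_ok(pmap, va))
		return;

	/*
	 * Enter a read-only, unwired mapping; a later write fault will
	 * upgrade the protection through the normal fault path.
	 */
	pmap_enter(pmap, va, m, VM_PROT_READ, FALSE, entry);
}
#endif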
5502 
5503 /*
5504  * Change the wiring attribute for a pmap/va pair.  The mapping must already
5505  * exist in the pmap.  The mapping may or may not be managed.  The wiring
5506  * count of the page itself is not changed; the page is returned so the
5507  * caller can adjust its wiring (the page is not locked in any way).
5508  *
5509  * Wiring is not a hardware characteristic so there is no need to invalidate
5510  * TLB.  However, in an SMP environment we must use a locked bus cycle to
5511  * the TLB.  However, in an SMP environment we must use a locked bus cycle
5512  * to update the pte (that is, unless we are using the pmap_inval_*() API);
5513  * it is ok to do this for simple wiring changes.
5514 vm_page_t
5515 pmap_unwire(pmap_t pmap, vm_offset_t va)
5516 {
5517 	pt_entry_t *ptep;
5518 	pv_entry_t pt_pv;
5519 	vm_paddr_t pa;
5520 	vm_page_t m;
5521 
5522 	if (pmap == NULL)
5523 		return NULL;
5524 
5525 	/*
5526 	 * Assume elements in the kernel pmap are stable
5527 	 */
5528 	if (pmap == kernel_pmap) {
5529 		if (pmap_pt(pmap, va) == 0)
5530 			return NULL;
5531 		ptep = pmap_pte_quick(pmap, va);
5532 		if (pmap_pte_v(pmap, ptep)) {
5533 			if (pmap_pte_w(pmap, ptep))
5534 				atomic_add_long(&pmap->pm_stats.wired_count,-1);
5535 			atomic_clear_long(ptep, pmap->pmap_bits[PG_W_IDX]);
5536 			pa = *ptep & PG_FRAME;
5537 			m = PHYS_TO_VM_PAGE(pa);
5538 		} else {
5539 			m = NULL;
5540 		}
5541 	} else {
5542 		/*
5543 		 * We can only [un]wire pmap-local pages (we cannot wire
5544 		 * shared pages)
5545 		 */
5546 		pt_pv = pv_get(pmap, pmap_pt_pindex(va), NULL);
5547 		if (pt_pv == NULL)
5548 			return NULL;
5549 
5550 		ptep = pv_pte_lookup(pt_pv, pmap_pte_index(va));
5551 		if ((*ptep & pmap->pmap_bits[PG_V_IDX]) == 0) {
5552 			pv_put(pt_pv);
5553 			return NULL;
5554 		}
5555 
5556 		if (pmap_pte_w(pmap, ptep)) {
5557 			atomic_add_long(&pt_pv->pv_pmap->pm_stats.wired_count,
5558 					-1);
5559 		}
5560 		/* XXX else return NULL so caller doesn't unwire m ? */
5561 
5562 		atomic_clear_long(ptep, pmap->pmap_bits[PG_W_IDX]);
5563 
5564 		pa = *ptep & PG_FRAME;
5565 		m = PHYS_TO_VM_PAGE(pa);	/* held by wired count */
5566 		pv_put(pt_pv);
5567 	}
5568 	return m;
5569 }
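
/*
 * Illustrative sketch (not compiled in): caller-side handling of the page
 * returned by pmap_unwire().  The pte wiring and the vm_page wiring are
 * tracked separately; the vm_page_unwire() call here is an assumption
 * about how a caller would adjust the latter.
 */
#if 0
static void
example_unwire_mapping(pmap_t pmap, vm_offset_t va)
{
	vm_page_t m;

	m = pmap_unwire(pmap, va);	/* clears PG_W in the pte */
	if (m)
		vm_page_unwire(m, 0);	/* drop the page's own wiring */
}
#endif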
5570 
5571 /*
5572  * Copy the range specified by src_addr/len from the source map to
5573  * the range dst_addr/len in the destination map.
5574  *
5575  * This routine is only advisory and need not do anything.
5576  */
5577 void
5578 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
5579 	  vm_size_t len, vm_offset_t src_addr)
5580 {
5581 }
5582 
5583 /*
5584  * pmap_zero_page:
5585  *
5586  *	Zero the specified physical page.
5587  *
5588  *	This function may be called from an interrupt and no locking is
5589  *	required.
5590  */
5591 void
5592 pmap_zero_page(vm_paddr_t phys)
5593 {
5594 	vm_offset_t va = PHYS_TO_DMAP(phys);
5595 
5596 	pagezero((void *)va);
5597 }
5598 
5599 /*
5600  * pmap_zero_page_area:
5601  *
5602  *	Zero part of a physical page by mapping it into memory and clearing
5603  *	its contents with bzero.
5604  *
5605  *	off and size may not cover an area beyond a single hardware page.
5606  */
5607 void
5608 pmap_zero_page_area(vm_paddr_t phys, int off, int size)
5609 {
5610 	vm_offset_t virt = PHYS_TO_DMAP(phys);
5611 
5612 	bzero((char *)virt + off, size);
5613 }
5614 
5615 /*
5616  * pmap_copy_page:
5617  *
5618  *	Copy the physical page from the source PA to the target PA.
5619  *	This function may be called from an interrupt.  No locking
5620  *	is required.
5621  */
5622 void
5623 pmap_copy_page(vm_paddr_t src, vm_paddr_t dst)
5624 {
5625 	vm_offset_t src_virt, dst_virt;
5626 
5627 	src_virt = PHYS_TO_DMAP(src);
5628 	dst_virt = PHYS_TO_DMAP(dst);
5629 	bcopy((void *)src_virt, (void *)dst_virt, PAGE_SIZE);
5630 }
5631 
5632 /*
5633  * pmap_copy_page_frag:
5634  *
5635  *	Copy a fragment of the physical page from the source PA to the
5636  *	target PA.  This function may be called from an interrupt.  No
5637  *	locking is required.
5638  */
5639 void
5640 pmap_copy_page_frag(vm_paddr_t src, vm_paddr_t dst, size_t bytes)
5641 {
5642 	vm_offset_t src_virt, dst_virt;
5643 
5644 	src_virt = PHYS_TO_DMAP(src);
5645 	dst_virt = PHYS_TO_DMAP(dst);
5646 
5647 	bcopy((char *)src_virt + (src & PAGE_MASK),
5648 	      (char *)dst_virt + (dst & PAGE_MASK),
5649 	      bytes);
5650 }
5651 
5652 /*
5653  * Remove all pages from the specified address space; this aids process
5654  * exit speed.  Also, this code may be special-cased for the current
5655  * process only.
5656  */
5657 void
5658 pmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
5659 {
5660 	pmap_remove_noinval(pmap, sva, eva);
5661 	cpu_invltlb();
5662 }
5663 
5664 /*
5665  * pmap_testbit tests bits in PTEs.  Note that the testbit/clearbit
5666  * routines are inline, so a lot of things compile-time evaluate.
5667  *
5668  * Currently only used to test the 'M'odified bit.  If the page
5669  * is not PG_WRITEABLE, the 'M'odified bit cannot be set and we
5670  * return immediately.  Fictitious pages do not track this bit.
5671  */
5672 static
5673 boolean_t
5674 pmap_testbit(vm_page_t m, int bit)
5675 {
5676 	int res = FALSE;
5677 
5678 	if (__predict_false(!pmap_initialized || (m->flags & PG_FICTITIOUS)))
5679 		return FALSE;
5680 	/*
5681 	 * Nothing to do if all the mappings are already read-only.
5682 	 * The page's [M]odify bits have already been synchronized
5683 	 * to the vm_page_t and cleaned out.
5684 	 */
5685 	if (bit == PG_M_IDX && (m->flags & PG_WRITEABLE) == 0)
5686 		return FALSE;
5687 
5688 	/*
5689 	 * Iterate the mapping
5690 	 */
5691 	PMAP_PAGE_BACKING_SCAN(m, NULL, ipmap, iptep, ipte, iva) {
5692 		if (ipte & ipmap->pmap_bits[bit]) {
5693 			res = TRUE;
5694 			break;
5695 		}
5696 	} PMAP_PAGE_BACKING_DONE;
5697 	return res;
5698 }
5699 
5700 /*
5701  * This routine is used to modify bits in ptes.  Only one bit should be
5702  * specified.  PG_RW requires special handling.  This call works with
5703  * any sort of mapped page.  PG_FICTITIOUS pages might not be optimal.
5704  *
5705  * Caller must NOT hold any spin locks
5706  * Caller must hold (m) hard-busied
5707  *
5708  * NOTE: When clearing PG_M we could also (not implemented) drop
5709  *       through to the PG_RW code and clear PG_RW too, forcing
5710  *       a fault on write to redetect PG_M for virtual kernels, but
5711  *       it isn't necessary since virtual kernels invalidate the
5712  *       pte when they clear the VPTE_M bit in their virtual page
5713  *       tables.
5714  *
5715  * NOTE: Does not re-dirty the page when clearing only PG_M.
5716  *
5717  * NOTE: Because we do not lock the pv, *pte can be in a state of
5718  *       flux.  Despite this the value of *pte is still somewhat
5719  *       related while we hold the vm_page spin lock.
5720  *
5721  *       *pte can be zero due to this race.  Since we are clearing
5722  *       bits we basically do no harm when this race occurs.
5723  */
5724 static __inline
5725 void
5726 pmap_clearbit(vm_page_t m, int bit_index)
5727 {
5728 	pt_entry_t npte;
5729 	int retry;
5730 	long icount;
5731 
5732 	/*
5733 	 * Too early in the boot
5734 	 */
5735 	if (__predict_false(!pmap_initialized)) {
5736 		if (bit_index == PG_RW_IDX)
5737 			vm_page_flag_clear(m, PG_WRITEABLE);
5738 		return;
5739 	}
5740 	if ((m->flags & (PG_MAPPED | PG_WRITEABLE)) == 0)
5741 		return;
5742 
5743 	/*
5744 	 * When asked to clear bits other than PG_RW we do not track them,
5745 	 * so we have to iterate.
5746 	 *
5747 	 * pmap_clear_reference() is called (into here) with the page
5748 	 * hard-busied to check whether the page is still mapped and
5749 	 * will clear PG_MAPPED and PG_WRITEABLE if it isn't.
5750 	 */
5751 	if (bit_index != PG_RW_IDX) {
5752 #if 0
5753 		long icount;
5754 
5755 		icount = 0;
5756 #endif
5757 		PMAP_PAGE_BACKING_SCAN(m, NULL, ipmap, iptep, ipte, iva) {
5758 #if 0
5759 			++icount;
5760 #endif
5761 			if (ipte & ipmap->pmap_bits[bit_index]) {
5762 				atomic_clear_long(iptep,
5763 						  ipmap->pmap_bits[bit_index]);
5764 			}
5765 		} PMAP_PAGE_BACKING_DONE;
5766 #if 0
5767 		if (icount == 0) {
5768 			icount = atomic_fetchadd_long(&m->md.interlock_count,
5769 						      0x8000000000000000LU);
5770 			if ((icount & 0x3FFFFFFFFFFFFFFFLU) == 0) {
5771 				vm_page_flag_clear(m, PG_MAPPED |
5772 						      PG_MAPPEDMULTI |
5773 						      PG_WRITEABLE);
5774 			}
5775 		}
5776 #endif
5777 		return;
5778 	}
5779 
5780 	/*
5781 	 * Being asked to clear the RW bit.
5782 	 *
5783 	 * Nothing to do if all the mappings are already read-only
5784 	 */
5785 	if ((m->flags & PG_WRITEABLE) == 0)
5786 		return;
5787 
5788 	/*
5789 	 * Iterate the mappings and check.
5790 	 */
5791 	retry = ticks + hz * 60;
5792 again:
5793 	/*
5794 	 * Clear PG_RW. This also clears PG_M and marks the page dirty if
5795 	 * PG_M was set.
5796 	 *
5797 	 * Since the caller holds the page hard-busied we can safely clear
5798 	 * PG_WRITEABLE, and callers expect us to do so for the PG_RW_IDX path.
5799 	 */
5800 	PMAP_PAGE_BACKING_SCAN(m, NULL, ipmap, iptep, ipte, iva) {
5801 #if 0
5802 		if ((ipte & ipmap->pmap_bits[PG_MANAGED_IDX]) == 0)
5803 			continue;
5804 #endif
5805 		if ((ipte & ipmap->pmap_bits[PG_RW_IDX]) == 0)
5806 			continue;
5807 		npte = ipte & ~(ipmap->pmap_bits[PG_RW_IDX] |
5808 				ipmap->pmap_bits[PG_M_IDX]);
5809 		if (!pmap_inval_smp_cmpset(ipmap, iva, iptep, ipte, npte))
5810 			PMAP_PAGE_BACKING_RETRY;
5811 		if (ipte & ipmap->pmap_bits[PG_M_IDX])
5812 			vm_page_dirty(m);
5813 
5814 		/*
5815 		 * NOTE: m is not hard-busied so it is not safe to
5816 		 *	 clear PG_WRITEABLE on the 1->0 transition
5817 		 *	 against it being set in pmap_enter().
5818 		 *
5819 		 *	 pmap_count and writeable_count are only applicable
5820 		 *	 to non-fictitious pages (PG_MANAGED_IDX from pte)
5821 		 */
5822 	} PMAP_PAGE_BACKING_DONE;
5823 
5824 	/*
5825 	 * If our scan lost a pte swap race, m->md.interlock_count might
5826 	 * be set from the pmap_enter() code.  If so, sleep a little and try
5827 	 * again.
5828 	 *
5829 	 * Use an atomic op to access interlock_count to ensure ordering.
5830 	 */
5831 	icount = atomic_fetchadd_long(&m->md.interlock_count,
5832 				      0x8000000000000000LU) +
5833 		 0x8000000000000000LU;
5834 	cpu_ccfence();
5835 	while (icount & 0x3FFFFFFFFFFFFFFFLU) {
5836 		tsleep_interlock(&m->md.interlock_count, 0);
5837 		if (atomic_fcmpset_long(&m->md.interlock_count, &icount,
5838 				        icount | 0x4000000000000000LU)) {
5839 			tsleep(&m->md.interlock_count, PINTERLOCKED,
5840 			       "pgunm", 1);
5841 			icount = m->md.interlock_count;
5842 			if (retry - ticks > 0)
5843 				goto again;
5844 			panic("pmap_clearbit: cannot return interlock_count "
5845 			      "to 0 (%p, %ld)",
5846 			      m, m->md.interlock_count);
5847 		}
5848 	}
5849 	vm_page_flag_clear(m, PG_WRITEABLE);
5850 }
5851 
5852 /*
5853  * Lower the permission for all mappings to a given page.
5854  *
5855  * Page must be hard-busied by caller.  Because the page is busied by the
5856  * caller, this should not be able to race a pmap_enter().
5857  */
5858 void
5859 pmap_page_protect(vm_page_t m, vm_prot_t prot)
5860 {
5861 	/* JG NX support? */
5862 	if ((prot & VM_PROT_WRITE) == 0) {
5863 		if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) {
5864 			/*
5865 			 * NOTE: pmap_clearbit(.. PG_RW) also clears
5866 			 *	 the PG_WRITEABLE flag in (m).
5867 			 */
5868 			pmap_clearbit(m, PG_RW_IDX);
5869 		} else {
5870 			pmap_remove_all(m);
5871 		}
5872 	}
5873 }
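
/*
 * Illustrative sketch (not compiled in): the two common ways callers use
 * pmap_page_protect().  The vm_page_busy_wait()/vm_page_wakeup() pair
 * stands in for whatever hard-busy the caller already holds and is an
 * assumption for the example.
 */
#if 0
	vm_page_busy_wait(m, FALSE, "pgpro");
	pmap_page_protect(m, VM_PROT_READ);	/* write-protect all mappings */
	pmap_page_protect(m, VM_PROT_NONE);	/* remove all mappings */
	vm_page_wakeup(m);
#endif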
5874 
5875 vm_paddr_t
5876 pmap_phys_address(vm_pindex_t ppn)
5877 {
5878 	return (x86_64_ptob(ppn));
5879 }
5880 
5881 /*
5882  * Return a count of reference bits for a page, clearing those bits.
5883  * It is not necessary for every reference bit to be cleared, but it
5884  * is necessary that 0 only be returned when there are truly no
5885  * reference bits set.
5886  *
5887  * XXX: The exact number of bits to check and clear is a matter that
5888  * should be tested and standardized at some point in the future for
5889  * optimal aging of shared pages.
5890  *
5891  * This routine may not block.
5892  */
5893 int
5894 pmap_ts_referenced(vm_page_t m)
5895 {
5896 	int rval = 0;
5897 	pt_entry_t npte;
5898 
5899 	if (__predict_false(!pmap_initialized || (m->flags & PG_FICTITIOUS)))
5900 		return rval;
5901 	PMAP_PAGE_BACKING_SCAN(m, NULL, ipmap, iptep, ipte, iva) {
5902 		if (ipte & ipmap->pmap_bits[PG_A_IDX]) {
5903 			npte = ipte & ~ipmap->pmap_bits[PG_A_IDX];
5904 			if (!atomic_cmpset_long(iptep, ipte, npte))
5905 				PMAP_PAGE_BACKING_RETRY;
5906 			++rval;
5907 			if (rval > 4)
5908 				break;
5909 		}
5910 	} PMAP_PAGE_BACKING_DONE;
5911 	return rval;
5912 }
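
/*
 * Illustrative sketch (not compiled in): pageout-style aging based on
 * pmap_ts_referenced().  The act_count adjustment is an assumption for
 * the example, loosely modeled on how a page scan might age pages up.
 */
#if 0
	int refs;

	refs = pmap_ts_referenced(m);	/* counts/clears a handful of A bits */
	if (refs)
		m->act_count += refs;	/* page was recently referenced */
#endif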
5913 
5914 /*
5915  *	pmap_is_modified:
5916  *
5917  *	Return whether or not the specified physical page was modified
5918  *	in any physical maps.
5919  */
5920 boolean_t
5921 pmap_is_modified(vm_page_t m)
5922 {
5923 	boolean_t res;
5924 
5925 	res = pmap_testbit(m, PG_M_IDX);
5926 	return (res);
5927 }
5928 
5929 /*
5930  * Clear the modify bit on the vm_page.
5931  *
5932  * The page must be hard-busied.
5933  */
5934 void
5935 pmap_clear_modify(vm_page_t m)
5936 {
5937 	pmap_clearbit(m, PG_M_IDX);
5938 }
5939 
5940 /*
5941  *	pmap_clear_reference:
5942  *
5943  *	Clear the reference bit on the specified physical page.
5944  */
5945 void
5946 pmap_clear_reference(vm_page_t m)
5947 {
5948 	pmap_clearbit(m, PG_A_IDX);
5949 }
5950 
5951 /*
5952  * Miscellaneous support routines follow
5953  */
5954 
5955 static
5956 void
5957 x86_64_protection_init(void)
5958 {
5959 	uint64_t *kp;
5960 	int prot;
5961 
5962 	/*
5963 	 * NX supported? (boot time loader.conf override only)
5964 	 *
5965 	 * -1	Automatic (sets mode 1)
5966 	 *  0	Disabled
5967 	 *  1	NX implemented, differentiates PROT_READ vs PROT_READ|PROT_EXEC
5968 	 *  2	NX implemented for all cases
5969 	 */
5970 	TUNABLE_INT_FETCH("machdep.pmap_nx_enable", &pmap_nx_enable);
5971 	if ((amd_feature & AMDID_NX) == 0) {
5972 		pmap_bits_default[PG_NX_IDX] = 0;
5973 		pmap_nx_enable = 0;
5974 	} else if (pmap_nx_enable < 0) {
5975 		pmap_nx_enable = 1;		/* default to mode 1 (READ) */
5976 	}
5977 
5978 	/*
5979 	 * A protection code of 0 is basically read-only access, but we also
5980 	 * set the NX (no-execute) bit when VM_PROT_EXECUTE is not specified.
5981 	 */
5982 	kp = protection_codes;
5983 	for (prot = 0; prot < PROTECTION_CODES_SIZE; prot++) {
5984 		switch (prot) {
5985 		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
5986 			/*
5987 			 * This case handled elsewhere
5988 			 */
5989 			*kp = 0;
5990 			break;
5991 		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
5992 			/*
5993 			 * Read-only is 0|NX	(pmap_nx_enable mode >= 1)
5994 			 */
5995 			if (pmap_nx_enable >= 1)
5996 				*kp = pmap_bits_default[PG_NX_IDX];
5997 			break;
5998 		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
5999 		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
6000 			/*
6001 			 * Execute requires read access
6002 			 */
6003 			*kp = 0;
6004 			break;
6005 		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
6006 		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
6007 			/*
6008 			 * Write without execute is RW|NX
6009 			 *			(pmap_nx_enable mode >= 2)
6010 			 */
6011 			*kp = pmap_bits_default[PG_RW_IDX];
6012 			if (pmap_nx_enable >= 2)
6013 				*kp |= pmap_bits_default[PG_NX_IDX];
6014 			break;
6015 		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
6016 		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
6017 			/*
6018 			 * Write with execute is RW
6019 			 */
6020 			*kp = pmap_bits_default[PG_RW_IDX];
6021 			break;
6022 		}
6023 		++kp;
6024 	}
6025 }
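
/*
 * Example: the NX mode used above can be forced at boot time via the
 * loader tunable fetched by TUNABLE_INT_FETCH(), e.g. in /boot/loader.conf:
 *
 *	machdep.pmap_nx_enable="2"	# NX on every mapping lacking PROT_EXEC
 *
 * A value of 0 disables NX entirely and -1 selects mode 1 automatically
 * when the cpu advertises the NX feature.
 */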
6026 
6027 /*
6028  * Map a set of physical memory pages into the kernel virtual
6029  * address space. Return a pointer to where it is mapped. This
6030  * routine is intended to be used for mapping device memory,
6031  * NOT real memory.
6032  *
6033  * NOTE: We can't use pgeflag unless we invalidate the pages one at
6034  *	 a time.
6035  *
6036  * NOTE: The PAT attributes {WRITE_BACK, WRITE_THROUGH, UNCACHED, UNCACHEABLE}
6037  *	 work whether the cpu supports PAT or not.  The remaining PAT
6038  *	 attributes {WRITE_PROTECTED, WRITE_COMBINING} only work if the cpu
6039  *	 supports PAT.
6040  */
6041 void *
6042 pmap_mapdev(vm_paddr_t pa, vm_size_t size)
6043 {
6044 	return(pmap_mapdev_attr(pa, size, PAT_WRITE_BACK));
6045 }
6046 
6047 void *
6048 pmap_mapdev_uncacheable(vm_paddr_t pa, vm_size_t size)
6049 {
6050 	return(pmap_mapdev_attr(pa, size, PAT_UNCACHEABLE));
6051 }
6052 
6053 void *
6054 pmap_mapbios(vm_paddr_t pa, vm_size_t size)
6055 {
6056 	return (pmap_mapdev_attr(pa, size, PAT_WRITE_BACK));
6057 }
6058 
6059 /*
6060  * Map a set of physical memory pages into the kernel virtual
6061  * address space. Return a pointer to where it is mapped. This
6062  * routine is intended to be used for mapping device memory,
6063  * NOT real memory.
6064  */
6065 void *
6066 pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
6067 {
6068 	vm_offset_t va, tmpva, offset;
6069 	pt_entry_t *pte;
6070 	vm_size_t tmpsize;
6071 
6072 	offset = pa & PAGE_MASK;
6073 	size = roundup(offset + size, PAGE_SIZE);
6074 
6075 	va = kmem_alloc_nofault(kernel_map, size, VM_SUBSYS_MAPDEV, PAGE_SIZE);
6076 	if (va == 0)
6077 		panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
6078 
6079 	pa = pa & ~PAGE_MASK;
6080 	for (tmpva = va, tmpsize = size; tmpsize > 0;) {
6081 		pte = vtopte(tmpva);
6082 		*pte = pa |
6083 		    kernel_pmap->pmap_bits[PG_RW_IDX] |
6084 		    kernel_pmap->pmap_bits[PG_V_IDX] | /* pgeflag | */
6085 		    kernel_pmap->pmap_cache_bits_pte[mode];
6086 		tmpsize -= PAGE_SIZE;
6087 		tmpva += PAGE_SIZE;
6088 		pa += PAGE_SIZE;
6089 	}
6090 	pmap_invalidate_range(kernel_pmap, va, va + size);
6091 	pmap_invalidate_cache_range(va, va + size);
6092 
6093 	return ((void *)(va + offset));
6094 }
6095 
6096 void
6097 pmap_unmapdev(vm_offset_t va, vm_size_t size)
6098 {
6099 	vm_offset_t base, offset;
6100 
6101 	base = va & ~PAGE_MASK;
6102 	offset = va & PAGE_MASK;
6103 	size = roundup(offset + size, PAGE_SIZE);
6104 	pmap_qremove(va, size >> PAGE_SHIFT);
6105 	kmem_free(kernel_map, base, size);
6106 }
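
/*
 * Illustrative sketch (not compiled in): a hypothetical driver mapping a
 * device register window uncacheable and tearing the mapping down again.
 * bar_pa and bar_size are assumed values provided by the bus attach code.
 */
#if 0
	void *regs;

	regs = pmap_mapdev_uncacheable(bar_pa, bar_size);
	/* ... program the device through 'regs' ... */
	pmap_unmapdev((vm_offset_t)regs, bar_size);
#endif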
6107 
6108 /*
6109  * Sets the memory attribute for the specified page.
6110  */
6111 void
6112 pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
6113 {
6114 
6115     m->pat_mode = ma;
6116 
6117     /*
6118      * If "m" is a normal page, update its direct mapping.  This update
6119      * can be relied upon to perform any cache operations that are
6120      * required for data coherence.
6121      */
6122     if ((m->flags & PG_FICTITIOUS) == 0)
6123         pmap_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)), 1, m->pat_mode);
6124 }
6125 
6126 /*
6127  * Change the PAT attribute on an existing kernel memory map.  Caller
6128  * must ensure that the virtual memory in question is not accessed
6129  * during the adjustment.
6130  *
6131  * If the va is within the DMAP we cannot use vtopte() because the DMAP
6132  * utilizes 2MB or 1GB pages.  2MB is forced at the moment, so calculate
6133  * the pd_entry pointer based on that.
6134  */
6135 void
6136 pmap_change_attr(vm_offset_t va, vm_size_t count, int mode)
6137 {
6138 	pt_entry_t *pte;
6139 	vm_offset_t base;
6140 	int changed = 0;
6141 
6142 	if (va == 0)
6143 		panic("pmap_change_attr: va is NULL");
6144 	base = trunc_page(va);
6145 
6146 	if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) {
6147 		pd_entry_t *pd;
6148 
6149 		KKASSERT(va < DMapMaxAddress);
6150 		pd = (pd_entry_t *)PHYS_TO_DMAP(DMPDphys);
6151 		pd += (va - DMAP_MIN_ADDRESS) >> PDRSHIFT;
6152 
6153 		while ((long)count > 0) {
6154 			*pd =
6155 			   (*pd & ~(pd_entry_t)(kernel_pmap->pmap_cache_mask_pde)) |
6156 			   kernel_pmap->pmap_cache_bits_pde[mode];
6157 			count -= NBPDR / PAGE_SIZE;
6158 			va += NBPDR;
6159 			++pd;
6160 		}
6161 	} else {
6162 		while (count) {
6163 			pte = vtopte(va);
6164 			*pte =
6165 			   (*pte & ~(pt_entry_t)(kernel_pmap->pmap_cache_mask_pte)) |
6166 			   kernel_pmap->pmap_cache_bits_pte[mode];
6167 			--count;
6168 			va += PAGE_SIZE;
6169 		}
6170 	}
6171 
6172 	changed = 1;	/* XXX: not optimal */
6173 
6174 	/*
6175 	 * Flush CPU caches if required, to make sure no data that shouldn't
6176 	 * be cached remains cached.
6177 	 */
6178 	if (changed) {
6179 		pmap_invalidate_range(kernel_pmap, base, va);
6180 		pmap_invalidate_cache_range(base, va);
6181 	}
6182 }
6183 
6184 /*
6185  * Perform the pmap work for mincore().
6186  */
6187 int
6188 pmap_mincore(pmap_t pmap, vm_offset_t addr)
6189 {
6190 	pt_entry_t *ptep, pte;
6191 	vm_page_t m;
6192 	int val = 0;
6193 
6194 	ptep = pmap_pte(pmap, addr);
6195 
6196 	if (ptep && (pte = *ptep) != 0) {
6197 		vm_offset_t pa;
6198 
6199 		val = MINCORE_INCORE;
6200 		pa = pte & PG_FRAME;
6201 		if (pte & pmap->pmap_bits[PG_MANAGED_IDX])
6202 			m = PHYS_TO_VM_PAGE(pa);
6203 		else
6204 			m = NULL;
6205 
6206 		/*
6207 		 * Modified by us
6208 		 */
6209 		if (pte & pmap->pmap_bits[PG_M_IDX])
6210 			val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER;
6211 
6212 		/*
6213 		 * Modified by someone
6214 		 */
6215 		else if (m && (m->dirty || pmap_is_modified(m)))
6216 			val |= MINCORE_MODIFIED_OTHER;
6217 
6218 		/*
6219 		 * Referenced by us, or someone else.
6220 		 */
6221 		if (pte & pmap->pmap_bits[PG_A_IDX]) {
6222 			val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER;
6223 		} else if (m && ((m->flags & PG_REFERENCED) ||
6224 				 pmap_ts_referenced(m))) {
6225 			val |= MINCORE_REFERENCED_OTHER;
6226 			vm_page_flag_set(m, PG_REFERENCED);
6227 		}
6228 	}
6229 	return val;
6230 }
6231 
6232 /*
6233  * Replace p->p_vmspace with a new one.  If adjrefs is non-zero the new
6234  * vmspace will be ref'd and the old one will be deref'd.
6235  *
6236  * The vmspace for all lwps associated with the process will be adjusted
6237  * and cr3 will be reloaded if any lwp is the current lwp.
6238  *
6239  * The process must hold the vmspace->vm_map.token for both oldvm and newvm.
6240  */
6241 void
6242 pmap_replacevm(struct proc *p, struct vmspace *newvm, int adjrefs)
6243 {
6244 	struct vmspace *oldvm;
6245 	struct lwp *lp;
6246 
6247 	oldvm = p->p_vmspace;
6248 	if (oldvm != newvm) {
6249 		if (adjrefs)
6250 			vmspace_ref(newvm);
6251 		p->p_vmspace = newvm;
6252 		KKASSERT(p->p_nthreads == 1);
6253 		lp = RB_ROOT(&p->p_lwp_tree);
6254 		pmap_setlwpvm(lp, newvm);
6255 		if (adjrefs)
6256 			vmspace_rel(oldvm);
6257 	}
6258 }
6259 
6260 /*
6261  * Set the vmspace for a LWP.  The vmspace is almost universally set the
6262  * same as the process vmspace, but virtual kernels need to swap out contexts
6263  * on a per-lwp basis.
6264  *
6265  * Caller does not necessarily hold any vmspace tokens.  Caller must control
6266  * the lwp (typically by being in the context of the lwp).  We use a critical
6267  * section to protect against statclock and hardclock (statistics collection).
6268  */
6269 void
6270 pmap_setlwpvm(struct lwp *lp, struct vmspace *newvm)
6271 {
6272 	struct vmspace *oldvm;
6273 	struct pmap *pmap;
6274 	thread_t td;
6275 
6276 	oldvm = lp->lwp_vmspace;
6277 
6278 	if (oldvm != newvm) {
6279 		crit_enter();
6280 		td = curthread;
6281 		KKASSERT((newvm->vm_refcnt & VM_REF_DELETED) == 0);
6282 		lp->lwp_vmspace = newvm;
6283 		if (td->td_lwp == lp) {
6284 			pmap = vmspace_pmap(newvm);
6285 			ATOMIC_CPUMASK_ORBIT(pmap->pm_active, mycpu->gd_cpuid);
6286 			if (pmap->pm_active_lock & CPULOCK_EXCL)
6287 				pmap_interlock_wait(newvm);
6288 #if defined(SWTCH_OPTIM_STATS)
6289 			tlb_flush_count++;
6290 #endif
6291 			if (pmap->pmap_bits[TYPE_IDX] == REGULAR_PMAP) {
6292 				td->td_pcb->pcb_cr3 = vtophys(pmap->pm_pml4);
6293 				if (meltdown_mitigation && pmap->pm_pmlpv_iso) {
6294 					td->td_pcb->pcb_cr3_iso =
6295 						vtophys(pmap->pm_pml4_iso);
6296 					td->td_pcb->pcb_flags |= PCB_ISOMMU;
6297 				} else {
6298 					td->td_pcb->pcb_cr3_iso = 0;
6299 					td->td_pcb->pcb_flags &= ~PCB_ISOMMU;
6300 				}
6301 			} else if (pmap->pmap_bits[TYPE_IDX] == EPT_PMAP) {
6302 				td->td_pcb->pcb_cr3 = KPML4phys;
6303 				td->td_pcb->pcb_cr3_iso = 0;
6304 				td->td_pcb->pcb_flags &= ~PCB_ISOMMU;
6305 			} else {
6306 				panic("pmap_setlwpvm: unknown pmap type\n");
6307 			}
6308 
6309 			/*
6310 			 * The MMU separation fields need to be updated
6311 			 * (the trampoline can't access the pcb directly
6312 			 * from the restricted user pmap).
6313 			 */
6314 			{
6315 				struct trampframe *tramp;
6316 
6317 				tramp = &pscpu->trampoline;
6318 				tramp->tr_pcb_cr3 = td->td_pcb->pcb_cr3;
6319 				tramp->tr_pcb_cr3_iso = td->td_pcb->pcb_cr3_iso;
6320 				tramp->tr_pcb_flags = td->td_pcb->pcb_flags;
6321 				tramp->tr_pcb_rsp = (register_t)td->td_pcb;
6322 				/* tr_pcb_rsp doesn't change */
6323 			}
6324 
6325 			/*
6326 			 * In kernel-land we always use the normal PML4E
6327 			 * so the kernel is fully mapped and can also access
6328 			 * user memory.
6329 			 */
6330 			load_cr3(td->td_pcb->pcb_cr3);
6331 			pmap = vmspace_pmap(oldvm);
6332 			ATOMIC_CPUMASK_NANDBIT(pmap->pm_active,
6333 					       mycpu->gd_cpuid);
6334 		}
6335 		crit_exit();
6336 	}
6337 }
6338 
6339 /*
6340  * Used to control the backing vmspace on the host for a guest VM.
6341  * The cpumask is needed by the host pager to properly invalidate the
6342  * host TLB when paging out the backing memory of a guest VM.
6343  *
6344  * NOTE: The scheduler might sometimes overload multiple vCPUs on the
6345  *       same physical cpu, so the operation is not quite as simple as
6346  *       calling add_cpu/del_cpu in the core vmrun routines.
6347  */
6348 void
6349 pmap_add_cpu(struct vmspace *vm, int cpuid)
6350 {
6351 	ATOMIC_CPUMASK_ORBIT(vm->vm_pmap.pm_active, mycpu->gd_cpuid);
6352 	crit_enter();
6353 	pmap_interlock_wait(vm);
6354 	crit_exit();
6355 }
6356 
6357 void
6358 pmap_del_cpu(struct vmspace *vm, int cpuid)
6359 {
6360 	ATOMIC_CPUMASK_NANDBIT(vm->vm_pmap.pm_active, mycpu->gd_cpuid);
6361 }
6362 
6363 void
6364 pmap_del_all_cpus(struct vmspace *vm)
6365 {
6366 	CPUMASK_ASSZERO(vm->vm_pmap.pm_active);
6367 }
6368 
6369 /*
6370  * Called when switching to a locked pmap, used to interlock against pmaps
6371  * undergoing modifications to prevent us from activating the MMU for the
6372  * target pmap until all such modifications have completed.  We have to do
6373  * this because the thread making the modifications has already set up its
6374  * SMP synchronization mask.
6375  *
6376  * This function cannot sleep!
6377  *
6378  * No requirements.
6379  */
6380 void
6381 pmap_interlock_wait(struct vmspace *vm)
6382 {
6383 	struct pmap *pmap = &vm->vm_pmap;
6384 
6385 	if (pmap->pm_active_lock & CPULOCK_EXCL) {
6386 		crit_enter();
6387 		KKASSERT(curthread->td_critcount >= 2);
6388 		DEBUG_PUSH_INFO("pmap_interlock_wait");
6389 		while (pmap->pm_active_lock & CPULOCK_EXCL) {
6390 			cpu_ccfence();
6391 			lwkt_process_ipiq();
6392 		}
6393 		DEBUG_POP_INFO();
6394 		crit_exit();
6395 	}
6396 }
6397 
6398 vm_offset_t
6399 pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size)
6400 {
6401 
6402 	if ((obj == NULL) || (size < NBPDR) ||
6403 	    ((obj->type != OBJT_DEVICE) && (obj->type != OBJT_MGTDEVICE))) {
6404 		return addr;
6405 	}
6406 
6407 	addr = roundup2(addr, NBPDR);
6408 	return addr;
6409 }
6410 
6411 /*
6412  * Used by kmalloc/kfree; the page already exists at va.
6413  */
6414 vm_page_t
6415 pmap_kvtom(vm_offset_t va)
6416 {
6417 	pt_entry_t *ptep = vtopte(va);
6418 
6419 	return(PHYS_TO_VM_PAGE(*ptep & PG_FRAME));
6420 }
6421 
6422 /*
6423  * Initialize machine-specific shared page directory support.  This
6424  * is executed when a VM object is created.
6425  */
6426 void
6427 pmap_object_init(vm_object_t object)
6428 {
6429 }
6430 
6431 /*
6432  * Clean up machine-specific shared page directory support.  This
6433  * is executed when a VM object is destroyed.
6434  */
6435 void
6436 pmap_object_free(vm_object_t object)
6437 {
6438 }
6439 
6440 /*
6441  * pmap_pgscan_callback - Used by pmap_pgscan to acquire the related
6442  * VM page and issue a pginfo->callback.
6443  */
6444 static
6445 void
6446 pmap_pgscan_callback(pmap_t pmap, struct pmap_scan_info *info,
6447 		      vm_pindex_t *pte_placemark,
6448 		      pv_entry_t pt_pv, vm_offset_t va,
6449 		      pt_entry_t *ptep, void *arg)
6450 {
6451 	struct pmap_pgscan_info *pginfo = arg;
6452 	vm_page_t m;
6453 	pt_entry_t pte;
6454 
6455 	pte = *ptep;
6456 	cpu_ccfence();
6457 
6458 	if (pte & pmap->pmap_bits[PG_MANAGED_IDX]) {
6459 		/*
6460 		 * Try to busy the page while we hold the pte_placemark locked.
6461 		 */
6462 		m = PHYS_TO_VM_PAGE(*ptep & PG_FRAME);
6463 		if (vm_page_busy_try(m, TRUE) == 0) {
6464 			if (m == PHYS_TO_VM_PAGE(*ptep & PG_FRAME)) {
6465 				/*
6466 				 * The callback is issued with the pt_pv
6467 				 * unlocked.
6468 				 */
6469 				pv_placemarker_wakeup(pmap, pte_placemark);
6470 				if (pt_pv) {
6471 					vm_page_wire_quick(pt_pv->pv_m);
6472 					pv_unlock(pt_pv);
6473 				}
6474 				if (pginfo->callback(pginfo, va, m) < 0)
6475 					info->stop = 1;
6476 				if (pt_pv) {
6477 					pv_lock(pt_pv);
6478 					if (vm_page_unwire_quick(pt_pv->pv_m)) {
6479 						panic("pmap_pgscan: bad wire_"
6480 						      "count on pt_pv");
6481 					}
6482 				}
6483 			} else {
6484 				vm_page_wakeup(m);
6485 				pv_placemarker_wakeup(pmap, pte_placemark);
6486 			}
6487 		} else {
6488 			++pginfo->busycount;
6489 			pv_placemarker_wakeup(pmap, pte_placemark);
6490 		}
6491 	} else {
6492 		/*
6493 		 * Shared page table or unmanaged page (sharept or !sharept)
6494 		 */
6495 		pv_placemarker_wakeup(pmap, pte_placemark);
6496 	}
6497 }
6498 
6499 void
6500 pmap_pgscan(struct pmap_pgscan_info *pginfo)
6501 {
6502 	struct pmap_scan_info info;
6503 
6504 	pginfo->offset = pginfo->beg_addr;
6505 	info.pmap = pginfo->pmap;
6506 	info.sva = pginfo->beg_addr;
6507 	info.eva = pginfo->end_addr;
6508 	info.func = pmap_pgscan_callback;
6509 	info.arg = pginfo;
6510 	pmap_scan(&info, 0);
6511 	if (info.stop == 0)
6512 		pginfo->offset = pginfo->end_addr;
6513 }
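
/*
 * Illustrative sketch (not compiled in): driving pmap_pgscan() across a
 * range.  The callback signature follows the pginfo->callback invocation
 * above; the callback and helper names, and the assumption that the
 * callback is responsible for releasing the hard-busied page, are for
 * the example only.
 */
#if 0
static int
example_pgscan_cb(struct pmap_pgscan_info *pginfo, vm_offset_t va, vm_page_t m)
{
	/* The page is handed to us hard-busied; release it when done. */
	vm_page_wakeup(m);
	return 0;			/* a negative return stops the scan */
}

static void
example_pgscan(pmap_t pmap, vm_offset_t beg, vm_offset_t end)
{
	struct pmap_pgscan_info pginfo;

	bzero(&pginfo, sizeof(pginfo));
	pginfo.pmap = pmap;
	pginfo.beg_addr = beg;
	pginfo.end_addr = end;
	pginfo.callback = example_pgscan_cb;
	pmap_pgscan(&pginfo);
	/* pginfo.offset reaches end_addr when the scan was not stopped */
}
#endif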
6514 
6515 /*
6516  * Wait for a placemarker that we do not own to clear.  The placemarker
6517  * in question is not necessarily set to the pindex we want; we may have
6518  * to wait on the element because we want to reserve it ourselves.
6519  *
6520  * NOTE: PM_PLACEMARK_WAKEUP sets a bit which is already set in
6521  *	 PM_NOPLACEMARK, so it does not interfere with placemarks
6522  *	 which have already been woken up.
6523  *
6524  * NOTE: This routine is called without the pmap spin-lock and so can
6525  *	 race changes to *pmark.  Due to the sensitivity of the routine
6526  *	 to possible MULTIPLE interactions from other cpus, and the
6527  *	 overloading of the WAKEUP bit on PM_NOPLACEMARK, we have to
6528  *	 use a cmpset loop to avoid a race that might cause the WAKEUP
6529  *	 bit to be lost.
6530  *
6531  * Caller is expected to retry its operation upon return.
6532  */
6533 static
6534 void
6535 pv_placemarker_wait(pmap_t pmap, vm_pindex_t *pmark)
6536 {
6537 	vm_pindex_t mark;
6538 
6539 	mark = *pmark;
6540 	cpu_ccfence();
6541 	while (mark != PM_NOPLACEMARK) {
6542 		tsleep_interlock(pmark, 0);
6543 		if (atomic_fcmpset_long(pmark, &mark,
6544 				       mark | PM_PLACEMARK_WAKEUP)) {
6545 			tsleep(pmark, PINTERLOCKED, "pvplw", 0);
6546 			break;
6547 		}
6548 	}
6549 }
6550 
6551 /*
6552  * Wakeup a placemarker that we own.  Replace the entry with
6553  * PM_NOPLACEMARK and issue a wakeup() if necessary.
6554  */
6555 static
6556 void
6557 pv_placemarker_wakeup(pmap_t pmap, vm_pindex_t *pmark)
6558 {
6559 	vm_pindex_t pindex;
6560 
6561 	pindex = atomic_swap_long(pmark, PM_NOPLACEMARK);
6562 	KKASSERT(pindex != PM_NOPLACEMARK);
6563 	if (pindex & PM_PLACEMARK_WAKEUP)
6564 		wakeup(pmark);
6565 }
6566