xref: /dragonfly/sys/platform/pc64/x86_64/pmap.c (revision 38b930d0)
1 /*
2  * Copyright (c) 1991 Regents of the University of California.
3  * Copyright (c) 1994 John S. Dyson
4  * Copyright (c) 1994 David Greenman
5  * Copyright (c) 2003 Peter Wemm
6  * Copyright (c) 2005-2008 Alan L. Cox <alc@cs.rice.edu>
7  * Copyright (c) 2008, 2009 The DragonFly Project.
8  * Copyright (c) 2008, 2009 Jordan Gordeev.
9  * Copyright (c) 2011-2012 Matthew Dillon
10  * All rights reserved.
11  *
12  * This code is derived from software contributed to Berkeley by
13  * the Systems Programming Group of the University of Utah Computer
14  * Science Department and William Jolitz of UUNET Technologies Inc.
15  *
16  * Redistribution and use in source and binary forms, with or without
17  * modification, are permitted provided that the following conditions
18  * are met:
19  * 1. Redistributions of source code must retain the above copyright
20  *    notice, this list of conditions and the following disclaimer.
21  * 2. Redistributions in binary form must reproduce the above copyright
22  *    notice, this list of conditions and the following disclaimer in the
23  *    documentation and/or other materials provided with the distribution.
24  * 3. All advertising materials mentioning features or use of this software
25  *    must display the following acknowledgement:
26  *	This product includes software developed by the University of
27  *	California, Berkeley and its contributors.
28  * 4. Neither the name of the University nor the names of its contributors
29  *    may be used to endorse or promote products derived from this software
30  *    without specific prior written permission.
31  *
32  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
33  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
34  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
35  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
36  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
37  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
38  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
39  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
40  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
41  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
42  * SUCH DAMAGE.
43  */
44 /*
45  * Manage physical address maps for x86-64 systems.
46  */
47 
48 #if JG
49 #include "opt_disable_pse.h"
50 #include "opt_pmap.h"
51 #endif
52 #include "opt_msgbuf.h"
53 
54 #include <sys/param.h>
55 #include <sys/kernel.h>
56 #include <sys/proc.h>
57 #include <sys/msgbuf.h>
58 #include <sys/vmmeter.h>
59 #include <sys/mman.h>
60 #include <sys/systm.h>
61 
62 #include <vm/vm.h>
63 #include <vm/vm_param.h>
64 #include <sys/sysctl.h>
65 #include <sys/lock.h>
66 #include <vm/vm_kern.h>
67 #include <vm/vm_page.h>
68 #include <vm/vm_map.h>
69 #include <vm/vm_object.h>
70 #include <vm/vm_extern.h>
71 #include <vm/vm_pageout.h>
72 #include <vm/vm_pager.h>
73 #include <vm/vm_zone.h>
74 
75 #include <sys/user.h>
76 #include <sys/thread2.h>
77 #include <sys/sysref2.h>
78 #include <sys/spinlock2.h>
79 #include <vm/vm_page2.h>
80 
81 #include <machine/cputypes.h>
82 #include <machine/md_var.h>
83 #include <machine/specialreg.h>
84 #include <machine/smp.h>
85 #include <machine_base/apic/apicreg.h>
86 #include <machine/globaldata.h>
87 #include <machine/pmap.h>
88 #include <machine/pmap_inval.h>
89 #include <machine/inttypes.h>
90 
91 #include <ddb/ddb.h>
92 
93 #define PMAP_KEEP_PDIRS
94 #ifndef PMAP_SHPGPERPROC
95 #define PMAP_SHPGPERPROC 2000
96 #endif
97 
98 #if defined(DIAGNOSTIC)
99 #define PMAP_DIAGNOSTIC
100 #endif
101 
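/*
 * MINPV is the floor on the number of pv_entry structures preallocated
 * at boot time (see pmap_init()).
 */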
102 #define MINPV 2048
103 
104 /*
105  * pmap debugging will report who owns a pv lock when blocking.
106  */
107 #ifdef PMAP_DEBUG
108 
109 #define PMAP_DEBUG_DECL		,const char *func, int lineno
110 #define PMAP_DEBUG_ARGS		, __func__, __LINE__
111 #define PMAP_DEBUG_COPY		, func, lineno
112 
113 #define pv_get(pmap, pindex)		_pv_get(pmap, pindex		\
114 							PMAP_DEBUG_ARGS)
115 #define pv_lock(pv)			_pv_lock(pv			\
116 							PMAP_DEBUG_ARGS)
117 #define pv_hold_try(pv)			_pv_hold_try(pv			\
118 							PMAP_DEBUG_ARGS)
119 #define pv_alloc(pmap, pindex, isnewp)	_pv_alloc(pmap, pindex, isnewp	\
120 							PMAP_DEBUG_ARGS)
121 
122 #else
123 
124 #define PMAP_DEBUG_DECL
125 #define PMAP_DEBUG_ARGS
126 #define PMAP_DEBUG_COPY
127 
128 #define pv_get(pmap, pindex)		_pv_get(pmap, pindex)
129 #define pv_lock(pv)			_pv_lock(pv)
130 #define pv_hold_try(pv)			_pv_hold_try(pv)
131 #define pv_alloc(pmap, pindex, isnewp)	_pv_alloc(pmap, pindex, isnewp)
132 
133 #endif
134 
135 /*
136  * Get PDEs and PTEs for user/kernel address space
137  */
138 #define pdir_pde(m, v) (m[(vm_offset_t)(v) >> PDRSHIFT])
139 
140 #define pmap_pde_v(pmap, pte)		((*(pd_entry_t *)pte & pmap->pmap_bits[PG_V_IDX]) != 0)
141 #define pmap_pte_w(pmap, pte)		((*(pt_entry_t *)pte & pmap->pmap_bits[PG_W_IDX]) != 0)
142 #define pmap_pte_m(pmap, pte)		((*(pt_entry_t *)pte & pmap->pmap_bits[PG_M_IDX]) != 0)
143 #define pmap_pte_u(pmap, pte)		((*(pt_entry_t *)pte & pmap->pmap_bits[PG_U_IDX]) != 0)
144 #define pmap_pte_v(pmap, pte)		((*(pt_entry_t *)pte & pmap->pmap_bits[PG_V_IDX]) != 0)
145 
146 /*
147  * Given a map and a machine-independent protection code,
148  * convert to the machine-dependent protection code.
149  */
150 #define pte_prot(m, p)		\
151 	(m->protection_codes[p & (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE)])
152 static int protection_codes[PROTECTION_CODES_SIZE];
153 
154 struct pmap kernel_pmap;
155 static TAILQ_HEAD(,pmap)	pmap_list = TAILQ_HEAD_INITIALIZER(pmap_list);
156 
157 MALLOC_DEFINE(M_OBJPMAP, "objpmap", "pmaps associated with VM objects");
158 
159 vm_paddr_t avail_start;		/* PA of first available physical page */
160 vm_paddr_t avail_end;		/* PA of last available physical page */
161 vm_offset_t virtual2_start;	/* cutout free area prior to kernel start */
162 vm_offset_t virtual2_end;
163 vm_offset_t virtual_start;	/* VA of first avail page (after kernel bss) */
164 vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
165 vm_offset_t KvaStart;		/* VA start of KVA space */
166 vm_offset_t KvaEnd;		/* VA end of KVA space (non-inclusive) */
167 vm_offset_t KvaSize;		/* max size of kernel virtual address space */
168 static boolean_t pmap_initialized = FALSE;	/* Has pmap_init completed? */
169 //static int pgeflag;		/* PG_G or-in */
170 //static int pseflag;		/* PG_PS or-in */
171 uint64_t PatMsr;
172 
173 static int ndmpdp;
174 static vm_paddr_t dmaplimit;
175 static int nkpt;
176 vm_offset_t kernel_vm_end = VM_MIN_KERNEL_ADDRESS;
177 
178 static pt_entry_t pat_pte_index[PAT_INDEX_SIZE];	/* PAT -> PG_ bits */
179 /*static pt_entry_t pat_pde_index[PAT_INDEX_SIZE];*/	/* PAT -> PG_ bits */
180 
181 static uint64_t KPTbase;
182 static uint64_t KPTphys;
183 static uint64_t	KPDphys;	/* phys addr of kernel level 2 */
184 static uint64_t	KPDbase;	/* phys addr of kernel level 2 @ KERNBASE */
185 uint64_t KPDPphys;	/* phys addr of kernel level 3 */
186 uint64_t KPML4phys;	/* phys addr of kernel level 4 */
187 
188 static uint64_t	DMPDphys;	/* phys addr of direct mapped level 2 */
189 static uint64_t	DMPDPphys;	/* phys addr of direct mapped level 3 */
190 
191 /*
192  * Data for the pv entry allocation mechanism
193  */
194 static vm_zone_t pvzone;
195 static struct vm_zone pvzone_store;
196 static struct vm_object pvzone_obj;
197 static int pv_entry_max=0, pv_entry_high_water=0;
198 static int pmap_pagedaemon_waken = 0;
199 static struct pv_entry *pvinit;
200 
201 /*
202  * All those kernel PT submaps that BSD is so fond of
203  */
204 pt_entry_t *CMAP1 = NULL, *ptmmap;
205 caddr_t CADDR1 = NULL, ptvmmap = NULL;
206 static pt_entry_t *msgbufmap;
207 struct msgbuf *msgbufp=NULL;
208 
209 /*
210  * PMAP default PG_* bits. Needed to be able to add
211  * EPT/NPT pagetable pmap_bits for the VMM module
212  */
213 uint64_t pmap_bits_default[] = {
214 		REGULAR_PMAP,					/* TYPE_IDX		0 */
215 		X86_PG_V,					/* PG_V_IDX		1 */
216 		X86_PG_RW,					/* PG_RW_IDX		2 */
217 		X86_PG_U,					/* PG_U_IDX		3 */
218 		X86_PG_A,					/* PG_A_IDX		4 */
219 		X86_PG_M,					/* PG_M_IDX		5 */
220 		X86_PG_PS,					/* PG_PS_IDX		6 */
221 		X86_PG_G,					/* PG_G_IDX		7 */
222 		X86_PG_AVAIL1,					/* PG_AVAIL1_IDX	8 */
223 		X86_PG_AVAIL2,					/* PG_AVAIL2_IDX	9 */
224 		X86_PG_AVAIL3,					/* PG_AVAIL3_IDX	10 */
225 		X86_PG_NC_PWT | X86_PG_NC_PCD,			/* PG_N_IDX	11 */
226 };
227 /*
228  * Crashdump maps.
229  */
230 static pt_entry_t *pt_crashdumpmap;
231 static caddr_t crashdumpmap;
232 
233 static int pmap_yield_count = 64;
234 SYSCTL_INT(_machdep, OID_AUTO, pmap_yield_count, CTLFLAG_RW,
235     &pmap_yield_count, 0, "Yield during init_pt/release");
236 static int pmap_mmu_optimize = 0;
237 SYSCTL_INT(_machdep, OID_AUTO, pmap_mmu_optimize, CTLFLAG_RW,
238     &pmap_mmu_optimize, 0, "Share page table pages when possible");
239 
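/*
 * Force-disable the PSE (2MB page) setup for the initial kernel mapping
 * in pmap_bootstrap() below.
 */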
240 #define DISABLE_PSE
241 
242 /* Standard user access functions */
243 extern int std_copyinstr (const void *udaddr, void *kaddr, size_t len,
244     size_t *lencopied);
245 extern int std_copyin (const void *udaddr, void *kaddr, size_t len);
246 extern int std_copyout (const void *kaddr, void *udaddr, size_t len);
247 extern int std_fubyte (const void *base);
248 extern int std_subyte (void *base, int byte);
249 extern long std_fuword (const void *base);
250 extern int std_suword (void *base, long word);
251 extern int std_suword32 (void *base, int word);
252 
253 static void pv_hold(pv_entry_t pv);
254 static int _pv_hold_try(pv_entry_t pv
255 				PMAP_DEBUG_DECL);
256 static void pv_drop(pv_entry_t pv);
257 static void _pv_lock(pv_entry_t pv
258 				PMAP_DEBUG_DECL);
259 static void pv_unlock(pv_entry_t pv);
260 static pv_entry_t _pv_alloc(pmap_t pmap, vm_pindex_t pindex, int *isnew
261 				PMAP_DEBUG_DECL);
262 static pv_entry_t _pv_get(pmap_t pmap, vm_pindex_t pindex
263 				PMAP_DEBUG_DECL);
264 static pv_entry_t pv_get_try(pmap_t pmap, vm_pindex_t pindex, int *errorp);
265 static pv_entry_t pv_find(pmap_t pmap, vm_pindex_t pindex);
266 static void pv_put(pv_entry_t pv);
267 static void pv_free(pv_entry_t pv);
268 static void *pv_pte_lookup(pv_entry_t pv, vm_pindex_t pindex);
269 static pv_entry_t pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex,
270 		      pv_entry_t *pvpp);
271 static pv_entry_t pmap_allocpte_seg(pmap_t pmap, vm_pindex_t ptepindex,
272 		      pv_entry_t *pvpp, vm_map_entry_t entry, vm_offset_t va);
273 static void pmap_remove_pv_pte(pv_entry_t pv, pv_entry_t pvp,
274 		      struct pmap_inval_info *info);
275 static vm_page_t pmap_remove_pv_page(pv_entry_t pv);
276 static int pmap_release_pv(pv_entry_t pv, pv_entry_t pvp);
277 
278 struct pmap_scan_info;
279 static void pmap_remove_callback(pmap_t pmap, struct pmap_scan_info *info,
280 		      pv_entry_t pte_pv, pv_entry_t pt_pv, int sharept,
281 		      vm_offset_t va, pt_entry_t *ptep, void *arg __unused);
282 static void pmap_protect_callback(pmap_t pmap, struct pmap_scan_info *info,
283 		      pv_entry_t pte_pv, pv_entry_t pt_pv, int sharept,
284 		      vm_offset_t va, pt_entry_t *ptep, void *arg __unused);
285 
286 static void i386_protection_init (void);
287 static void create_pagetables(vm_paddr_t *firstaddr);
288 static void pmap_remove_all (vm_page_t m);
289 static boolean_t pmap_testbit (vm_page_t m, int bit);
290 
291 static pt_entry_t * pmap_pte_quick (pmap_t pmap, vm_offset_t va);
292 static vm_offset_t pmap_kmem_choose(vm_offset_t addr);
293 
294 static void pmap_pinit_defaults(struct pmap *pmap);
295 
296 static unsigned pdir4mb;
297 
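/*
 * Comparison function for the red-black tree of pv_entry structures,
 * keyed on pv_pindex (used by RB_GENERATE2 below).
 */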
298 static int
299 pv_entry_compare(pv_entry_t pv1, pv_entry_t pv2)
300 {
301 	if (pv1->pv_pindex < pv2->pv_pindex)
302 		return(-1);
303 	if (pv1->pv_pindex > pv2->pv_pindex)
304 		return(1);
305 	return(0);
306 }
307 
308 RB_GENERATE2(pv_entry_rb_tree, pv_entry, pv_entry,
309              pv_entry_compare, vm_pindex_t, pv_pindex);
310 
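/*
 * Maintain the per-cpu vmtotal counters (t_arm/t_armshr/t_avmshr) as
 * pv entries are added to or removed from a page's pv_list.
 */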
311 static __inline
312 void
313 pmap_page_stats_adding(vm_page_t m)
314 {
315 	globaldata_t gd = mycpu;
316 
317 	if (TAILQ_EMPTY(&m->md.pv_list)) {
318 		++gd->gd_vmtotal.t_arm;
319 	} else if (TAILQ_FIRST(&m->md.pv_list) ==
320 		   TAILQ_LAST(&m->md.pv_list, md_page_pv_list)) {
321 		++gd->gd_vmtotal.t_armshr;
322 		++gd->gd_vmtotal.t_avmshr;
323 	} else {
324 		++gd->gd_vmtotal.t_avmshr;
325 	}
326 }
327 
328 static __inline
329 void
330 pmap_page_stats_deleting(vm_page_t m)
331 {
332 	globaldata_t gd = mycpu;
333 
334 	if (TAILQ_EMPTY(&m->md.pv_list)) {
335 		--gd->gd_vmtotal.t_arm;
336 	} else if (TAILQ_FIRST(&m->md.pv_list) ==
337 		   TAILQ_LAST(&m->md.pv_list, md_page_pv_list)) {
338 		--gd->gd_vmtotal.t_armshr;
339 		--gd->gd_vmtotal.t_avmshr;
340 	} else {
341 		--gd->gd_vmtotal.t_avmshr;
342 	}
343 }
344 
345 /*
346  * Move the kernel virtual free pointer to the next
347  * 2MB.  This is used to help improve performance
348  * by using a large (2MB) page for much of the kernel
349  * (.text, .data, .bss)
350  */
351 static
352 vm_offset_t
353 pmap_kmem_choose(vm_offset_t addr)
354 {
355 	vm_offset_t newaddr = addr;
356 
357 	newaddr = (addr + (NBPDR - 1)) & ~(NBPDR - 1);
358 	return newaddr;
359 }
360 
361 /*
362  * pmap_pte_quick:
363  *
364  *	Super fast pmap_pte routine best used when scanning the pv lists.
365  *	This eliminates many coarse-grained invltlb calls.  Note that many of
366  *	the pv list scans are across different pmaps and it is very wasteful
367  *	to do an entire invltlb when checking a single mapping.
368  */
369 static __inline pt_entry_t *pmap_pte(pmap_t pmap, vm_offset_t va);
370 
371 static
372 pt_entry_t *
373 pmap_pte_quick(pmap_t pmap, vm_offset_t va)
374 {
375 	return pmap_pte(pmap, va);
376 }
377 
378 /*
379  * Returns the pindex of a page table entry (representing a terminal page).
380  * There are NUPTE_TOTAL page table entries possible (a huge number)
381  *
382  * x86-64 has a 48-bit address space, where bit 47 is sign-extended out.
383  * We want to properly translate negative KVAs.
384  */
385 static __inline
386 vm_pindex_t
387 pmap_pte_pindex(vm_offset_t va)
388 {
389 	return ((va >> PAGE_SHIFT) & (NUPTE_TOTAL - 1));
390 }
391 
392 /*
393  * Returns the pindex of a page table.
394  */
395 static __inline
396 vm_pindex_t
397 pmap_pt_pindex(vm_offset_t va)
398 {
399 	return (NUPTE_TOTAL + ((va >> PDRSHIFT) & (NUPT_TOTAL - 1)));
400 }
401 
402 /*
403  * Returns the pindex of a page directory.
404  */
405 static __inline
406 vm_pindex_t
407 pmap_pd_pindex(vm_offset_t va)
408 {
409 	return (NUPTE_TOTAL + NUPT_TOTAL +
410 		((va >> PDPSHIFT) & (NUPD_TOTAL - 1)));
411 }
412 
413 static __inline
414 vm_pindex_t
415 pmap_pdp_pindex(vm_offset_t va)
416 {
417 	return (NUPTE_TOTAL + NUPT_TOTAL + NUPD_TOTAL +
418 		((va >> PML4SHIFT) & (NUPDP_TOTAL - 1)));
419 }
420 
421 static __inline
422 vm_pindex_t
423 pmap_pml4_pindex(void)
424 {
425 	return (NUPTE_TOTAL + NUPT_TOTAL + NUPD_TOTAL + NUPDP_TOTAL);
426 }
427 
428 /*
429  * Return various clipped indexes for a given VA
430  *
431  * Returns the index of a pte in a page table, representing a terminal
432  * page.
433  */
434 static __inline
435 vm_pindex_t
436 pmap_pte_index(vm_offset_t va)
437 {
438 	return ((va >> PAGE_SHIFT) & ((1ul << NPTEPGSHIFT) - 1));
439 }
440 
441 /*
442  * Returns the index of a pt in a page directory, representing a page
443  * table.
444  */
445 static __inline
446 vm_pindex_t
447 pmap_pt_index(vm_offset_t va)
448 {
449 	return ((va >> PDRSHIFT) & ((1ul << NPDEPGSHIFT) - 1));
450 }
451 
452 /*
453  * Returns the index of a pd in a page directory page, representing a page
454  * directory.
455  */
456 static __inline
457 vm_pindex_t
458 pmap_pd_index(vm_offset_t va)
459 {
460 	return ((va >> PDPSHIFT) & ((1ul << NPDPEPGSHIFT) - 1));
461 }
462 
463 /*
464  * Returns the index of a pdp in the pml4 table, representing a page
465  * directory page.
466  */
467 static __inline
468 vm_pindex_t
469 pmap_pdp_index(vm_offset_t va)
470 {
471 	return ((va >> PML4SHIFT) & ((1ul << NPML4EPGSHIFT) - 1));
472 }
473 
474 /*
475  * Generic procedure to index a pte from a pt, pd, or pdp.
476  *
477  * NOTE: Normally passed pindex as pmap_xx_index().  pmap_xx_pindex() is NOT
478  *	 a page table page index but is instead a PV lookup index.
479  */
480 static
481 void *
482 pv_pte_lookup(pv_entry_t pv, vm_pindex_t pindex)
483 {
484 	pt_entry_t *pte;
485 
486 	pte = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pv->pv_m));
487 	return(&pte[pindex]);
488 }
489 
490 /*
491  * Return pointer to PDP slot in the PML4
492  */
493 static __inline
494 pml4_entry_t *
495 pmap_pdp(pmap_t pmap, vm_offset_t va)
496 {
497 	return (&pmap->pm_pml4[pmap_pdp_index(va)]);
498 }
499 
500 /*
501  * Return pointer to PD slot in the PDP given a pointer to the PDP
502  */
503 static __inline
504 pdp_entry_t *
505 pmap_pdp_to_pd(pml4_entry_t pdp_pte, vm_offset_t va)
506 {
507 	pdp_entry_t *pd;
508 
509 	pd = (pdp_entry_t *)PHYS_TO_DMAP(pdp_pte & PG_FRAME);
510 	return (&pd[pmap_pd_index(va)]);
511 }
512 
513 /*
514  * Return pointer to PD slot in the PDP.
515  */
516 static __inline
517 pdp_entry_t *
518 pmap_pd(pmap_t pmap, vm_offset_t va)
519 {
520 	pml4_entry_t *pdp;
521 
522 	pdp = pmap_pdp(pmap, va);
523 	if ((*pdp & pmap->pmap_bits[PG_V_IDX]) == 0)
524 		return NULL;
525 	return (pmap_pdp_to_pd(*pdp, va));
526 }
527 
528 /*
529  * Return pointer to PT slot in the PD given a pointer to the PD
530  */
531 static __inline
532 pd_entry_t *
533 pmap_pd_to_pt(pdp_entry_t pd_pte, vm_offset_t va)
534 {
535 	pd_entry_t *pt;
536 
537 	pt = (pd_entry_t *)PHYS_TO_DMAP(pd_pte & PG_FRAME);
538 	return (&pt[pmap_pt_index(va)]);
539 }
540 
541 /*
542  * Return pointer to PT slot in the PD
543  *
544  * SIMPLE PMAP NOTE: Simple pmaps (embedded in objects) do not have PDPs,
545  *		     so we cannot lookup the PD via the PDP.  Instead we
546  *		     must look it up via the pmap.
547  */
548 static __inline
549 pd_entry_t *
550 pmap_pt(pmap_t pmap, vm_offset_t va)
551 {
552 	pdp_entry_t *pd;
553 	pv_entry_t pv;
554 	vm_pindex_t pd_pindex;
555 
556 	if (pmap->pm_flags & PMAP_FLAG_SIMPLE) {
557 		pd_pindex = pmap_pd_pindex(va);
558 		spin_lock(&pmap->pm_spin);
559 		pv = pv_entry_rb_tree_RB_LOOKUP(&pmap->pm_pvroot, pd_pindex);
560 		spin_unlock(&pmap->pm_spin);
561 		if (pv == NULL || pv->pv_m == NULL)
562 			return NULL;
563 		return (pmap_pd_to_pt(VM_PAGE_TO_PHYS(pv->pv_m), va));
564 	} else {
565 		pd = pmap_pd(pmap, va);
566 		if (pd == NULL || (*pd & pmap->pmap_bits[PG_V_IDX]) == 0)
567 			 return NULL;
568 		return (pmap_pd_to_pt(*pd, va));
569 	}
570 }
571 
572 /*
573  * Return pointer to PTE slot in the PT given a pointer to the PT
574  */
575 static __inline
576 pt_entry_t *
577 pmap_pt_to_pte(pd_entry_t pt_pte, vm_offset_t va)
578 {
579 	pt_entry_t *pte;
580 
581 	pte = (pt_entry_t *)PHYS_TO_DMAP(pt_pte & PG_FRAME);
582 	return (&pte[pmap_pte_index(va)]);
583 }
584 
585 /*
586  * Return pointer to PTE slot in the PT
587  */
588 static __inline
589 pt_entry_t *
590 pmap_pte(pmap_t pmap, vm_offset_t va)
591 {
592 	pd_entry_t *pt;
593 
594 	pt = pmap_pt(pmap, va);
595 	if (pt == NULL || (*pt & pmap->pmap_bits[PG_V_IDX]) == 0)
596 		 return NULL;
597 	if ((*pt & pmap->pmap_bits[PG_PS_IDX]) != 0)
598 		return ((pt_entry_t *)pt);
599 	return (pmap_pt_to_pte(*pt, va));
600 }
601 
602 /*
603  * Of all the layers (PTE, PT, PD, PDP, PML4) the best one to cache is
604  * the PT layer.  This will speed up core pmap operations considerably.
605  *
606  * NOTE: The pmap spinlock does not need to be held but the passed-in pv
607  *	 must be in a known associated state (typically by being locked when
608  *	 the pmap spinlock isn't held).  We allow the race for that case.
609  */
610 static __inline
611 void
612 pv_cache(pv_entry_t pv, vm_pindex_t pindex)
613 {
614 	if (pindex >= pmap_pt_pindex(0) && pindex <= pmap_pd_pindex(0))
615 		pv->pv_pmap->pm_pvhint = pv;
616 }
617 
618 
619 /*
620  * KVM - return address of PT slot in PD
621  */
622 static __inline
623 pd_entry_t *
624 vtopt(vm_offset_t va)
625 {
626 	uint64_t mask = ((1ul << (NPDEPGSHIFT + NPDPEPGSHIFT +
627 				  NPML4EPGSHIFT)) - 1);
628 
629 	return (PDmap + ((va >> PDRSHIFT) & mask));
630 }
631 
632 /*
633  * KVM - return address of PTE slot in PT
634  */
635 static __inline
636 pt_entry_t *
637 vtopte(vm_offset_t va)
638 {
639 	uint64_t mask = ((1ul << (NPTEPGSHIFT + NPDEPGSHIFT +
640 				  NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1);
641 
642 	return (PTmap + ((va >> PAGE_SHIFT) & mask));
643 }
644 
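/*
 * Carve n zeroed pages out of the boot-time physical allocator.  We are
 * still running (mostly) V=P here, so the returned physical address is
 * also directly usable as a pointer.
 */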
645 static uint64_t
646 allocpages(vm_paddr_t *firstaddr, long n)
647 {
648 	uint64_t ret;
649 
650 	ret = *firstaddr;
651 	bzero((void *)ret, n * PAGE_SIZE);
652 	*firstaddr += n * PAGE_SIZE;
653 	return (ret);
654 }
655 
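/*
 * Build the initial kernel page tables by hand: the KPT/KPD/KPDP/PML4
 * hierarchy covering the start of KVA and the KERNBASE area, plus the
 * direct map (DMAP).  Called from pmap_bootstrap() while still running
 * (mostly) V=P.
 */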
656 static
657 void
658 create_pagetables(vm_paddr_t *firstaddr)
659 {
660 	long i;		/* must be 64 bits */
661 	long nkpt_base;
662 	long nkpt_phys;
663 	int j;
664 
665 	/*
666 	 * We are running (mostly) V=P at this point
667 	 *
668 	 * Calculate NKPT - number of kernel page tables.  We have to
669 	 * accommodate preallocation of the vm_page_array, dump bitmap,
670 	 * MSGBUF_SIZE, and other stuff.  Be generous.
671 	 *
672 	 * Maxmem is in pages.
673 	 *
674 	 * ndmpdp is the number of 1GB pages we wish to map.
675 	 */
676 	ndmpdp = (ptoa(Maxmem) + NBPDP - 1) >> PDPSHIFT;
677 	if (ndmpdp < 4)		/* Minimum 4GB of dirmap */
678 		ndmpdp = 4;
679 	KKASSERT(ndmpdp <= NKPDPE * NPDEPG);
680 
681 	/*
682 	 * Starting at the beginning of kvm (not KERNBASE).
683 	 */
684 	nkpt_phys = (Maxmem * sizeof(struct vm_page) + NBPDR - 1) / NBPDR;
685 	nkpt_phys += (Maxmem * sizeof(struct pv_entry) + NBPDR - 1) / NBPDR;
686 	nkpt_phys += ((nkpt + nkpt + 1 + NKPML4E + NKPDPE + NDMPML4E +
687 		       ndmpdp) + 511) / 512;
688 	nkpt_phys += 128;
689 
690 	/*
691 	 * Starting at KERNBASE - map 2G worth of page table pages.
692 	 * KERNBASE is offset -2G from the end of kvm.
693 	 */
694 	nkpt_base = (NPDPEPG - KPDPI) * NPTEPG;	/* typically 2 x 512 */
695 
696 	/*
697 	 * Allocate pages
698 	 */
699 	KPTbase = allocpages(firstaddr, nkpt_base);
700 	KPTphys = allocpages(firstaddr, nkpt_phys);
701 	KPML4phys = allocpages(firstaddr, 1);
702 	KPDPphys = allocpages(firstaddr, NKPML4E);
703 	KPDphys = allocpages(firstaddr, NKPDPE);
704 
705 	/*
706 	 * Calculate the page directory base for KERNBASE,
707 	 * that is where we start populating the page table pages.
708 	 * Basically this is the end of KVA minus 2GB.
709 	 */
710 	KPDbase = KPDphys + ((NKPDPE - (NPDPEPG - KPDPI)) << PAGE_SHIFT);
711 
712 	DMPDPphys = allocpages(firstaddr, NDMPML4E);
713 	if ((amd_feature & AMDID_PAGE1GB) == 0)
714 		DMPDphys = allocpages(firstaddr, ndmpdp);
715 	dmaplimit = (vm_paddr_t)ndmpdp << PDPSHIFT;
716 
717 	/*
718 	 * Fill in the underlying page table pages for the area around
719 	 * KERNBASE.  This remaps low physical memory to KERNBASE.
720 	 *
721 	 * Read-only from zero to physfree
722 	 * XXX not fully used, underneath 2M pages
723 	 */
724 	for (i = 0; (i << PAGE_SHIFT) < *firstaddr; i++) {
725 		((pt_entry_t *)KPTbase)[i] = i << PAGE_SHIFT;
726 		((pt_entry_t *)KPTbase)[i] |=
727 		    pmap_bits_default[PG_RW_IDX] |
728 		    pmap_bits_default[PG_V_IDX] |
729 		    pmap_bits_default[PG_G_IDX];
730 	}
731 
732 	/*
733 	 * Now map the initial kernel page tables.  One block of page
734 	 * tables is placed at the beginning of kernel virtual memory,
735 	 * and another block is placed at KERNBASE to map the kernel binary,
736 	 * data, bss, and initial pre-allocations.
737 	 */
738 	for (i = 0; i < nkpt_base; i++) {
739 		((pd_entry_t *)KPDbase)[i] = KPTbase + (i << PAGE_SHIFT);
740 		((pd_entry_t *)KPDbase)[i] |=
741 		    pmap_bits_default[PG_RW_IDX] |
742 		    pmap_bits_default[PG_V_IDX];
743 	}
744 	for (i = 0; i < nkpt_phys; i++) {
745 		((pd_entry_t *)KPDphys)[i] = KPTphys + (i << PAGE_SHIFT);
746 		((pd_entry_t *)KPDphys)[i] |=
747 		    pmap_bits_default[PG_RW_IDX] |
748 		    pmap_bits_default[PG_V_IDX];
749 	}
750 
751 	/*
752 	 * Map from zero to end of allocations using 2M pages as an
753 	 * optimization.  This will bypass some of the KPTBase pages
754 	 * above in the KERNBASE area.
755 	 */
756 	for (i = 0; (i << PDRSHIFT) < *firstaddr; i++) {
757 		((pd_entry_t *)KPDbase)[i] = i << PDRSHIFT;
758 		((pd_entry_t *)KPDbase)[i] |=
759 		    pmap_bits_default[PG_RW_IDX] |
760 		    pmap_bits_default[PG_V_IDX] |
761 		    pmap_bits_default[PG_PS_IDX] |
762 		    pmap_bits_default[PG_G_IDX];
763 	}
764 
765 	/*
766 	 * And connect up the PD to the PDP.  The kernel pmap is expected
767 	 * to pre-populate all of its PDs.  See NKPDPE in vmparam.h.
768 	 */
769 	for (i = 0; i < NKPDPE; i++) {
770 		((pdp_entry_t *)KPDPphys)[NPDPEPG - NKPDPE + i] =
771 				KPDphys + (i << PAGE_SHIFT);
772 		((pdp_entry_t *)KPDPphys)[NPDPEPG - NKPDPE + i] |=
773 		    pmap_bits_default[PG_RW_IDX] |
774 		    pmap_bits_default[PG_V_IDX] |
775 		    pmap_bits_default[PG_U_IDX];
776 	}
777 
778 	/*
779 	 * Now set up the direct map space using either 2MB or 1GB pages
780 	 * Preset PG_M and PG_A because demotion expects it.
781 	 *
782 	 * When filling in entries in the PD pages make sure any excess
783 	 * entries are set to zero as we allocated enough PD pages
784 	 */
785 	if ((amd_feature & AMDID_PAGE1GB) == 0) {
786 		for (i = 0; i < NPDEPG * ndmpdp; i++) {
787 			((pd_entry_t *)DMPDphys)[i] = i << PDRSHIFT;
788 			((pd_entry_t *)DMPDphys)[i] |=
789 			    pmap_bits_default[PG_RW_IDX] |
790 			    pmap_bits_default[PG_V_IDX] |
791 			    pmap_bits_default[PG_PS_IDX] |
792 			    pmap_bits_default[PG_G_IDX] |
793 			    pmap_bits_default[PG_M_IDX] |
794 			    pmap_bits_default[PG_A_IDX];
795 		}
796 
797 		/*
798 		 * And the direct map space's PDP
799 		 */
800 		for (i = 0; i < ndmpdp; i++) {
801 			((pdp_entry_t *)DMPDPphys)[i] = DMPDphys +
802 							(i << PAGE_SHIFT);
803 			((pdp_entry_t *)DMPDPphys)[i] |=
804 			    pmap_bits_default[PG_RW_IDX] |
805 			    pmap_bits_default[PG_V_IDX] |
806 			    pmap_bits_default[PG_U_IDX];
807 		}
808 	} else {
809 		for (i = 0; i < ndmpdp; i++) {
810 			((pdp_entry_t *)DMPDPphys)[i] =
811 						(vm_paddr_t)i << PDPSHIFT;
812 			((pdp_entry_t *)DMPDPphys)[i] |=
813 			    pmap_bits_default[PG_RW_IDX] |
814 			    pmap_bits_default[PG_V_IDX] |
815 			    pmap_bits_default[PG_PS_IDX] |
816 			    pmap_bits_default[PG_G_IDX] |
817 			    pmap_bits_default[PG_M_IDX] |
818 			    pmap_bits_default[PG_A_IDX];
819 		}
820 	}
821 
822 	/* And recursively map PML4 to itself in order to get PTmap */
823 	((pdp_entry_t *)KPML4phys)[PML4PML4I] = KPML4phys;
824 	((pdp_entry_t *)KPML4phys)[PML4PML4I] |=
825 	    pmap_bits_default[PG_RW_IDX] |
826 	    pmap_bits_default[PG_V_IDX] |
827 	    pmap_bits_default[PG_U_IDX];
828 
829 	/*
830 	 * Connect the Direct Map slots up to the PML4
831 	 */
832 	for (j = 0; j < NDMPML4E; ++j) {
833 		((pdp_entry_t *)KPML4phys)[DMPML4I + j] =
834 		    (DMPDPphys + ((vm_paddr_t)j << PML4SHIFT)) |
835 		    pmap_bits_default[PG_RW_IDX] |
836 		    pmap_bits_default[PG_V_IDX] |
837 		    pmap_bits_default[PG_U_IDX];
838 	}
839 
840 	/*
841 	 * Connect the KVA slot up to the PML4
842 	 */
843 	((pdp_entry_t *)KPML4phys)[KPML4I] = KPDPphys;
844 	((pdp_entry_t *)KPML4phys)[KPML4I] |=
845 	    pmap_bits_default[PG_RW_IDX] |
846 	    pmap_bits_default[PG_V_IDX] |
847 	    pmap_bits_default[PG_U_IDX];
848 }
849 
850 /*
851  *	Bootstrap the system enough to run with virtual memory.
852  *
853  *	On the i386 this is called after mapping has already been enabled
854  *	and just syncs the pmap module with what has already been done.
855  *	[We can't call it easily with mapping off since the kernel is not
856  *	mapped with PA == VA, hence we would have to relocate every address
857  *	from the linked base (virtual) address "KERNBASE" to the actual
858  *	(physical) address starting relative to 0]
859  */
860 void
861 pmap_bootstrap(vm_paddr_t *firstaddr)
862 {
863 	vm_offset_t va;
864 	pt_entry_t *pte;
865 
866 	KvaStart = VM_MIN_KERNEL_ADDRESS;
867 	KvaEnd = VM_MAX_KERNEL_ADDRESS;
868 	KvaSize = KvaEnd - KvaStart;
869 
870 	avail_start = *firstaddr;
871 
872 	/*
873 	 * Create an initial set of page tables to run the kernel in.
874 	 */
875 	create_pagetables(firstaddr);
876 
877 	virtual2_start = KvaStart;
878 	virtual2_end = PTOV_OFFSET;
879 
880 	virtual_start = (vm_offset_t) PTOV_OFFSET + *firstaddr;
881 	virtual_start = pmap_kmem_choose(virtual_start);
882 
883 	virtual_end = VM_MAX_KERNEL_ADDRESS;
884 
885 	/* XXX do %cr0 as well */
886 	load_cr4(rcr4() | CR4_PGE | CR4_PSE);
887 	load_cr3(KPML4phys);
888 
889 	/*
890 	 * Initialize protection array.
891 	 */
892 	i386_protection_init();
893 
894 	/*
895 	 * The kernel's pmap is statically allocated so we don't have to use
896 	 * pmap_create, which is unlikely to work correctly at this part of
897 	 * the boot sequence (XXX and which no longer exists).
898 	 */
899 	kernel_pmap.pm_pml4 = (pdp_entry_t *) (PTOV_OFFSET + KPML4phys);
900 	kernel_pmap.pm_count = 1;
901 	kernel_pmap.pm_active = (cpumask_t)-1 & ~CPUMASK_LOCK;
902 	RB_INIT(&kernel_pmap.pm_pvroot);
903 	spin_init(&kernel_pmap.pm_spin);
904 	lwkt_token_init(&kernel_pmap.pm_token, "kpmap_tok");
905 
906 	/*
907 	 * Reserve some special page table entries/VA space for temporary
908 	 * mapping of pages.
909 	 */
910 #define	SYSMAP(c, p, v, n)	\
911 	v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);
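	/*
	 * SYSMAP(type, pte_var, va_var, npages) carves npages of VA out of
	 * the scratch area and records both the chosen VA and the pte
	 * pointer used to enter mappings into it later.
	 */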
912 
913 	va = virtual_start;
914 	pte = vtopte(va);
915 
916 	/*
917 	 * CMAP1/CMAP2 are used for zeroing and copying pages.
918 	 */
919 	SYSMAP(caddr_t, CMAP1, CADDR1, 1)
920 
921 	/*
922 	 * Crashdump maps.
923 	 */
924 	SYSMAP(caddr_t, pt_crashdumpmap, crashdumpmap, MAXDUMPPGS);
925 
926 	/*
927 	 * ptvmmap is used for reading arbitrary physical pages via
928 	 * /dev/mem.
929 	 */
930 	SYSMAP(caddr_t, ptmmap, ptvmmap, 1)
931 
932 	/*
933 	 * msgbufp is used to map the system message buffer.
934 	 * XXX msgbufmap is not used.
935 	 */
936 	SYSMAP(struct msgbuf *, msgbufmap, msgbufp,
937 	       atop(round_page(MSGBUF_SIZE)))
938 
939 	virtual_start = va;
940 
941 	*CMAP1 = 0;
942 
943 	/*
944 	 * PG_G is terribly broken on SMP because we IPI invltlb's in some
945 	 * cases rather than invlpg's.  Actually, I don't even know why it
946 	 * works under UP, given the self-referential page table mappings.
947 	 */
948 //	pgeflag = 0;
949 
950 	/*
951 	 * Initialize the 4MB page size flag
952 	 */
953 //	pseflag = 0;
954 	/*
955 	 * The 4MB page version of the initial
956 	 * kernel page mapping.
957 	 */
958 	pdir4mb = 0;
959 
960 #if !defined(DISABLE_PSE)
961 	if (cpu_feature & CPUID_PSE) {
962 		pt_entry_t ptditmp;
963 		/*
964 		 * Note that we have enabled PSE mode
965 		 */
966 //		pseflag = kernel_pmap.pmap_bits[PG_PS_IDX];
967 		ptditmp = *(PTmap + x86_64_btop(KERNBASE));
968 		ptditmp &= ~(NBPDR - 1);
969 		ptditmp |= pmap_bits_default[PG_V_IDX] |
970 		    pmap_bits_default[PG_RW_IDX] |
971 		    pmap_bits_default[PG_PS_IDX] |
972 		    pmap_bits_default[PG_U_IDX];
973 //		    pgeflag;
974 		pdir4mb = ptditmp;
975 	}
976 #endif
977 	cpu_invltlb();
978 
979 	/* Initialize the PAT MSR */
980 	pmap_init_pat();
981 
982 	pmap_pinit_defaults(&kernel_pmap);
983 }
984 
985 /*
986  * Setup the PAT MSR.
987  */
988 void
989 pmap_init_pat(void)
990 {
991 	uint64_t pat_msr;
992 	u_long cr0, cr4;
993 
994 	/*
995 	 * Default values mapping PATi,PCD,PWT bits at system reset.
996 	 * The default values effectively ignore the PATi bit by
997 	 * repeating the encodings for 0-3 in 4-7, and map the PCD
998 	 * and PWT bit combinations to the expected PAT types.
999 	 */
1000 	pat_msr = PAT_VALUE(0, PAT_WRITE_BACK) |	/* 000 */
1001 		  PAT_VALUE(1, PAT_WRITE_THROUGH) |	/* 001 */
1002 		  PAT_VALUE(2, PAT_UNCACHED) |		/* 010 */
1003 		  PAT_VALUE(3, PAT_UNCACHEABLE) |	/* 011 */
1004 		  PAT_VALUE(4, PAT_WRITE_BACK) |	/* 100 */
1005 		  PAT_VALUE(5, PAT_WRITE_THROUGH) |	/* 101 */
1006 		  PAT_VALUE(6, PAT_UNCACHED) |		/* 110 */
1007 		  PAT_VALUE(7, PAT_UNCACHEABLE);	/* 111 */
1008 	pat_pte_index[PAT_WRITE_BACK]	= 0;
1009 	pat_pte_index[PAT_WRITE_THROUGH]= 0         | X86_PG_NC_PWT;
1010 	pat_pte_index[PAT_UNCACHED]	= X86_PG_NC_PCD;
1011 	pat_pte_index[PAT_UNCACHEABLE]	= X86_PG_NC_PCD | X86_PG_NC_PWT;
1012 	pat_pte_index[PAT_WRITE_PROTECTED] = pat_pte_index[PAT_UNCACHEABLE];
1013 	pat_pte_index[PAT_WRITE_COMBINING] = pat_pte_index[PAT_UNCACHEABLE];
1014 
1015 	if (cpu_feature & CPUID_PAT) {
1016 		/*
1017 		 * If we support the PAT then set-up entries for
1018 		 * WRITE_PROTECTED and WRITE_COMBINING using bit patterns
1019 		 * 4 and 5.
1020 		 */
1021 		pat_msr = (pat_msr & ~PAT_MASK(4)) |
1022 			  PAT_VALUE(4, PAT_WRITE_PROTECTED);
1023 		pat_msr = (pat_msr & ~PAT_MASK(5)) |
1024 			  PAT_VALUE(5, PAT_WRITE_COMBINING);
1025 		pat_pte_index[PAT_WRITE_PROTECTED] = X86_PG_PTE_PAT | 0;
1026 		pat_pte_index[PAT_WRITE_COMBINING] = X86_PG_PTE_PAT | X86_PG_NC_PWT;
1027 
1028 		/*
1029 		 * Then enable the PAT
1030 		 */
1031 
1032 		/* Disable PGE. */
1033 		cr4 = rcr4();
1034 		load_cr4(cr4 & ~CR4_PGE);
1035 
1036 		/* Disable caches (CD = 1, NW = 0). */
1037 		cr0 = rcr0();
1038 		load_cr0((cr0 & ~CR0_NW) | CR0_CD);
1039 
1040 		/* Flushes caches and TLBs. */
1041 		wbinvd();
1042 		cpu_invltlb();
1043 
1044 		/* Update PAT and index table. */
1045 		wrmsr(MSR_PAT, pat_msr);
1046 
1047 		/* Flush caches and TLBs again. */
1048 		wbinvd();
1049 		cpu_invltlb();
1050 
1051 		/* Restore caches and PGE. */
1052 		load_cr0(cr0);
1053 		load_cr4(cr4);
1054 		PatMsr = pat_msr;
1055 	}
1056 }
1057 
1058 /*
1059  * Set 4mb pdir for mp startup
1060  */
1061 void
1062 pmap_set_opt(void)
1063 {
1064 	if (cpu_feature & CPUID_PSE) {
1065 		load_cr4(rcr4() | CR4_PSE);
1066 		if (pdir4mb && mycpu->gd_cpuid == 0) {	/* only on BSP */
1067 			cpu_invltlb();
1068 		}
1069 	}
1070 }
1071 
1072 /*
1073  *	Initialize the pmap module.
1074  *	Called by vm_init, to initialize any structures that the pmap
1075  *	system needs to map virtual memory.
1076  *	pmap_init has been enhanced to support discontiguous physical
1077  *	memory in a fairly consistent way.
1078  */
1079 void
1080 pmap_init(void)
1081 {
1082 	int i;
1083 	int initial_pvs;
1084 
1085 	/*
1086 	 * Allocate memory for random pmap data structures.  Includes the
1087 	 * pv_head_table.
1088 	 */
1089 
1090 	for (i = 0; i < vm_page_array_size; i++) {
1091 		vm_page_t m;
1092 
1093 		m = &vm_page_array[i];
1094 		TAILQ_INIT(&m->md.pv_list);
1095 	}
1096 
1097 	/*
1098 	 * init the pv free list
1099 	 */
1100 	initial_pvs = vm_page_array_size;
1101 	if (initial_pvs < MINPV)
1102 		initial_pvs = MINPV;
1103 	pvzone = &pvzone_store;
1104 	pvinit = (void *)kmem_alloc(&kernel_map,
1105 				    initial_pvs * sizeof (struct pv_entry));
1106 	zbootinit(pvzone, "PV ENTRY", sizeof (struct pv_entry),
1107 		  pvinit, initial_pvs);
1108 
1109 	/*
1110 	 * Now it is safe to enable pv_table recording.
1111 	 */
1112 	pmap_initialized = TRUE;
1113 }
1114 
1115 /*
1116  * Initialize the address space (zone) for the pv_entries.  Set a
1117  * high water mark so that the system can recover from excessive
1118  * numbers of pv entries.
1119  */
1120 void
1121 pmap_init2(void)
1122 {
1123 	int shpgperproc = PMAP_SHPGPERPROC;
1124 	int entry_max;
1125 
1126 	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
1127 	pv_entry_max = shpgperproc * maxproc + vm_page_array_size;
1128 	TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
1129 	pv_entry_high_water = 9 * (pv_entry_max / 10);
1130 
1131 	/*
1132 	 * Subtract out pages already installed in the zone (hack)
1133 	 */
1134 	entry_max = pv_entry_max - vm_page_array_size;
1135 	if (entry_max <= 0)
1136 		entry_max = 1;
1137 
1138 	zinitna(pvzone, &pvzone_obj, NULL, 0, entry_max, ZONE_INTERRUPT, 1);
1139 }
1140 
1141 /*
1142  * Typically used to initialize a fictitious page by vm/device_pager.c
1143  */
1144 void
1145 pmap_page_init(struct vm_page *m)
1146 {
1147 	vm_page_init(m);
1148 	TAILQ_INIT(&m->md.pv_list);
1149 }
1150 
1151 /***************************************************
1152  * Low level helper routines.....
1153  ***************************************************/
1154 
1155 /*
1156  * this routine defines the region(s) of memory that should
1157  * not be tested for the modified bit.
1158  */
1159 static __inline
1160 int
1161 pmap_track_modified(vm_pindex_t pindex)
1162 {
1163 	vm_offset_t va = (vm_offset_t)pindex << PAGE_SHIFT;
1164 	if ((va < clean_sva) || (va >= clean_eva))
1165 		return 1;
1166 	else
1167 		return 0;
1168 }
1169 
1170 /*
1171  * Extract the physical page address associated with the map/VA pair.
1172  * The page must be wired for this to work reliably.
1173  *
1174  * XXX for the moment we're using pv_find() instead of pv_get(), as
1175  *     callers might be expecting non-blocking operation.
1176  */
1177 vm_paddr_t
1178 pmap_extract(pmap_t pmap, vm_offset_t va)
1179 {
1180 	vm_paddr_t rtval;
1181 	pv_entry_t pt_pv;
1182 	pt_entry_t *ptep;
1183 
1184 	rtval = 0;
1185 	if (va >= VM_MAX_USER_ADDRESS) {
1186 		/*
1187 		 * Kernel page directories might be direct-mapped and
1188 		 * there is typically no PV tracking of pte's
1189 		 */
1190 		pd_entry_t *pt;
1191 
1192 		pt = pmap_pt(pmap, va);
1193 		if (pt && (*pt & pmap->pmap_bits[PG_V_IDX])) {
1194 			if (*pt & pmap->pmap_bits[PG_PS_IDX]) {
1195 				rtval = *pt & PG_PS_FRAME;
1196 				rtval |= va & PDRMASK;
1197 			} else {
1198 				ptep = pmap_pt_to_pte(*pt, va);
1199 				if (*pt & pmap->pmap_bits[PG_V_IDX]) {
1200 					rtval = *ptep & PG_FRAME;
1201 					rtval |= va & PAGE_MASK;
1202 				}
1203 			}
1204 		}
1205 	} else {
1206 		/*
1207 		 * User pages currently do not direct-map the page directory
1208 		 * and some pages might not use managed PVs.  But all PT's
1209 		 * will have a PV.
1210 		 */
1211 		pt_pv = pv_find(pmap, pmap_pt_pindex(va));
1212 		if (pt_pv) {
1213 			ptep = pv_pte_lookup(pt_pv, pmap_pte_index(va));
1214 			if (*ptep & pmap->pmap_bits[PG_V_IDX]) {
1215 				rtval = *ptep & PG_FRAME;
1216 				rtval |= va & PAGE_MASK;
1217 			}
1218 			pv_drop(pt_pv);
1219 		}
1220 	}
1221 	return rtval;
1222 }
1223 
1224 /*
1225  * Similar to extract but checks protections, SMP-friendly short-cut for
1226  * vm_fault_page[_quick]().  Can return NULL to cause the caller to
1227  * fall-through to the real fault code.
1228  *
1229  * The returned page, if not NULL, is held (and not busied).
1230  */
1231 vm_page_t
1232 pmap_fault_page_quick(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
1233 {
1234 	if (pmap && va < VM_MAX_USER_ADDRESS) {
1235 		pv_entry_t pt_pv;
1236 		pv_entry_t pte_pv;
1237 		pt_entry_t *ptep;
1238 		pt_entry_t req;
1239 		vm_page_t m;
1240 		int error;
1241 
1242 		req = pmap->pmap_bits[PG_V_IDX] |
1243 		      pmap->pmap_bits[PG_U_IDX];
1244 		if (prot & VM_PROT_WRITE)
1245 			req |= pmap->pmap_bits[PG_RW_IDX];
1246 
1247 		pt_pv = pv_find(pmap, pmap_pt_pindex(va));
1248 		if (pt_pv == NULL)
1249 			return (NULL);
1250 		ptep = pv_pte_lookup(pt_pv, pmap_pte_index(va));
1251 		if ((*ptep & req) != req) {
1252 			pv_drop(pt_pv);
1253 			return (NULL);
1254 		}
1255 		pte_pv = pv_get_try(pmap, pmap_pte_pindex(va), &error);
1256 		if (pte_pv && error == 0) {
1257 			m = pte_pv->pv_m;
1258 			vm_page_hold(m);
1259 			if (prot & VM_PROT_WRITE)
1260 				vm_page_dirty(m);
1261 			pv_put(pte_pv);
1262 		} else if (pte_pv) {
1263 			pv_drop(pte_pv);
1264 			m = NULL;
1265 		} else {
1266 			m = NULL;
1267 		}
1268 		pv_drop(pt_pv);
1269 		return(m);
1270 	} else {
1271 		return(NULL);
1272 	}
1273 }
1274 
1275 /*
1276  * Extract the physical page address associated with a kernel virtual address.
1277  */
1278 vm_paddr_t
1279 pmap_kextract(vm_offset_t va)
1280 {
1281 	pd_entry_t pt;		/* pt entry in pd */
1282 	vm_paddr_t pa;
1283 
1284 	if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) {
1285 		pa = DMAP_TO_PHYS(va);
1286 	} else {
1287 		pt = *vtopt(va);
1288 		if (pt & kernel_pmap.pmap_bits[PG_PS_IDX]) {
1289 			pa = (pt & PG_PS_FRAME) | (va & PDRMASK);
1290 		} else {
1291 			/*
1292 			 * Beware of a concurrent promotion that changes the
1293 			 * PDE at this point!  For example, vtopte() must not
1294 			 * be used to access the PTE because it would use the
1295 			 * new PDE.  It is, however, safe to use the old PDE
1296 			 * because the page table page is preserved by the
1297 			 * promotion.
1298 			 */
1299 			pa = *pmap_pt_to_pte(pt, va);
1300 			pa = (pa & PG_FRAME) | (va & PAGE_MASK);
1301 		}
1302 	}
1303 	return pa;
1304 }
1305 
1306 /***************************************************
1307  * Low level mapping routines.....
1308  ***************************************************/
1309 
1310 /*
1311  * Routine: pmap_kenter
1312  * Function:
1313  *  	Add a wired page to the KVA
1314  *  	NOTE! In order for the mapping to take effect you should do
1315  *  	an invltlb after doing the pmap_kenter().
1316  */
1317 void
1318 pmap_kenter(vm_offset_t va, vm_paddr_t pa)
1319 {
1320 	pt_entry_t *pte;
1321 	pt_entry_t npte;
1322 	pmap_inval_info info;
1323 
1324 	pmap_inval_init(&info);				/* XXX remove */
1325 	npte = pa |
1326 	    kernel_pmap.pmap_bits[PG_RW_IDX] |
1327 	    kernel_pmap.pmap_bits[PG_V_IDX];
1328 //	    pgeflag;
1329 	pte = vtopte(va);
1330 	pmap_inval_interlock(&info, &kernel_pmap, va);	/* XXX remove */
1331 	*pte = npte;
1332 	pmap_inval_deinterlock(&info, &kernel_pmap);	/* XXX remove */
1333 	pmap_inval_done(&info);				/* XXX remove */
1334 }
1335 
1336 /*
1337  * Routine: pmap_kenter_quick
1338  * Function:
1339  *  	Similar to pmap_kenter(), except we only invalidate the
1340  *  	mapping on the current CPU.
1341  */
1342 void
1343 pmap_kenter_quick(vm_offset_t va, vm_paddr_t pa)
1344 {
1345 	pt_entry_t *pte;
1346 	pt_entry_t npte;
1347 
1348 	npte = pa |
1349 	    kernel_pmap.pmap_bits[PG_RW_IDX] |
1350 	    kernel_pmap.pmap_bits[PG_V_IDX];
1351 //	    pgeflag;
1352 	pte = vtopte(va);
1353 	*pte = npte;
1354 	cpu_invlpg((void *)va);
1355 }
1356 
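/*
 * Synchronize a previously-entered kernel mapping: pmap_kenter_sync()
 * interlocks the invalidation across all cpus, while
 * pmap_kenter_sync_quick() only invalidates on the current cpu.
 */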
1357 void
1358 pmap_kenter_sync(vm_offset_t va)
1359 {
1360 	pmap_inval_info info;
1361 
1362 	pmap_inval_init(&info);
1363 	pmap_inval_interlock(&info, &kernel_pmap, va);
1364 	pmap_inval_deinterlock(&info, &kernel_pmap);
1365 	pmap_inval_done(&info);
1366 }
1367 
1368 void
1369 pmap_kenter_sync_quick(vm_offset_t va)
1370 {
1371 	cpu_invlpg((void *)va);
1372 }
1373 
1374 /*
1375  * remove a page from the kernel pagetables
1376  */
1377 void
1378 pmap_kremove(vm_offset_t va)
1379 {
1380 	pt_entry_t *pte;
1381 	pmap_inval_info info;
1382 
1383 	pmap_inval_init(&info);
1384 	pte = vtopte(va);
1385 	pmap_inval_interlock(&info, &kernel_pmap, va);
1386 	(void)pte_load_clear(pte);
1387 	pmap_inval_deinterlock(&info, &kernel_pmap);
1388 	pmap_inval_done(&info);
1389 }
1390 
1391 void
1392 pmap_kremove_quick(vm_offset_t va)
1393 {
1394 	pt_entry_t *pte;
1395 	pte = vtopte(va);
1396 	(void)pte_load_clear(pte);
1397 	cpu_invlpg((void *)va);
1398 }
1399 
1400 /*
1401  * XXX these need to be recoded.  They are not used in any critical path.
1402  */
1403 void
1404 pmap_kmodify_rw(vm_offset_t va)
1405 {
1406 	atomic_set_long(vtopte(va), kernel_pmap.pmap_bits[PG_RW_IDX]);
1407 	cpu_invlpg((void *)va);
1408 }
1409 
1410 /* NOT USED
1411 void
1412 pmap_kmodify_nc(vm_offset_t va)
1413 {
1414 	atomic_set_long(vtopte(va), PG_N);
1415 	cpu_invlpg((void *)va);
1416 }
1417 */
1418 
1419 /*
1420  * Used to map a range of physical addresses into kernel virtual
1421  * address space during the low level boot, typically to map the
1422  * dump bitmap, message buffer, and vm_page_array.
1423  *
1424  * These mappings are typically made at some point after the end of the
1425  * kernel text+data.
1426  *
1427  * We could return PHYS_TO_DMAP(start) here and not allocate any
1428  * via (*virtp), but then kmem from userland and kernel dumps won't
1429  * have access to the related pointers.
1430  */
1431 vm_offset_t
1432 pmap_map(vm_offset_t *virtp, vm_paddr_t start, vm_paddr_t end, int prot)
1433 {
1434 	vm_offset_t va;
1435 	vm_offset_t va_start;
1436 
1437 	/*return PHYS_TO_DMAP(start);*/
1438 
1439 	va_start = *virtp;
1440 	va = va_start;
1441 
1442 	while (start < end) {
1443 		pmap_kenter_quick(va, start);
1444 		va += PAGE_SIZE;
1445 		start += PAGE_SIZE;
1446 	}
1447 	*virtp = va;
1448 	return va_start;
1449 }
1450 
1451 #define PMAP_CLFLUSH_THRESHOLD  (2 * 1024 * 1024)
1452 
1453 /*
1454  * Remove the specified set of pages from the data and instruction caches.
1455  *
1456  * In contrast to pmap_invalidate_cache_range(), this function does not
1457  * rely on the CPU's self-snoop feature, because it is intended for use
1458  * when moving pages into a different cache domain.
1459  */
1460 void
1461 pmap_invalidate_cache_pages(vm_page_t *pages, int count)
1462 {
1463 	vm_offset_t daddr, eva;
1464 	int i;
1465 
1466 	if (count >= PMAP_CLFLUSH_THRESHOLD / PAGE_SIZE ||
1467 	    (cpu_feature & CPUID_CLFSH) == 0)
1468 		wbinvd();
1469 	else {
1470 		cpu_mfence();
1471 		for (i = 0; i < count; i++) {
1472 			daddr = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pages[i]));
1473 			eva = daddr + PAGE_SIZE;
1474 			for (; daddr < eva; daddr += cpu_clflush_line_size)
1475 				clflush(daddr);
1476 		}
1477 		cpu_mfence();
1478 	}
1479 }
1480 
1481 void
1482 pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
1483 {
1484 	KASSERT((sva & PAGE_MASK) == 0,
1485 	    ("pmap_invalidate_cache_range: sva not page-aligned"));
1486 	KASSERT((eva & PAGE_MASK) == 0,
1487 	    ("pmap_invalidate_cache_range: eva not page-aligned"));
1488 
1489 	if (cpu_feature & CPUID_SS) {
1490 		; /* If "Self Snoop" is supported, do nothing. */
1491 	} else {
1492 		/* Globally invalidate caches */
1493 		cpu_wbinvd_on_all_cpus();
1494 	}
1495 }
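
/*
 * Invalidate a range of virtual addresses on all cpus currently using
 * this pmap.
 */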
1496 void
1497 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
1498 {
1499 	smp_invlpg_range(pmap->pm_active, sva, eva);
1500 }
1501 
1502 /*
1503  * Add a list of wired pages to the kva.
1504  * This routine is only used for temporary
1505  * kernel mappings that do not need to have
1506  * page modification or references recorded.
1507  * Note that old mappings are simply written
1508  * over.  The page *must* be wired.
1509  */
1510 void
1511 pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
1512 {
1513 	vm_offset_t end_va;
1514 
1515 	end_va = va + count * PAGE_SIZE;
1516 
1517 	while (va < end_va) {
1518 		pt_entry_t *pte;
1519 
1520 		pte = vtopte(va);
1521 		*pte = VM_PAGE_TO_PHYS(*m) |
1522 		    kernel_pmap.pmap_bits[PG_RW_IDX] |
1523 		    kernel_pmap.pmap_bits[PG_V_IDX] |
1524 		    kernel_pmap.pmap_cache_bits[(*m)->pat_mode];
1525 //		pgeflag;
1526 		cpu_invlpg((void *)va);
1527 		va += PAGE_SIZE;
1528 		m++;
1529 	}
1530 	smp_invltlb();
1531 }
1532 
1533 /*
1534  * This routine jerks page mappings from the
1535  * kernel -- it is meant only for temporary mappings.
1536  *
1537  * MPSAFE, INTERRUPT SAFE (cluster callback)
1538  */
1539 void
1540 pmap_qremove(vm_offset_t va, int count)
1541 {
1542 	vm_offset_t end_va;
1543 
1544 	end_va = va + count * PAGE_SIZE;
1545 
1546 	while (va < end_va) {
1547 		pt_entry_t *pte;
1548 
1549 		pte = vtopte(va);
1550 		(void)pte_load_clear(pte);
1551 		cpu_invlpg((void *)va);
1552 		va += PAGE_SIZE;
1553 	}
1554 	smp_invltlb();
1555 }
1556 
1557 /*
1558  * Create a new thread and optionally associate it with a (new) process.
1559  * NOTE! the new thread's cpu may not equal the current cpu.
1560  */
1561 void
1562 pmap_init_thread(thread_t td)
1563 {
1564 	/* enforce pcb placement & alignment */
1565 	td->td_pcb = (struct pcb *)(td->td_kstack + td->td_kstack_size) - 1;
1566 	td->td_pcb = (struct pcb *)((intptr_t)td->td_pcb & ~(intptr_t)0xF);
1567 	td->td_savefpu = &td->td_pcb->pcb_save;
1568 	td->td_sp = (char *)td->td_pcb;	/* no -16 */
1569 }
1570 
1571 /*
1572  * This routine directly affects the fork perf for a process.
1573  */
1574 void
1575 pmap_init_proc(struct proc *p)
1576 {
1577 }
1578 
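/*
 * Load the default native-x86 pmap_bits, protection codes, PAT cache
 * bits, and standard user access functions into the pmap.  A VMM
 * (EPT/NPT) pmap may override these later (see pmap_bits_default above).
 */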
1579 static void
1580 pmap_pinit_defaults(struct pmap *pmap)
1581 {
1582 	bcopy(pmap_bits_default, pmap->pmap_bits,
1583 	      sizeof(pmap_bits_default));
1584 	bcopy(protection_codes, pmap->protection_codes,
1585 	      sizeof(protection_codes));
1586 	bcopy(pat_pte_index, pmap->pmap_cache_bits,
1587 	      sizeof(pat_pte_index));
1588 	pmap->pmap_cache_mask = X86_PG_NC_PWT | X86_PG_NC_PCD | X86_PG_PTE_PAT;
1589 	pmap->copyinstr = std_copyinstr;
1590 	pmap->copyin = std_copyin;
1591 	pmap->copyout = std_copyout;
1592 	pmap->fubyte = std_fubyte;
1593 	pmap->subyte = std_subyte;
1594 	pmap->fuword = std_fuword;
1595 	pmap->suword = std_suword;
1596 	pmap->suword32 = std_suword32;
1597 }
1598 /*
1599  * Initialize pmap0/vmspace0.  This pmap is not added to pmap_list because
1600  * it, and IdlePTD, represent the template used to update all other pmaps.
1601  *
1602  * On architectures where the kernel pmap is not integrated into the user
1603  * process pmap, this pmap represents the process pmap, not the kernel pmap.
1604  * kernel_pmap should be used to directly access the kernel_pmap.
1605  */
1606 void
1607 pmap_pinit0(struct pmap *pmap)
1608 {
1609 	pmap->pm_pml4 = (pml4_entry_t *)(PTOV_OFFSET + KPML4phys);
1610 	pmap->pm_count = 1;
1611 	pmap->pm_active = 0;
1612 	pmap->pm_pvhint = NULL;
1613 	RB_INIT(&pmap->pm_pvroot);
1614 	spin_init(&pmap->pm_spin);
1615 	lwkt_token_init(&pmap->pm_token, "pmap_tok");
1616 	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
1617 	pmap_pinit_defaults(pmap);
1618 }
1619 
1620 /*
1621  * Initialize a preallocated and zeroed pmap structure,
1622  * such as one in a vmspace structure.
1623  */
1624 static void
1625 pmap_pinit_simple(struct pmap *pmap)
1626 {
1627 	/*
1628 	 * Misc initialization
1629 	 */
1630 	pmap->pm_count = 1;
1631 	pmap->pm_active = 0;
1632 	pmap->pm_pvhint = NULL;
1633 	pmap->pm_flags = PMAP_FLAG_SIMPLE;
1634 
1635 	pmap_pinit_defaults(pmap);
1636 
1637 	/*
1638 	 * Don't blow up locks/tokens on re-use (XXX fix/use drop code
1639 	 * for this).
1640 	 */
1641 	if (pmap->pm_pmlpv == NULL) {
1642 		RB_INIT(&pmap->pm_pvroot);
1643 		bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
1644 		spin_init(&pmap->pm_spin);
1645 		lwkt_token_init(&pmap->pm_token, "pmap_tok");
1646 	}
1647 }
1648 
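/*
 * Full initialization of a pmap: allocate the PML4 page and its pv,
 * install the static DMAP, KVA and self-referential PML4 entries, and
 * clear the SIMPLE flag set by pmap_pinit_simple().
 */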
1649 void
1650 pmap_pinit(struct pmap *pmap)
1651 {
1652 	pv_entry_t pv;
1653 	int j;
1654 
1655 	if (pmap->pm_pmlpv) {
1656 		if (pmap->pmap_bits[TYPE_IDX] != REGULAR_PMAP) {
1657 			pmap_puninit(pmap);
1658 		}
1659 	}
1660 
1661 	pmap_pinit_simple(pmap);
1662 	pmap->pm_flags &= ~PMAP_FLAG_SIMPLE;
1663 
1664 	/*
1665 	 * No need to allocate page table space yet but we do need a valid
1666 	 * page directory table.
1667 	 */
1668 	if (pmap->pm_pml4 == NULL) {
1669 		pmap->pm_pml4 =
1670 		    (pml4_entry_t *)kmem_alloc_pageable(&kernel_map, PAGE_SIZE);
1671 	}
1672 
1673 	/*
1674 	 * Allocate the page directory page, which wires it even though
1675 	 * it isn't being entered into some higher level page table (it
1676 	 * being the highest level).  If one is already cached we don't
1677 	 * have to do anything.
1678 	 */
1679 	if ((pv = pmap->pm_pmlpv) == NULL) {
1680 		pv = pmap_allocpte(pmap, pmap_pml4_pindex(), NULL);
1681 		pmap->pm_pmlpv = pv;
1682 		pmap_kenter((vm_offset_t)pmap->pm_pml4,
1683 			    VM_PAGE_TO_PHYS(pv->pv_m));
1684 		pv_put(pv);
1685 
1686 		/*
1687 		 * Install DMAP and KMAP.
1688 		 */
1689 		for (j = 0; j < NDMPML4E; ++j) {
1690 			pmap->pm_pml4[DMPML4I + j] =
1691 			    (DMPDPphys + ((vm_paddr_t)j << PML4SHIFT)) |
1692 			    pmap->pmap_bits[PG_RW_IDX] |
1693 			    pmap->pmap_bits[PG_V_IDX] |
1694 			    pmap->pmap_bits[PG_U_IDX];
1695 		}
1696 		pmap->pm_pml4[KPML4I] = KPDPphys |
1697 		    pmap->pmap_bits[PG_RW_IDX] |
1698 		    pmap->pmap_bits[PG_V_IDX] |
1699 		    pmap->pmap_bits[PG_U_IDX];
1700 
1701 		/*
1702 		 * install self-referential address mapping entry
1703 		 */
1704 		pmap->pm_pml4[PML4PML4I] = VM_PAGE_TO_PHYS(pv->pv_m) |
1705 		    pmap->pmap_bits[PG_V_IDX] |
1706 		    pmap->pmap_bits[PG_RW_IDX] |
1707 		    pmap->pmap_bits[PG_A_IDX] |
1708 		    pmap->pmap_bits[PG_M_IDX];
1709 	} else {
1710 		KKASSERT(pv->pv_m->flags & PG_MAPPED);
1711 		KKASSERT(pv->pv_m->flags & PG_WRITEABLE);
1712 	}
1713 	KKASSERT(pmap->pm_pml4[255] == 0);
1714 	KKASSERT(RB_ROOT(&pmap->pm_pvroot) == pv);
1715 	KKASSERT(pv->pv_entry.rbe_left == NULL);
1716 	KKASSERT(pv->pv_entry.rbe_right == NULL);
1717 }
1718 
1719 /*
1720  * Clean up a pmap structure so it can be physically freed.  This routine
1721  * is called by the vmspace dtor function.  A great deal of pmap data is
1722  * left passively mapped to improve vmspace management so we have a bit
1723  * of cleanup work to do here.
1724  */
1725 void
1726 pmap_puninit(pmap_t pmap)
1727 {
1728 	pv_entry_t pv;
1729 	vm_page_t p;
1730 
1731 	KKASSERT(pmap->pm_active == 0);
1732 	if ((pv = pmap->pm_pmlpv) != NULL) {
1733 		if (pv_hold_try(pv) == 0)
1734 			pv_lock(pv);
1735 		KKASSERT(pv == pmap->pm_pmlpv);
1736 		p = pmap_remove_pv_page(pv);
1737 		pv_free(pv);
1738 		pmap_kremove((vm_offset_t)pmap->pm_pml4);
1739 		vm_page_busy_wait(p, FALSE, "pgpun");
1740 		KKASSERT(p->flags & (PG_FICTITIOUS|PG_UNMANAGED));
1741 		vm_page_unwire(p, 0);
1742 		vm_page_flag_clear(p, PG_MAPPED | PG_WRITEABLE);
1743 
1744 		/*
1745 		 * XXX eventually clean out PML4 static entries and
1746 		 * use vm_page_free_zero()
1747 		 */
1748 		vm_page_free(p);
1749 		pmap->pm_pmlpv = NULL;
1750 	}
1751 	if (pmap->pm_pml4) {
1752 		KKASSERT(pmap->pm_pml4 != (void *)(PTOV_OFFSET + KPML4phys));
1753 		kmem_free(&kernel_map, (vm_offset_t)pmap->pm_pml4, PAGE_SIZE);
1754 		pmap->pm_pml4 = NULL;
1755 	}
1756 	KKASSERT(pmap->pm_stats.resident_count == 0);
1757 	KKASSERT(pmap->pm_stats.wired_count == 0);
1758 }
1759 
1760 /*
1761  * Wire in kernel global address entries.  To avoid a race condition
1762  * between pmap initialization and pmap_growkernel, this procedure
1763  * adds the pmap to the master list (which growkernel scans to update),
1764  * then copies the template.
1765  */
1766 void
1767 pmap_pinit2(struct pmap *pmap)
1768 {
1769 	spin_lock(&pmap_spin);
1770 	TAILQ_INSERT_TAIL(&pmap_list, pmap, pm_pmnode);
1771 	spin_unlock(&pmap_spin);
1772 }
1773 
1774 /*
1775  * This routine is called when various levels in the page table need to
1776  * be populated.  This routine cannot fail.
1777  *
1778  * This function returns two locked pv_entry's, one representing the
1779  * requested pv and one representing the requested pv's parent pv.  If
1780  * the pv did not previously exist it will be mapped into its parent
1781  * and wired, otherwise no additional wire count will be added.
1782  */
1783 static
1784 pv_entry_t
1785 pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, pv_entry_t *pvpp)
1786 {
1787 	pt_entry_t *ptep;
1788 	pv_entry_t pv;
1789 	pv_entry_t pvp;
1790 	vm_pindex_t pt_pindex;
1791 	vm_page_t m;
1792 	int isnew;
1793 	int ispt;
1794 
1795 	/*
1796 	 * If the pv already exists and we aren't being asked for the
1797 	 * parent page table page we can just return it.  A locked+held pv
1798 	 * is returned.  The pv will also have a second hold related to the
1799 	 * pmap association that we don't have to worry about.
1800 	 */
1801 	ispt = 0;
1802 	pv = pv_alloc(pmap, ptepindex, &isnew);
1803 	if (isnew == 0 && pvpp == NULL)
1804 		return(pv);
1805 
1806 	/*
1807 	 * Special case terminal PVs.  These are not page table pages so
1808 	 * no vm_page is allocated (the caller supplied the vm_page).  If
1809 	 * pvpp is non-NULL we are being asked to also removed the pt_pv
1810 	 * for this pv.
1811 	 *
1812 	 * Note that pt_pv's are only returned for user VAs. We assert that
1813 	 * a pt_pv is not being requested for kernel VAs.
1814 	 */
1815 	if (ptepindex < pmap_pt_pindex(0)) {
1816 		if (ptepindex >= NUPTE_USER)
1817 			KKASSERT(pvpp == NULL);
1818 		else
1819 			KKASSERT(pvpp != NULL);
1820 		if (pvpp) {
1821 			pt_pindex = NUPTE_TOTAL + (ptepindex >> NPTEPGSHIFT);
1822 			pvp = pmap_allocpte(pmap, pt_pindex, NULL);
1823 			if (isnew)
1824 				vm_page_wire_quick(pvp->pv_m);
1825 			*pvpp = pvp;
1826 		} else {
1827 			pvp = NULL;
1828 		}
1829 		return(pv);
1830 	}
1831 
1832 	/*
1833 	 * Non-terminal PVs allocate a VM page to represent the page table,
1834  * so we have to resolve pvp, calculate the ptepindex for the pvp,
1835  * and then compute the page table entry index within the pvp before
1836  * falling through.
1837 	 */
1838 	if (ptepindex < pmap_pd_pindex(0)) {
1839 		/*
1840 		 * pv is PT, pvp is PD
1841 		 */
1842 		ptepindex = (ptepindex - pmap_pt_pindex(0)) >> NPDEPGSHIFT;
1843 		ptepindex += NUPTE_TOTAL + NUPT_TOTAL;
1844 		pvp = pmap_allocpte(pmap, ptepindex, NULL);
1845 		if (!isnew)
1846 			goto notnew;
1847 
1848 		/*
1849 		 * PT index in PD
1850 		 */
1851 		ptepindex = pv->pv_pindex - pmap_pt_pindex(0);
1852 		ptepindex &= ((1ul << NPDEPGSHIFT) - 1);
1853 		ispt = 1;
1854 	} else if (ptepindex < pmap_pdp_pindex(0)) {
1855 		/*
1856 		 * pv is PD, pvp is PDP
1857 		 *
1858 		 * SIMPLE PMAP NOTE: Simple pmaps do not allocate above
1859 		 *		     the PD.
1860 		 */
1861 		ptepindex = (ptepindex - pmap_pd_pindex(0)) >> NPDPEPGSHIFT;
1862 		ptepindex += NUPTE_TOTAL + NUPT_TOTAL + NUPD_TOTAL;
1863 
1864 		if (pmap->pm_flags & PMAP_FLAG_SIMPLE) {
1865 			KKASSERT(pvpp == NULL);
1866 			pvp = NULL;
1867 		} else {
1868 			pvp = pmap_allocpte(pmap, ptepindex, NULL);
1869 		}
1870 		if (!isnew)
1871 			goto notnew;
1872 
1873 		/*
1874 		 * PD index in PDP
1875 		 */
1876 		ptepindex = pv->pv_pindex - pmap_pd_pindex(0);
1877 		ptepindex &= ((1ul << NPDPEPGSHIFT) - 1);
1878 	} else if (ptepindex < pmap_pml4_pindex()) {
1879 		/*
1880 		 * pv is PDP, pvp is the root pml4 table
1881 		 */
1882 		pvp = pmap_allocpte(pmap, pmap_pml4_pindex(), NULL);
1883 		if (!isnew)
1884 			goto notnew;
1885 
1886 		/*
1887 		 * PDP index in PML4
1888 		 */
1889 		ptepindex = pv->pv_pindex - pmap_pdp_pindex(0);
1890 		ptepindex &= ((1ul << NPML4EPGSHIFT) - 1);
1891 	} else {
1892 		/*
1893 		 * pv represents the top-level PML4, there is no parent.
1894 		 */
1895 		pvp = NULL;
1896 		if (!isnew)
1897 			goto notnew;
1898 	}
1899 
1900 	/*
1901 	 * This code is only reached if isnew is TRUE and this is not a
1902 	 * terminal PV.  We need to allocate a vm_page for the page table
1903 	 * at this level and enter it into the parent page table.
1904 	 *
1905 	 * page table pages are marked PG_WRITEABLE and PG_MAPPED.
1906 	 */
1907 	for (;;) {
1908 		m = vm_page_alloc(NULL, pv->pv_pindex,
1909 				  VM_ALLOC_NORMAL | VM_ALLOC_SYSTEM |
1910 				  VM_ALLOC_INTERRUPT);
1911 		if (m)
1912 			break;
1913 		vm_wait(0);
1914 	}
1915 	vm_page_spin_lock(m);
1916 	pmap_page_stats_adding(m);
1917 	TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
1918 	pv->pv_m = m;
1919 	vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
1920 	vm_page_spin_unlock(m);
1921 	vm_page_unmanage(m);	/* m must be spinunlocked */
1922 
1923 	if ((m->flags & PG_ZERO) == 0) {
1924 		pmap_zero_page(VM_PAGE_TO_PHYS(m));
1925 	}
1926 #ifdef PMAP_DEBUG
1927 	else {
1928 		pmap_page_assertzero(VM_PAGE_TO_PHYS(m));
1929 	}
1930 #endif
1931 	m->valid = VM_PAGE_BITS_ALL;
1932 	vm_page_flag_clear(m, PG_ZERO);
1933 	vm_page_wire(m);	/* wire for mapping in parent */
1934 
1935 	/*
1936 	 * Wire the page into pvp, bump the wire-count for pvp's page table
1937 	 * page.  Bump the resident_count for the pmap.  There is no pvp
1938 	 * for the top level, address the pm_pml4[] array directly.
1939 	 *
1940 	 * If the caller wants the parent we return it, otherwise
1941 	 * we just put it away.
1942 	 *
1943 	 * No interlock is needed for pte 0 -> non-zero.
1944 	 *
1945 	 * In the situation where *ptep is valid we might have an unmanaged
1946 	 * page table page shared from another page table which we need to
1947 	 * unshare before installing our private page table page.
1948 	 */
1949 	if (pvp) {
1950 		ptep = pv_pte_lookup(pvp, ptepindex);
1951 		if (*ptep & pmap->pmap_bits[PG_V_IDX]) {
1952 			pt_entry_t pte;
1953 			pmap_inval_info info;
1954 
1955 			if (ispt == 0) {
1956 				panic("pmap_allocpte: unexpected pte %p/%d",
1957 				      pvp, (int)ptepindex);
1958 			}
1959 			pmap_inval_init(&info);
1960 			pmap_inval_interlock(&info, pmap, (vm_offset_t)-1);
1961 			pte = pte_load_clear(ptep);
1962 			pmap_inval_deinterlock(&info, pmap);
1963 			pmap_inval_done(&info);
1964 			if (vm_page_unwire_quick(
1965 					PHYS_TO_VM_PAGE(pte & PG_FRAME))) {
1966 				panic("pmap_allocpte: shared pgtable "
1967 				      "pg bad wirecount");
1968 			}
1969 			atomic_add_long(&pmap->pm_stats.resident_count, -1);
1970 		} else {
1971 			vm_page_wire_quick(pvp->pv_m);
1972 		}
1973 		*ptep = VM_PAGE_TO_PHYS(m) |
1974 		    (pmap->pmap_bits[PG_U_IDX] |
1975 		    pmap->pmap_bits[PG_RW_IDX] |
1976 		    pmap->pmap_bits[PG_V_IDX] |
1977 		    pmap->pmap_bits[PG_A_IDX] |
1978 		    pmap->pmap_bits[PG_M_IDX]);
1979 	}
1980 	vm_page_wakeup(m);
1981 notnew:
1982 	if (pvpp)
1983 		*pvpp = pvp;
1984 	else if (pvp)
1985 		pv_put(pvp);
1986 	return (pv);
1987 }
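
/*
 * Illustrative sketch (not compiled): a typical user-VA caller resolves
 * both the terminal pv and its page table pv, looks up the pte slot in
 * the parent with pv_pte_lookup(), and releases both with pv_put() when
 * done.  The names my_pmap and my_va below are hypothetical.
 *
 *	pv_entry_t pte_pv, pt_pv;
 *	pt_entry_t *ptep;
 *
 *	pte_pv = pmap_allocpte(my_pmap, pmap_pte_pindex(my_va), &pt_pv);
 *	ptep = pv_pte_lookup(pt_pv, pmap_pte_index(my_va));
 *	... install or adjust *ptep ...
 *	pv_put(pt_pv);
 *	pv_put(pte_pv);
 */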
1988 
1989 /*
1990  * This version of pmap_allocpte() checks for possible segment optimizations
1991  * that would allow page-table sharing.  It can be called for terminal
1992  * page or page table page ptepindex's.
1993  *
1994  * The function is called with page table page ptepindex's for fictitious
1995  * and unmanaged terminal pages.  That is, we don't want to allocate a
1996  * terminal pv, we just want the pt_pv.  pvpp is usually passed as NULL
1997  * for this case.
1998  *
1999  * This function can return a pv and *pvpp associated with the passed-in
2000  * pmap OR a pv and *pvpp associated with the shared pmap.  In the latter
2001  * case an unmanaged page table page will be entered into the passed-in pmap.
2002  */
2003 static
2004 pv_entry_t
2005 pmap_allocpte_seg(pmap_t pmap, vm_pindex_t ptepindex, pv_entry_t *pvpp,
2006 		  vm_map_entry_t entry, vm_offset_t va)
2007 {
2008 	struct pmap_inval_info info;
2009 	vm_object_t object;
2010 	pmap_t obpmap;
2011 	pmap_t *obpmapp;
2012 	vm_offset_t b;
2013 	pv_entry_t pte_pv;	/* in original or shared pmap */
2014 	pv_entry_t pt_pv;	/* in original or shared pmap */
2015 	pv_entry_t proc_pd_pv;	/* in original pmap */
2016 	pv_entry_t proc_pt_pv;	/* in original pmap */
2017 	pv_entry_t xpv;		/* PT in shared pmap */
2018 	pd_entry_t *pt;		/* PT entry in PD of original pmap */
2019 	pd_entry_t opte;	/* contents of *pt */
2020 	pd_entry_t npte;	/* contents of *pt */
2021 	vm_page_t m;
2022 
2023 retry:
2024 	/*
2025 	 * Basic tests, require a non-NULL vm_map_entry, require proper
2026 	 * alignment and type for the vm_map_entry, require that the
2027 	 * underlying object already be allocated.
2028 	 *
2029 	 * We currently allow any type of object to use this optimization.
2030 	 * The object itself does NOT have to be sized to a multiple of the
2031 	 * segment size, but the memory mapping does.
2032 	 *
2033 	 * XXX don't handle devices currently, because VM_PAGE_TO_PHYS()
2034 	 *     won't work as expected.
2035 	 */
2036 	if (entry == NULL ||
2037 	    pmap_mmu_optimize == 0 ||			/* not enabled */
2038 	    ptepindex >= pmap_pd_pindex(0) ||		/* not terminal */
2039 	    entry->inheritance != VM_INHERIT_SHARE ||	/* not shared */
2040 	    entry->maptype != VM_MAPTYPE_NORMAL ||	/* weird map type */
2041 	    entry->object.vm_object == NULL ||		/* needs VM object */
2042 	    entry->object.vm_object->type == OBJT_DEVICE ||	/* ick */
2043 	    entry->object.vm_object->type == OBJT_MGTDEVICE ||	/* ick */
2044 	    (entry->offset & SEG_MASK) ||		/* must be aligned */
2045 	    (entry->start & SEG_MASK)) {
2046 		return(pmap_allocpte(pmap, ptepindex, pvpp));
2047 	}
2048 
2049 	/*
2050 	 * Make sure the full segment can be represented.
2051 	 */
2052 	b = va & ~(vm_offset_t)SEG_MASK;
2053 	if (b < entry->start || b + SEG_SIZE > entry->end)
2054 		return(pmap_allocpte(pmap, ptepindex, pvpp));
2055 
2056 	/*
2057 	 * If the full segment can be represented dive the VM object's
2058 	 * shared pmap, allocating as required.
2059 	 */
2060 	object = entry->object.vm_object;
2061 
2062 	if (entry->protection & VM_PROT_WRITE)
2063 		obpmapp = &object->md.pmap_rw;
2064 	else
2065 		obpmapp = &object->md.pmap_ro;
2066 
2067 	/*
2068 	 * We allocate what appears to be a normal pmap but because portions
2069 	 * of this pmap are shared with other unrelated pmaps we have to
2070 	 * set pm_active to point to all cpus.
2071 	 *
2072 	 * XXX Currently using pmap_spin to interlock the update, can't use
2073 	 *     vm_object_hold/drop because the token might already be held
2074 	 *     shared OR exclusive and we don't know.
2075 	 */
2076 	while ((obpmap = *obpmapp) == NULL) {
2077 		obpmap = kmalloc(sizeof(*obpmap), M_OBJPMAP, M_WAITOK|M_ZERO);
2078 		pmap_pinit_simple(obpmap);
2079 		pmap_pinit2(obpmap);
2080 		spin_lock(&pmap_spin);
2081 		if (*obpmapp != NULL) {
2082 			/*
2083 			 * Handle race
2084 			 */
2085 			spin_unlock(&pmap_spin);
2086 			pmap_release(obpmap);
2087 			pmap_puninit(obpmap);
2088 			kfree(obpmap, M_OBJPMAP);
2089 		} else {
2090 			obpmap->pm_active = smp_active_mask;
2091 			*obpmapp = obpmap;
2092 			spin_unlock(&pmap_spin);
2093 		}
2094 	}
2095 
2096 	/*
2097 	 * Layering is: PTE, PT, PD, PDP, PML4.  We have to return the
2098 	 * pte/pt using the shared pmap from the object but also adjust
2099 	 * the process pmap's page table page as a side effect.
2100 	 */
2101 
2102 	/*
2103 	 * Resolve the terminal PTE and PT in the shared pmap.  This is what
2104 	 * we will return.  This is true if ptepindex represents a terminal
2105 	 * page, otherwise pte_pv is actually the PT and pt_pv is actually
2106 	 * the PD.
2107 	 */
2108 	pt_pv = NULL;
2109 	pte_pv = pmap_allocpte(obpmap, ptepindex, &pt_pv);
2110 	if (ptepindex >= pmap_pt_pindex(0))
2111 		xpv = pte_pv;
2112 	else
2113 		xpv = pt_pv;
2114 
2115 	/*
2116 	 * Resolve the PD in the process pmap so we can properly share the
2117 	 * page table page.  Lock order is bottom-up (leaf first)!
2118 	 *
2119 	 * NOTE: proc_pt_pv can be NULL.
2120 	 */
2121 	proc_pt_pv = pv_get(pmap, pmap_pt_pindex(b));
2122 	proc_pd_pv = pmap_allocpte(pmap, pmap_pd_pindex(b), NULL);
2123 
2124 	/*
2125 	 * xpv is the page table page pv from the shared object
2126 	 * (for convenience).
2127 	 *
2128 	 * Calculate the pte value for the PT to load into the process PD.
2129 	 * If we have to change it we must properly dispose of the previous
2130 	 * entry.
2131 	 */
2132 	pt = pv_pte_lookup(proc_pd_pv, pmap_pt_index(b));
2133 	npte = VM_PAGE_TO_PHYS(xpv->pv_m) |
2134 	    (pmap->pmap_bits[PG_U_IDX] |
2135 	    pmap->pmap_bits[PG_RW_IDX] |
2136 	    pmap->pmap_bits[PG_V_IDX] |
2137 	    pmap->pmap_bits[PG_A_IDX] |
2138 	    pmap->pmap_bits[PG_M_IDX]);
2139 
2140 	/*
2141 	 * Dispose of previous page table page if it was local to the
2142 	 * process pmap.  If the old pt is not empty we cannot dispose of it
2143 	 * until we clean it out.  This case should not arise very often so
2144 	 * it is not optimized.
2145 	 */
2146 	if (proc_pt_pv) {
2147 		if (proc_pt_pv->pv_m->wire_count != 1) {
2148 			pv_put(proc_pd_pv);
2149 			pv_put(proc_pt_pv);
2150 			pv_put(pt_pv);
2151 			pv_put(pte_pv);
2152 			pmap_remove(pmap,
2153 				    va & ~(vm_offset_t)SEG_MASK,
2154 				    (va + SEG_SIZE) & ~(vm_offset_t)SEG_MASK);
2155 			goto retry;
2156 		}
2157 		pmap_release_pv(proc_pt_pv, proc_pd_pv);
2158 		proc_pt_pv = NULL;
2159 		/* relookup */
2160 		pt = pv_pte_lookup(proc_pd_pv, pmap_pt_index(b));
2161 	}
2162 
2163 	/*
2164 	 * Handle remaining cases.
2165 	 */
2166 	if (*pt == 0) {
2167 		*pt = npte;
2168 		vm_page_wire_quick(xpv->pv_m);
2169 		vm_page_wire_quick(proc_pd_pv->pv_m);
2170 		atomic_add_long(&pmap->pm_stats.resident_count, 1);
2171 	} else if (*pt != npte) {
2172 		pmap_inval_init(&info);
2173 		pmap_inval_interlock(&info, pmap, (vm_offset_t)-1);
2174 
2175 		opte = pte_load_clear(pt);
2176 		KKASSERT(opte && opte != npte);
2177 
2178 		*pt = npte;
2179 		vm_page_wire_quick(xpv->pv_m);	/* pgtable pg that is npte */
2180 
2181 		/*
2182 		 * Clean up opte, bump the wire_count for the process
2183 		 * PD page representing the new entry if it was
2184 		 * previously empty.
2185 		 *
2186 		 * If the entry was not previously empty and we have
2187 		 * a PT in the proc pmap then opte must match that
2188 		 * pt.  The proc pt must be retired (this is done
2189 		 * later on in this procedure).
2190 		 *
2191 		 * NOTE: replacing valid pte, wire_count on proc_pd_pv
2192 		 * stays the same.
2193 		 */
2194 		KKASSERT(opte & pmap->pmap_bits[PG_V_IDX]);
2195 		m = PHYS_TO_VM_PAGE(opte & PG_FRAME);
2196 		if (vm_page_unwire_quick(m)) {
2197 			panic("pmap_allocpte_seg: "
2198 			      "bad wire count %p",
2199 			      m);
2200 		}
2201 
2202 		pmap_inval_deinterlock(&info, pmap);
2203 		pmap_inval_done(&info);
2204 	}
2205 
2206 	/*
2207 	 * Done with the process page directory pv.  Any replaced process
2208 	 * page table page was disposed of above.
2209 	 */
2210 	if (proc_pd_pv)
2211 		pv_put(proc_pd_pv);
2212 	if (pvpp)
2213 		*pvpp = pt_pv;
2214 	else
2215 		pv_put(pt_pv);
2216 
2217 	return (pte_pv);
2218 }
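
/*
 * Illustrative sketch (not compiled): the sharing optimization above only
 * applies to suitably aligned shared mappings.  Roughly, and assuming the
 * usual 2MB segment size, a vm_map_entry qualifies only when
 *
 *	entry->inheritance == VM_INHERIT_SHARE &&
 *	entry->maptype == VM_MAPTYPE_NORMAL &&
 *	entry->object.vm_object != NULL &&
 *	(entry->offset & SEG_MASK) == 0 &&
 *	(entry->start & SEG_MASK) == 0
 *
 * evaluates true; anything else falls back to a private page table via
 * pmap_allocpte().
 */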
2219 
2220 /*
2221  * Release any resources held by the given physical map.
2222  *
2223  * Called when a pmap initialized by pmap_pinit is being released.  Should
2224  * only be called if the map contains no valid mappings.
2225  *
2226  * Caller must hold pmap->pm_token
2227  */
2228 struct pmap_release_info {
2229 	pmap_t	pmap;
2230 	int	retry;
2231 };
2232 
2233 static int pmap_release_callback(pv_entry_t pv, void *data);
2234 
2235 void
2236 pmap_release(struct pmap *pmap)
2237 {
2238 	struct pmap_release_info info;
2239 
2240 	KASSERT(pmap->pm_active == 0,
2241 		("pmap still active! %016jx", (uintmax_t)pmap->pm_active));
2242 
2243 	spin_lock(&pmap_spin);
2244 	TAILQ_REMOVE(&pmap_list, pmap, pm_pmnode);
2245 	spin_unlock(&pmap_spin);
2246 
2247 	/*
2248 	 * Pull pv's off the RB tree in order from low to high and release
2249 	 * each page.
2250 	 */
2251 	info.pmap = pmap;
2252 	do {
2253 		info.retry = 0;
2254 		spin_lock(&pmap->pm_spin);
2255 		RB_SCAN(pv_entry_rb_tree, &pmap->pm_pvroot, NULL,
2256 			pmap_release_callback, &info);
2257 		spin_unlock(&pmap->pm_spin);
2258 	} while (info.retry);
2259 
2260 
2261 	/*
2262 	 * One resident page (the pml4 page) should remain.
2263 	 * No wired pages should remain.
2264 	 */
2265 	KKASSERT(pmap->pm_stats.resident_count ==
2266 		 ((pmap->pm_flags & PMAP_FLAG_SIMPLE) ? 0 : 1));
2267 
2268 	KKASSERT(pmap->pm_stats.wired_count == 0);
2269 }
2270 
2271 static int
2272 pmap_release_callback(pv_entry_t pv, void *data)
2273 {
2274 	struct pmap_release_info *info = data;
2275 	pmap_t pmap = info->pmap;
2276 	int r;
2277 
2278 	if (pv_hold_try(pv)) {
2279 		spin_unlock(&pmap->pm_spin);
2280 	} else {
2281 		spin_unlock(&pmap->pm_spin);
2282 		pv_lock(pv);
2283 	}
2284 	if (pv->pv_pmap != pmap) {
2285 		pv_put(pv);
2286 		spin_lock(&pmap->pm_spin);
2287 		info->retry = 1;
2288 		return(-1);
2289 	}
2290 	r = pmap_release_pv(pv, NULL);
2291 	spin_lock(&pmap->pm_spin);
2292 	return(r);
2293 }
2294 
2295 /*
2296  * Called with held (i.e. also locked) pv.  This function will dispose of
2297  * the lock along with the pv.
2298  *
2299  * If the caller already holds the locked parent page table for pv it
2300  * must pass it as pvp, allowing us to avoid a deadlock, else it can
2301  * pass NULL for pvp.
2302  */
2303 static int
2304 pmap_release_pv(pv_entry_t pv, pv_entry_t pvp)
2305 {
2306 	vm_page_t p;
2307 
2308 	/*
2309 	 * The pmap is currently not spinlocked, pv is held+locked.
2310 	 * Remove the pv's page from its parent's page table.  The
2311 	 * parent's page table page's wire_count will be decremented.
2312 	 */
2313 	pmap_remove_pv_pte(pv, pvp, NULL);
2314 
2315 	/*
2316 	 * Terminal pvs are unhooked from their vm_pages.  Because
2317 	 * terminal pages aren't page table pages they aren't wired
2318 	 * by us, so we have to be sure not to unwire them either.
2319 	 */
2320 	if (pv->pv_pindex < pmap_pt_pindex(0)) {
2321 		pmap_remove_pv_page(pv);
2322 		goto skip;
2323 	}
2324 
2325 	/*
2326 	 * We leave the top-level page table page cached, wired, and
2327 	 * mapped in the pmap until the dtor function (pmap_puninit())
2328 	 * gets called.
2329 	 *
2330 	 * Since we are leaving the top-level pv intact we need
2331 	 * to break out of what would otherwise be an infinite loop.
2332 	 */
2333 	if (pv->pv_pindex == pmap_pml4_pindex()) {
2334 		pv_put(pv);
2335 		return(-1);
2336 	}
2337 
2338 	/*
2339 	 * For page table pages (other than the top-level page),
2340 	 * remove and free the vm_page.  The representative mapping
2341 	 * removed above by pmap_remove_pv_pte() did not undo the
2342 	 * last wire_count so we have to do that as well.
2343 	 */
2344 	p = pmap_remove_pv_page(pv);
2345 	vm_page_busy_wait(p, FALSE, "pmaprl");
2346 	if (p->wire_count != 1) {
2347 		kprintf("p->wire_count for pindex %016lx was %d\n",
2348 			pv->pv_pindex, p->wire_count);
2349 	}
2350 	KKASSERT(p->wire_count == 1);
2351 	KKASSERT(p->flags & PG_UNMANAGED);
2352 
2353 	vm_page_unwire(p, 0);
2354 	KKASSERT(p->wire_count == 0);
2355 
2356 	/*
2357 	 * Theoretically this page, if not the pml4 page, should contain
2358 	 * all-zeros.  But it's just too dangerous to mark it PG_ZERO.  Free
2359 	 * normally.
2360 	 */
2361 	vm_page_free(p);
2362 skip:
2363 	pv_free(pv);
2364 	return 0;
2365 }
2366 
2367 /*
2368  * This function will remove the pte associated with a pv from its parent.
2369  * Terminal pv's are supported.  The removal will be interlocked if info
2370  * is non-NULL.  The caller must dispose of pv instead of just unlocking
2371  * it.
2372  *
2373  * The wire count will be dropped on the parent page table.  The wire
2374  * count on the page being removed (pv->pv_m) from the parent page table
2375  * is NOT touched.  Note that terminal pages will not have any additional
2376  * wire counts while page table pages will have at least one representing
2377  * the mapping, plus others representing sub-mappings.
2378  *
2379  * NOTE: Cannot be called on kernel page table pages, only KVM terminal
2380  *	 pages and user page table and terminal pages.
2381  *
2382  * The pv must be locked.
2383  *
2384  * XXX must lock parent pv's if they exist to remove pte XXX
2385  */
2386 static
2387 void
2388 pmap_remove_pv_pte(pv_entry_t pv, pv_entry_t pvp, struct pmap_inval_info *info)
2389 {
2390 	vm_pindex_t ptepindex = pv->pv_pindex;
2391 	pmap_t pmap = pv->pv_pmap;
2392 	vm_page_t p;
2393 	int gotpvp = 0;
2394 
2395 	KKASSERT(pmap);
2396 
2397 	if (ptepindex == pmap_pml4_pindex()) {
2398 		/*
2399 		 * We are the top level pml4 table, there is no parent.
2400 		 */
2401 		p = pmap->pm_pmlpv->pv_m;
2402 	} else if (ptepindex >= pmap_pdp_pindex(0)) {
2403 		/*
2404 		 * Remove a PDP page from the pml4e.  This can only occur
2405 		 * with user page tables.  We do not have to lock the
2406 		 * pml4 PV so just ignore pvp.
2407 		 */
2408 		vm_pindex_t pml4_pindex;
2409 		vm_pindex_t pdp_index;
2410 		pml4_entry_t *pdp;
2411 
2412 		pdp_index = ptepindex - pmap_pdp_pindex(0);
2413 		if (pvp == NULL) {
2414 			pml4_pindex = pmap_pml4_pindex();
2415 			pvp = pv_get(pv->pv_pmap, pml4_pindex);
2416 			KKASSERT(pvp);
2417 			gotpvp = 1;
2418 		}
2419 		pdp = &pmap->pm_pml4[pdp_index & ((1ul << NPML4EPGSHIFT) - 1)];
2420 		KKASSERT((*pdp & pmap->pmap_bits[PG_V_IDX]) != 0);
2421 		p = PHYS_TO_VM_PAGE(*pdp & PG_FRAME);
2422 		*pdp = 0;
2423 		KKASSERT(info == NULL);
2424 	} else if (ptepindex >= pmap_pd_pindex(0)) {
2425 		/*
2426 		 * Remove a PD page from the pdp
2427 		 *
2428 		 * SIMPLE PMAP NOTE: Non-existent pvp's are ok in the case
2429 		 *		     of a simple pmap because it stops at
2430 		 *		     the PD page.
2431 		 */
2432 		vm_pindex_t pdp_pindex;
2433 		vm_pindex_t pd_index;
2434 		pdp_entry_t *pd;
2435 
2436 		pd_index = ptepindex - pmap_pd_pindex(0);
2437 
2438 		if (pvp == NULL) {
2439 			pdp_pindex = NUPTE_TOTAL + NUPT_TOTAL + NUPD_TOTAL +
2440 				     (pd_index >> NPML4EPGSHIFT);
2441 			pvp = pv_get(pv->pv_pmap, pdp_pindex);
2442 			if (pvp)
2443 				gotpvp = 1;
2444 		}
2445 		if (pvp) {
2446 			pd = pv_pte_lookup(pvp, pd_index &
2447 						((1ul << NPDPEPGSHIFT) - 1));
2448 			KKASSERT((*pd & pmap->pmap_bits[PG_V_IDX]) != 0);
2449 			p = PHYS_TO_VM_PAGE(*pd & PG_FRAME);
2450 			*pd = 0;
2451 		} else {
2452 			KKASSERT(pmap->pm_flags & PMAP_FLAG_SIMPLE);
2453 			p = pv->pv_m;		/* degenerate test later */
2454 		}
2455 		KKASSERT(info == NULL);
2456 	} else if (ptepindex >= pmap_pt_pindex(0)) {
2457 		/*
2458 		 * Remove a PT page from the pd
2459 		 */
2460 		vm_pindex_t pd_pindex;
2461 		vm_pindex_t pt_index;
2462 		pd_entry_t *pt;
2463 
2464 		pt_index = ptepindex - pmap_pt_pindex(0);
2465 
2466 		if (pvp == NULL) {
2467 			pd_pindex = NUPTE_TOTAL + NUPT_TOTAL +
2468 				    (pt_index >> NPDPEPGSHIFT);
2469 			pvp = pv_get(pv->pv_pmap, pd_pindex);
2470 			KKASSERT(pvp);
2471 			gotpvp = 1;
2472 		}
2473 		pt = pv_pte_lookup(pvp, pt_index & ((1ul << NPDPEPGSHIFT) - 1));
2474 		KKASSERT((*pt & pmap->pmap_bits[PG_V_IDX]) != 0);
2475 		p = PHYS_TO_VM_PAGE(*pt & PG_FRAME);
2476 		*pt = 0;
2477 		KKASSERT(info == NULL);
2478 	} else {
2479 		/*
2480 		 * Remove a PTE from the PT page
2481 		 *
2482 		 * NOTE: pv's must be locked bottom-up to avoid deadlocking.
2483 		 *	 pv is a pte_pv so we can safely lock pt_pv.
2484 		 *
2485 		 * NOTE: FICTITIOUS pages may have multiple physical mappings
2486 		 *	 so PHYS_TO_VM_PAGE() will not necessarily work for
2487 		 *	 terminal ptes.
2488 		 */
2489 		vm_pindex_t pt_pindex;
2490 		pt_entry_t *ptep;
2491 		pt_entry_t pte;
2492 		vm_offset_t va;
2493 
2494 		pt_pindex = ptepindex >> NPTEPGSHIFT;
2495 		va = (vm_offset_t)ptepindex << PAGE_SHIFT;
2496 
2497 		if (ptepindex >= NUPTE_USER) {
2498 			ptep = vtopte(ptepindex << PAGE_SHIFT);
2499 			KKASSERT(pvp == NULL);
2500 		} else {
2501 			if (pvp == NULL) {
2502 				pt_pindex = NUPTE_TOTAL +
2503 					    (ptepindex >> NPDPEPGSHIFT);
2504 				pvp = pv_get(pv->pv_pmap, pt_pindex);
2505 				KKASSERT(pvp);
2506 				gotpvp = 1;
2507 			}
2508 			ptep = pv_pte_lookup(pvp, ptepindex &
2509 						  ((1ul << NPDPEPGSHIFT) - 1));
2510 		}
2511 
2512 		if (info)
2513 			pmap_inval_interlock(info, pmap, va);
2514 		pte = pte_load_clear(ptep);
2515 		if (info)
2516 			pmap_inval_deinterlock(info, pmap);
2517 		else
2518 			cpu_invlpg((void *)va);
2519 
2520 		/*
2521 		 * Now update the vm_page_t
2522 		 */
2523 		if ((pte & (pmap->pmap_bits[PG_MANAGED_IDX] | pmap->pmap_bits[PG_V_IDX])) !=
2524 		    (pmap->pmap_bits[PG_MANAGED_IDX]|pmap->pmap_bits[PG_V_IDX])) {
2525 			kprintf("remove_pte badpte %016lx %016lx %d\n",
2526 				pte, pv->pv_pindex,
2527 				pv->pv_pindex < pmap_pt_pindex(0));
2528 		}
2529 		/* PHYS_TO_VM_PAGE() will not work for FICTITIOUS pages */
2530 		/*KKASSERT((pte & (PG_MANAGED|PG_V)) == (PG_MANAGED|PG_V));*/
2531 		if (pte & pmap->pmap_bits[PG_DEVICE_IDX])
2532 			p = pv->pv_m;
2533 		else
2534 			p = PHYS_TO_VM_PAGE(pte & PG_FRAME);
2535 		/* p = pv->pv_m; */
2536 
2537 		if (pte & pmap->pmap_bits[PG_M_IDX]) {
2538 			if (pmap_track_modified(ptepindex))
2539 				vm_page_dirty(p);
2540 		}
2541 		if (pte & pmap->pmap_bits[PG_A_IDX]) {
2542 			vm_page_flag_set(p, PG_REFERENCED);
2543 		}
2544 		if (pte & pmap->pmap_bits[PG_W_IDX])
2545 			atomic_add_long(&pmap->pm_stats.wired_count, -1);
2546 		if (pte & pmap->pmap_bits[PG_G_IDX])
2547 			cpu_invlpg((void *)va);
2548 	}
2549 
2550 	/*
2551 	 * Unwire the parent page table page.  The wire_count cannot go below
2552 	 * 1 here because the parent page table page is itself still mapped.
2553 	 *
2554 	 * XXX remove the assertions later.
2555 	 */
2556 	KKASSERT(pv->pv_m == p);
2557 	if (pvp && vm_page_unwire_quick(pvp->pv_m))
2558 		panic("pmap_remove_pv_pte: Insufficient wire_count");
2559 
2560 	if (gotpvp)
2561 		pv_put(pvp);
2562 }
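
/*
 * Illustrative sketch (not compiled): callers normally pair this routine
 * with pmap_remove_pv_page() and pv_free() to fully retire a terminal pv,
 * as pmap_remove_callback() does further below:
 *
 *	pmap_remove_pv_pte(pte_pv, pt_pv, &info->inval);
 *	pmap_remove_pv_page(pte_pv);
 *	pv_free(pte_pv);
 */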
2563 
2564 /*
2565  * Remove the vm_page association to a pv.  The pv must be locked.
2566  */
2567 static
2568 vm_page_t
2569 pmap_remove_pv_page(pv_entry_t pv)
2570 {
2571 	vm_page_t m;
2572 
2573 	m = pv->pv_m;
2574 	KKASSERT(m);
2575 	vm_page_spin_lock(m);
2576 	pv->pv_m = NULL;
2577 	TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
2578 	pmap_page_stats_deleting(m);
2579 	/*
2580 	if (m->object)
2581 		atomic_add_int(&m->object->agg_pv_list_count, -1);
2582 	*/
2583 	if (TAILQ_EMPTY(&m->md.pv_list))
2584 		vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
2585 	vm_page_spin_unlock(m);
2586 	return(m);
2587 }
2588 
2589 /*
2590  * Grow the number of kernel page table entries, if needed.
2591  *
2592  * This routine is always called to validate any address space
2593  * beyond KERNBASE (for kldloads).  kernel_vm_end only governs the address
2594  * space below KERNBASE.
2595  */
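
/*
 * A rough sketch of the growth granularity, assuming the standard x86-64
 * constants (PAGE_SIZE == 4096, NPTEPG == 512): each iteration below
 * extends the kernel page tables by one PT page worth of address space,
 *
 *	PAGE_SIZE * NPTEPG == 4096 * 512 == 2MB,
 *
 * which is also the alignment applied to kstart/kend via rounddown2()
 * and roundup2().
 */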
2596 void
2597 pmap_growkernel(vm_offset_t kstart, vm_offset_t kend)
2598 {
2599 	vm_paddr_t paddr;
2600 	vm_offset_t ptppaddr;
2601 	vm_page_t nkpg;
2602 	pd_entry_t *pt, newpt;
2603 	pdp_entry_t newpd;
2604 	int update_kernel_vm_end;
2605 
2606 	/*
2607 	 * bootstrap kernel_vm_end on first real VM use
2608 	 */
2609 	if (kernel_vm_end == 0) {
2610 		kernel_vm_end = VM_MIN_KERNEL_ADDRESS;
2611 		nkpt = 0;
2612 		while ((*pmap_pt(&kernel_pmap, kernel_vm_end) & kernel_pmap.pmap_bits[PG_V_IDX]) != 0) {
2613 			kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) &
2614 					~(PAGE_SIZE * NPTEPG - 1);
2615 			nkpt++;
2616 			if (kernel_vm_end - 1 >= kernel_map.max_offset) {
2617 				kernel_vm_end = kernel_map.max_offset;
2618 				break;
2619 			}
2620 		}
2621 	}
2622 
2623 	/*
2624 	 * Fill in the gaps.  kernel_vm_end is only adjusted for ranges
2625 	 * below KERNBASE.  Ranges above KERNBASE are kldloaded and we
2626 	 * do not want to force-fill 128G worth of page tables.
2627 	 */
2628 	if (kstart < KERNBASE) {
2629 		if (kstart > kernel_vm_end)
2630 			kstart = kernel_vm_end;
2631 		KKASSERT(kend <= KERNBASE);
2632 		update_kernel_vm_end = 1;
2633 	} else {
2634 		update_kernel_vm_end = 0;
2635 	}
2636 
2637 	kstart = rounddown2(kstart, PAGE_SIZE * NPTEPG);
2638 	kend = roundup2(kend, PAGE_SIZE * NPTEPG);
2639 
2640 	if (kend - 1 >= kernel_map.max_offset)
2641 		kend = kernel_map.max_offset;
2642 
2643 	while (kstart < kend) {
2644 		pt = pmap_pt(&kernel_pmap, kstart);
2645 		if (pt == NULL) {
2646 			/* We need a new PDP entry */
2647 			nkpg = vm_page_alloc(NULL, nkpt,
2648 			                     VM_ALLOC_NORMAL |
2649 					     VM_ALLOC_SYSTEM |
2650 					     VM_ALLOC_INTERRUPT);
2651 			if (nkpg == NULL) {
2652 				panic("pmap_growkernel: no memory to grow "
2653 				      "kernel");
2654 			}
2655 			paddr = VM_PAGE_TO_PHYS(nkpg);
2656 			if ((nkpg->flags & PG_ZERO) == 0)
2657 				pmap_zero_page(paddr);
2658 			vm_page_flag_clear(nkpg, PG_ZERO);
2659 			newpd = (pdp_entry_t)
2660 			    (paddr |
2661 			    kernel_pmap.pmap_bits[PG_V_IDX] |
2662 			    kernel_pmap.pmap_bits[PG_RW_IDX] |
2663 			    kernel_pmap.pmap_bits[PG_A_IDX] |
2664 			    kernel_pmap.pmap_bits[PG_M_IDX]);
2665 			*pmap_pd(&kernel_pmap, kstart) = newpd;
2666 			nkpt++;
2667 			continue; /* try again */
2668 		}
2669 		if ((*pt & kernel_pmap.pmap_bits[PG_V_IDX]) != 0) {
2670 			kstart = (kstart + PAGE_SIZE * NPTEPG) &
2671 				 ~(PAGE_SIZE * NPTEPG - 1);
2672 			if (kstart - 1 >= kernel_map.max_offset) {
2673 				kstart = kernel_map.max_offset;
2674 				break;
2675 			}
2676 			continue;
2677 		}
2678 
2679 		/*
2680 		 * This index is bogus, but out of the way
2681 		 */
2682 		nkpg = vm_page_alloc(NULL, nkpt,
2683 				     VM_ALLOC_NORMAL |
2684 				     VM_ALLOC_SYSTEM |
2685 				     VM_ALLOC_INTERRUPT);
2686 		if (nkpg == NULL)
2687 			panic("pmap_growkernel: no memory to grow kernel");
2688 
2689 		vm_page_wire(nkpg);
2690 		ptppaddr = VM_PAGE_TO_PHYS(nkpg);
2691 		pmap_zero_page(ptppaddr);
2692 		vm_page_flag_clear(nkpg, PG_ZERO);
2693 		newpt = (pd_entry_t) (ptppaddr |
2694 		    kernel_pmap.pmap_bits[PG_V_IDX] |
2695 		    kernel_pmap.pmap_bits[PG_RW_IDX] |
2696 		    kernel_pmap.pmap_bits[PG_A_IDX] |
2697 		    kernel_pmap.pmap_bits[PG_M_IDX]);
2698 		*pmap_pt(&kernel_pmap, kstart) = newpt;
2699 		nkpt++;
2700 
2701 		kstart = (kstart + PAGE_SIZE * NPTEPG) &
2702 			  ~(PAGE_SIZE * NPTEPG - 1);
2703 
2704 		if (kstart - 1 >= kernel_map.max_offset) {
2705 			kstart = kernel_map.max_offset;
2706 			break;
2707 		}
2708 	}
2709 
2710 	/*
2711 	 * Only update kernel_vm_end for areas below KERNBASE.
2712 	 */
2713 	if (update_kernel_vm_end && kernel_vm_end < kstart)
2714 		kernel_vm_end = kstart;
2715 }
2716 
2717 /*
2718  *	Add a reference to the specified pmap.
2719  */
2720 void
2721 pmap_reference(pmap_t pmap)
2722 {
2723 	if (pmap != NULL) {
2724 		lwkt_gettoken(&pmap->pm_token);
2725 		++pmap->pm_count;
2726 		lwkt_reltoken(&pmap->pm_token);
2727 	}
2728 }
2729 
2730 /***************************************************
2731  * page management routines.
2732  ***************************************************/
2733 
2734 /*
2735  * Hold a pv without locking it
2736  */
2737 static void
2738 pv_hold(pv_entry_t pv)
2739 {
2740 	atomic_add_int(&pv->pv_hold, 1);
2741 }
2742 
2743 /*
2744  * Hold a pv_entry, preventing its destruction.  TRUE is returned if the pv
2745  * was successfully locked, FALSE if it wasn't.  The caller must dispose of
2746  * the pv properly.
2747  *
2748  * Either the pmap->pm_spin or the related vm_page_spin (if traversing a
2749  * pv list via its page) must be held by the caller.
2750  */
2751 static int
2752 _pv_hold_try(pv_entry_t pv PMAP_DEBUG_DECL)
2753 {
2754 	u_int count;
2755 
2756 	/*
2757 	 * Critical path shortcut expects pv to already have one ref
2758 	 * (for the pv->pv_pmap).
2759 	 */
2760 	if (atomic_cmpset_int(&pv->pv_hold, 1, PV_HOLD_LOCKED | 2)) {
2761 #ifdef PMAP_DEBUG
2762 		pv->pv_func = func;
2763 		pv->pv_line = lineno;
2764 #endif
2765 		return TRUE;
2766 	}
2767 
2768 	for (;;) {
2769 		count = pv->pv_hold;
2770 		cpu_ccfence();
2771 		if ((count & PV_HOLD_LOCKED) == 0) {
2772 			if (atomic_cmpset_int(&pv->pv_hold, count,
2773 					      (count + 1) | PV_HOLD_LOCKED)) {
2774 #ifdef PMAP_DEBUG
2775 				pv->pv_func = func;
2776 				pv->pv_line = lineno;
2777 #endif
2778 				return TRUE;
2779 			}
2780 		} else {
2781 			if (atomic_cmpset_int(&pv->pv_hold, count, count + 1))
2782 				return FALSE;
2783 		}
2784 		/* retry */
2785 	}
2786 }
2787 
2788 /*
2789  * Drop a previously held pv_entry which could not be locked, allowing its
2790  * destruction.
2791  *
2792  * Must not be called with a spinlock held as we might zfree() the pv if it
2793  * is no longer associated with a pmap and this was the last hold count.
2794  */
2795 static void
2796 pv_drop(pv_entry_t pv)
2797 {
2798 	u_int count;
2799 
2800 	for (;;) {
2801 		count = pv->pv_hold;
2802 		cpu_ccfence();
2803 		KKASSERT((count & PV_HOLD_MASK) > 0);
2804 		KKASSERT((count & (PV_HOLD_LOCKED | PV_HOLD_MASK)) !=
2805 			 (PV_HOLD_LOCKED | 1));
2806 		if (atomic_cmpset_int(&pv->pv_hold, count, count - 1)) {
2807 			if ((count & PV_HOLD_MASK) == 1) {
2808 				KKASSERT(count == 1);
2809 				KKASSERT(pv->pv_pmap == NULL);
2810 				zfree(pvzone, pv);
2811 			}
2812 			return;
2813 		}
2814 		/* retry */
2815 	}
2816 }
2817 
2818 /*
2819  * Find or allocate the requested PV entry, returning a locked, held pv.
2820  *
2821  * If (*isnew) is non-zero, the returned pv will have two hold counts, one
2822  * for the caller and one representing the pmap and vm_page association.
2823  *
2824  * If (*isnew) is zero, the returned pv will have only one hold count.
2825  *
2826  * Since both associations can only be adjusted while the pv is locked,
2827  * together they represent just one additional hold.
2828  */
2829 static
2830 pv_entry_t
2831 _pv_alloc(pmap_t pmap, vm_pindex_t pindex, int *isnew PMAP_DEBUG_DECL)
2832 {
2833 	pv_entry_t pv;
2834 	pv_entry_t pnew = NULL;
2835 
2836 	spin_lock(&pmap->pm_spin);
2837 	for (;;) {
2838 		if ((pv = pmap->pm_pvhint) == NULL || pv->pv_pindex != pindex) {
2839 			pv = pv_entry_rb_tree_RB_LOOKUP(&pmap->pm_pvroot,
2840 							pindex);
2841 		}
2842 		if (pv == NULL) {
2843 			if (pnew == NULL) {
2844 				spin_unlock(&pmap->pm_spin);
2845 				pnew = zalloc(pvzone);
2846 				spin_lock(&pmap->pm_spin);
2847 				continue;
2848 			}
2849 			pnew->pv_pmap = pmap;
2850 			pnew->pv_pindex = pindex;
2851 			pnew->pv_hold = PV_HOLD_LOCKED | 2;
2852 #ifdef PMAP_DEBUG
2853 			pnew->pv_func = func;
2854 			pnew->pv_line = lineno;
2855 #endif
2856 			pv_entry_rb_tree_RB_INSERT(&pmap->pm_pvroot, pnew);
2857 			++pmap->pm_generation;
2858 			atomic_add_long(&pmap->pm_stats.resident_count, 1);
2859 			spin_unlock(&pmap->pm_spin);
2860 			*isnew = 1;
2861 			return(pnew);
2862 		}
2863 		if (pnew) {
2864 			spin_unlock(&pmap->pm_spin);
2865 			zfree(pvzone, pnew);
2866 			pnew = NULL;
2867 			spin_lock(&pmap->pm_spin);
2868 			continue;
2869 		}
2870 		if (_pv_hold_try(pv PMAP_DEBUG_COPY)) {
2871 			spin_unlock(&pmap->pm_spin);
2872 		} else {
2873 			spin_unlock(&pmap->pm_spin);
2874 			_pv_lock(pv PMAP_DEBUG_COPY);
2875 		}
2876 		if (pv->pv_pmap == pmap && pv->pv_pindex == pindex) {
2877 			*isnew = 0;
2878 			return(pv);
2879 		}
2880 		pv_put(pv);
2881 		spin_lock(&pmap->pm_spin);
2882 	}
2883 }
2884 
2885 /*
2886  * Find the requested PV entry, returning a locked+held pv or NULL
2887  */
2888 static
2889 pv_entry_t
2890 _pv_get(pmap_t pmap, vm_pindex_t pindex PMAP_DEBUG_DECL)
2891 {
2892 	pv_entry_t pv;
2893 
2894 	spin_lock(&pmap->pm_spin);
2895 	for (;;) {
2896 		/*
2897 		 * Shortcut cache
2898 		 */
2899 		if ((pv = pmap->pm_pvhint) == NULL || pv->pv_pindex != pindex) {
2900 			pv = pv_entry_rb_tree_RB_LOOKUP(&pmap->pm_pvroot,
2901 							pindex);
2902 		}
2903 		if (pv == NULL) {
2904 			spin_unlock(&pmap->pm_spin);
2905 			return NULL;
2906 		}
2907 		if (_pv_hold_try(pv PMAP_DEBUG_COPY)) {
2908 			spin_unlock(&pmap->pm_spin);
2909 		} else {
2910 			spin_unlock(&pmap->pm_spin);
2911 			_pv_lock(pv PMAP_DEBUG_COPY);
2912 		}
2913 		if (pv->pv_pmap == pmap && pv->pv_pindex == pindex) {
2914 			pv_cache(pv, pindex);
2915 			return(pv);
2916 		}
2917 		pv_put(pv);
2918 		spin_lock(&pmap->pm_spin);
2919 	}
2920 }
2921 
2922 /*
2923  * Lookup, hold, and attempt to lock (pmap,pindex).
2924  *
2925  * If the entry does not exist NULL is returned and *errorp is set to 0
2926  *
2927  * If the entry exists and could be successfully locked it is returned and
2928  * errorp is set to 0.
2929  *
2930  * If the entry exists but could NOT be successfully locked it is returned
2931  * held and *errorp is set to 1.
2932  */
2933 static
2934 pv_entry_t
2935 pv_get_try(pmap_t pmap, vm_pindex_t pindex, int *errorp)
2936 {
2937 	pv_entry_t pv;
2938 
2939 	spin_lock_shared(&pmap->pm_spin);
2940 	if ((pv = pmap->pm_pvhint) == NULL || pv->pv_pindex != pindex)
2941 		pv = pv_entry_rb_tree_RB_LOOKUP(&pmap->pm_pvroot, pindex);
2942 	if (pv == NULL) {
2943 		spin_unlock_shared(&pmap->pm_spin);
2944 		*errorp = 0;
2945 		return NULL;
2946 	}
2947 	if (pv_hold_try(pv)) {
2948 		pv_cache(pv, pindex);
2949 		spin_unlock_shared(&pmap->pm_spin);
2950 		*errorp = 0;
2951 		KKASSERT(pv->pv_pmap == pmap && pv->pv_pindex == pindex);
2952 		return(pv);	/* lock succeeded */
2953 	}
2954 	spin_unlock_shared(&pmap->pm_spin);
2955 	*errorp = 1;
2956 	return (pv);		/* lock failed */
2957 }
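
/*
 * Illustrative sketch (not compiled): when *errorp is set the pv is
 * returned held but not locked.  The caller must release any conflicting
 * locks, block in pv_lock(), drop the pv, and retry its lookup, roughly
 * as pmap_scan_callback() does further below:
 *
 *	pte_pv = pv_get_try(pmap, pmap_pte_pindex(sva), &error);
 *	if (error) {
 *		pv_put(pt_pv);		(release conflicting locks first)
 *		pv_lock(pte_pv);	(now safe to block)
 *		pv_put(pte_pv);
 *		... relookup pt_pv and retry ...
 *	}
 */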
2958 
2959 /*
2960  * Find the requested PV entry, returning a held pv or NULL
2961  */
2962 static
2963 pv_entry_t
2964 pv_find(pmap_t pmap, vm_pindex_t pindex)
2965 {
2966 	pv_entry_t pv;
2967 
2968 	spin_lock_shared(&pmap->pm_spin);
2969 
2970 	if ((pv = pmap->pm_pvhint) == NULL || pv->pv_pindex != pindex)
2971 		pv = pv_entry_rb_tree_RB_LOOKUP(&pmap->pm_pvroot, pindex);
2972 	if (pv == NULL) {
2973 		spin_unlock_shared(&pmap->pm_spin);
2974 		return NULL;
2975 	}
2976 	pv_hold(pv);
2977 	pv_cache(pv, pindex);
2978 	spin_unlock_shared(&pmap->pm_spin);
2979 	return(pv);
2980 }
2981 
2982 /*
2983  * Lock a held pv, keeping the hold count
2984  */
2985 static
2986 void
2987 _pv_lock(pv_entry_t pv PMAP_DEBUG_DECL)
2988 {
2989 	u_int count;
2990 
2991 	for (;;) {
2992 		count = pv->pv_hold;
2993 		cpu_ccfence();
2994 		if ((count & PV_HOLD_LOCKED) == 0) {
2995 			if (atomic_cmpset_int(&pv->pv_hold, count,
2996 					      count | PV_HOLD_LOCKED)) {
2997 #ifdef PMAP_DEBUG
2998 				pv->pv_func = func;
2999 				pv->pv_line = lineno;
3000 #endif
3001 				return;
3002 			}
3003 			continue;
3004 		}
3005 		tsleep_interlock(pv, 0);
3006 		if (atomic_cmpset_int(&pv->pv_hold, count,
3007 				      count | PV_HOLD_WAITING)) {
3008 #ifdef PMAP_DEBUG
3009 			kprintf("pv waiting on %s:%d\n",
3010 					pv->pv_func, pv->pv_line);
3011 #endif
3012 			tsleep(pv, PINTERLOCKED, "pvwait", hz);
3013 		}
3014 		/* retry */
3015 	}
3016 }
3017 
3018 /*
3019  * Unlock a held and locked pv, keeping the hold count.
3020  */
3021 static
3022 void
3023 pv_unlock(pv_entry_t pv)
3024 {
3025 	u_int count;
3026 
3027 	for (;;) {
3028 		count = pv->pv_hold;
3029 		cpu_ccfence();
3030 		KKASSERT((count & (PV_HOLD_LOCKED | PV_HOLD_MASK)) >=
3031 			 (PV_HOLD_LOCKED | 1));
3032 		if (atomic_cmpset_int(&pv->pv_hold, count,
3033 				      count &
3034 				      ~(PV_HOLD_LOCKED | PV_HOLD_WAITING))) {
3035 			if (count & PV_HOLD_WAITING)
3036 				wakeup(pv);
3037 			break;
3038 		}
3039 	}
3040 }
3041 
3042 /*
3043  * Unlock and drop a pv.  If the pv is no longer associated with a pmap
3044  * and the hold count drops to zero we will free it.
3045  *
3046  * Caller should not hold any spin locks.  We are protected from hold races
3047  * by virtue of holds only occurring with a pmap_spin or vm_page_spin
3048  * lock held.  A pv cannot be located otherwise.
3049  */
3050 static
3051 void
3052 pv_put(pv_entry_t pv)
3053 {
3054 	/*
3055 	 * Fast - shortcut most common condition
3056 	 */
3057 	if (atomic_cmpset_int(&pv->pv_hold, PV_HOLD_LOCKED | 2, 1))
3058 		return;
3059 
3060 	/*
3061 	 * Slow
3062 	 */
3063 	pv_unlock(pv);
3064 	pv_drop(pv);
3065 }
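
/*
 * Illustrative sketch (not compiled): the pv hold/lock primitives pair up
 * as follows.  pv_get() and pv_alloc() return a locked+held pv which is
 * disposed of with pv_put() (or pv_free() when destroying it), while
 * pv_find() returns a held-but-unlocked pv which is disposed of with
 * pv_drop():
 *
 *	pv = pv_get(pmap, pindex);	(locked + held)
 *	if (pv)
 *		pv_put(pv);		(unlock + drop)
 *
 *	pv = pv_find(pmap, pindex);	(held only)
 *	if (pv)
 *		pv_drop(pv);		(drop)
 */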
3066 
3067 /*
3068  * Remove the pmap association from a pv, require that pv_m already be removed,
3069  * then unlock and drop the pv.  Any pte operations must have already been
3070  * completed.  This call may result in a last-drop which will physically free
3071  * the pv.
3072  *
3073  * Removing the pmap association entails an additional drop.
3074  *
3075  * pv must be exclusively locked on call and will be disposed of on return.
3076  */
3077 static
3078 void
3079 pv_free(pv_entry_t pv)
3080 {
3081 	pmap_t pmap;
3082 
3083 	KKASSERT(pv->pv_m == NULL);
3084 	KKASSERT((pv->pv_hold & PV_HOLD_MASK) >= 2);
3085 	if ((pmap = pv->pv_pmap) != NULL) {
3086 		spin_lock(&pmap->pm_spin);
3087 		pv_entry_rb_tree_RB_REMOVE(&pmap->pm_pvroot, pv);
3088 		++pmap->pm_generation;
3089 		if (pmap->pm_pvhint == pv)
3090 			pmap->pm_pvhint = NULL;
3091 		atomic_add_long(&pmap->pm_stats.resident_count, -1);
3092 		pv->pv_pmap = NULL;
3093 		pv->pv_pindex = 0;
3094 		spin_unlock(&pmap->pm_spin);
3095 
3096 		/*
3097 		 * Try to shortcut three atomic ops, otherwise fall through
3098 		 * and do it normally.  Drop two refs and the lock all in
3099 		 * one go.
3100 		 */
3101 		if (atomic_cmpset_int(&pv->pv_hold, PV_HOLD_LOCKED | 2, 0)) {
3102 			zfree(pvzone, pv);
3103 			return;
3104 		}
3105 		pv_drop(pv);	/* ref for pv_pmap */
3106 	}
3107 	pv_put(pv);
3108 }
3109 
3110 /*
3111  * This routine is very drastic, but can save the system
3112  * in a pinch.
3113  */
3114 void
3115 pmap_collect(void)
3116 {
3117 	int i;
3118 	vm_page_t m;
3119 	static int warningdone=0;
3120 
3121 	if (pmap_pagedaemon_waken == 0)
3122 		return;
3123 	pmap_pagedaemon_waken = 0;
3124 	if (warningdone < 5) {
3125 		kprintf("pmap_collect: collecting pv entries -- "
3126 			"suggest increasing PMAP_SHPGPERPROC\n");
3127 		warningdone++;
3128 	}
3129 
3130 	for (i = 0; i < vm_page_array_size; i++) {
3131 		m = &vm_page_array[i];
3132 		if (m->wire_count || m->hold_count)
3133 			continue;
3134 		if (vm_page_busy_try(m, TRUE) == 0) {
3135 			if (m->wire_count == 0 && m->hold_count == 0) {
3136 				pmap_remove_all(m);
3137 			}
3138 			vm_page_wakeup(m);
3139 		}
3140 	}
3141 }
3142 
3143 /*
3144  * Scan the pmap for active page table entries and issue a callback.
3145  * The callback must dispose of pte_pv, whose PTE entry is at *ptep in
3146  * its parent page table.
3147  *
3148  * pte_pv will be NULL if the page or page table is unmanaged.
3149  * pt_pv will point to the page table page containing the pte for the page.
3150  *
3151  * NOTE! If we come across an unmanaged page TABLE (versus an unmanaged page),
3152  *	 we pass a NULL pte_pv and we pass a pt_pv pointing to the passed
3153  *	 process pmap's PD and page to the callback function.  This can be
3154  *	 confusing because the pt_pv is really a pd_pv, and the target page
3155  *	 table page is simply aliased by the pmap and not owned by it.
3156  *
3157  * It is assumed that the start and end are properly rounded to the page size.
3158  *
3159  * It is assumed that PD pages and above are managed and thus in the RB tree,
3160  * allowing us to use RB_SCAN from the PD pages down for ranged scans.
3161  */
3162 struct pmap_scan_info {
3163 	struct pmap *pmap;
3164 	vm_offset_t sva;
3165 	vm_offset_t eva;
3166 	vm_pindex_t sva_pd_pindex;
3167 	vm_pindex_t eva_pd_pindex;
3168 	void (*func)(pmap_t, struct pmap_scan_info *,
3169 		     pv_entry_t, pv_entry_t, int, vm_offset_t,
3170 		     pt_entry_t *, void *);
3171 	void *arg;
3172 	int doinval;
3173 	struct pmap_inval_info inval;
3174 };
3175 
3176 static int pmap_scan_cmp(pv_entry_t pv, void *data);
3177 static int pmap_scan_callback(pv_entry_t pv, void *data);
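
/*
 * Illustrative sketch (not compiled): pmap_scan() is driven by a
 * caller-filled pmap_scan_info, exactly as pmap_remove() does further
 * below:
 *
 *	struct pmap_scan_info info;
 *
 *	info.pmap = pmap;
 *	info.sva = sva;
 *	info.eva = eva;
 *	info.func = pmap_remove_callback;
 *	info.arg = NULL;
 *	info.doinval = 1;
 *	pmap_scan(&info);
 */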
3178 
3179 static void
3180 pmap_scan(struct pmap_scan_info *info)
3181 {
3182 	struct pmap *pmap = info->pmap;
3183 	pv_entry_t pd_pv;	/* A page directory PV */
3184 	pv_entry_t pt_pv;	/* A page table PV */
3185 	pv_entry_t pte_pv;	/* A page table entry PV */
3186 	pt_entry_t *ptep;
3187 	pt_entry_t oldpte;
3188 	struct pv_entry dummy_pv;
3189 	int generation;
3190 
3191 	if (pmap == NULL)
3192 		return;
3193 
3194 	/*
3195 	 * Hold the token for stability; if the pmap is empty we have nothing
3196 	 * to do.
3197 	 */
3198 	lwkt_gettoken(&pmap->pm_token);
3199 #if 0
3200 	if (pmap->pm_stats.resident_count == 0) {
3201 		lwkt_reltoken(&pmap->pm_token);
3202 		return;
3203 	}
3204 #endif
3205 
3206 	pmap_inval_init(&info->inval);
3207 
3208 again:
3209 	/*
3210 	 * Special handling for scanning one page, which is a very common
3211 	 * operation (it is?).
3212 	 *
3213 	 * NOTE: Locks must be ordered bottom-up. pte,pt,pd,pdp,pml4
3214 	 */
3215 	if (info->sva + PAGE_SIZE == info->eva) {
3216 		generation = pmap->pm_generation;
3217 		if (info->sva >= VM_MAX_USER_ADDRESS) {
3218 			/*
3219 			 * Kernel mappings do not track wire counts on
3220 			 * page table pages and only maintain pd_pv and
3221 			 * pte_pv levels so pmap_scan() works.
3222 			 */
3223 			pt_pv = NULL;
3224 			pte_pv = pv_get(pmap, pmap_pte_pindex(info->sva));
3225 			ptep = vtopte(info->sva);
3226 		} else {
3227 			/*
3228 			 * User pages which are unmanaged will not have a
3229 			 * pte_pv.  User page table pages which are unmanaged
3230 			 * (shared from elsewhere) will also not have a pt_pv.
3231 			 * The func() callback will pass both pte_pv and pt_pv
3232 			 * as NULL in that case.
3233 			 */
3234 			pte_pv = pv_get(pmap, pmap_pte_pindex(info->sva));
3235 			pt_pv = pv_get(pmap, pmap_pt_pindex(info->sva));
3236 			if (pt_pv == NULL) {
3237 				KKASSERT(pte_pv == NULL);
3238 				pd_pv = pv_get(pmap, pmap_pd_pindex(info->sva));
3239 				if (pd_pv) {
3240 					ptep = pv_pte_lookup(pd_pv,
3241 						    pmap_pt_index(info->sva));
3242 					if (*ptep) {
3243 						info->func(pmap, info,
3244 						     NULL, pd_pv, 1,
3245 						     info->sva, ptep,
3246 						     info->arg);
3247 					}
3248 					pv_put(pd_pv);
3249 				}
3250 				goto fast_skip;
3251 			}
3252 			ptep = pv_pte_lookup(pt_pv, pmap_pte_index(info->sva));
3253 		}
3254 
3255 		/*
3256 		 * NOTE: *ptep can't be ripped out from under us if we hold
3257 		 *	 pte_pv locked, but bits can change.  However, there is
3258 		 *	 a race where another thread may be inserting pte_pv
3259 		 *	 and setting *ptep just after our pte_pv lookup fails.
3260 		 *
3261 		 *	 In this situation we can end up with a NULL pte_pv
3262 		 *	 but find that we have a managed *ptep.  We explicitly
3263 		 *	 check for this race.
3264 		 */
3265 		oldpte = *ptep;
3266 		cpu_ccfence();
3267 		if (oldpte == 0) {
3268 			/*
3269 			 * Unlike the pv_find() case below we actually
3270 			 * acquired a locked pv in this case so any
3271 			 * race should have been resolved.  It is expected
3272 			 * to not exist.
3273 			 */
3274 			KKASSERT(pte_pv == NULL);
3275 		} else if (pte_pv) {
3276 			KASSERT((oldpte & (pmap->pmap_bits[PG_MANAGED_IDX] |
3277 					   pmap->pmap_bits[PG_V_IDX])) ==
3278 				(pmap->pmap_bits[PG_MANAGED_IDX] |
3279 				 pmap->pmap_bits[PG_V_IDX]),
3280 			    ("badA *ptep %016lx/%016lx sva %016lx pte_pv %p"
3281 			     "generation %d/%d",
3282 			    *ptep, oldpte, info->sva, pte_pv,
3283 			    generation, pmap->pm_generation));
3284 			info->func(pmap, info, pte_pv, pt_pv, 0,
3285 				   info->sva, ptep, info->arg);
3286 		} else {
3287 			/*
3288 			 * Check for insertion race
3289 			 */
3290 			if ((oldpte & pmap->pmap_bits[PG_MANAGED_IDX]) &&
3291 			    pt_pv) {
3292 				pte_pv = pv_find(pmap,
3293 						 pmap_pte_pindex(info->sva));
3294 				if (pte_pv) {
3295 					pv_drop(pte_pv);
3296 					pv_put(pt_pv);
3297 					kprintf("pmap_scan: RACE1 "
3298 						"%016jx, %016lx\n",
3299 						info->sva, oldpte);
3300 					goto again;
3301 				}
3302 			}
3303 
3304 			/*
3305 			 * Didn't race
3306 			 */
3307 			KASSERT((oldpte & (pmap->pmap_bits[PG_MANAGED_IDX] |
3308 					   pmap->pmap_bits[PG_V_IDX])) ==
3309 			    pmap->pmap_bits[PG_V_IDX],
3310 			    ("badB *ptep %016lx/%016lx sva %016lx pte_pv NULL"
3311 			     "generation %d/%d",
3312 			    *ptep, oldpte, info->sva,
3313 			    generation, pmap->pm_generation));
3314 			info->func(pmap, info, NULL, pt_pv, 0,
3315 			    info->sva, ptep, info->arg);
3316 		}
3317 		if (pt_pv)
3318 			pv_put(pt_pv);
3319 fast_skip:
3320 		pmap_inval_done(&info->inval);
3321 		lwkt_reltoken(&pmap->pm_token);
3322 		return;
3323 	}
3324 
3325 	/*
3326 	 * Nominal scan case, RB_SCAN() for PD pages and iterate from
3327 	 * there.
3328 	 */
3329 	info->sva_pd_pindex = pmap_pd_pindex(info->sva);
3330 	info->eva_pd_pindex = pmap_pd_pindex(info->eva + NBPDP - 1);
3331 
3332 	if (info->sva >= VM_MAX_USER_ADDRESS) {
3333 		/*
3334 		 * The kernel does not currently maintain any pv_entry's for
3335 		 * higher-level page tables.
3336 		 */
3337 		bzero(&dummy_pv, sizeof(dummy_pv));
3338 		dummy_pv.pv_pindex = info->sva_pd_pindex;
3339 		spin_lock(&pmap->pm_spin);
3340 		while (dummy_pv.pv_pindex < info->eva_pd_pindex) {
3341 			pmap_scan_callback(&dummy_pv, info);
3342 			++dummy_pv.pv_pindex;
3343 		}
3344 		spin_unlock(&pmap->pm_spin);
3345 	} else {
3346 		/*
3347 		 * User page tables maintain local PML4, PDP, and PD
3348 		 * pv_entry's at the very least.  PT pv's might be
3349 		 * unmanaged and thus not exist.  PTE pv's might be
3350 		 * unmanaged and thus not exist.
3351 		 */
3352 		spin_lock(&pmap->pm_spin);
3353 		pv_entry_rb_tree_RB_SCAN(&pmap->pm_pvroot,
3354 			pmap_scan_cmp, pmap_scan_callback, info);
3355 		spin_unlock(&pmap->pm_spin);
3356 	}
3357 	pmap_inval_done(&info->inval);
3358 	lwkt_reltoken(&pmap->pm_token);
3359 }
3360 
3361 /*
3362  * WARNING! pmap->pm_spin held
3363  */
3364 static int
3365 pmap_scan_cmp(pv_entry_t pv, void *data)
3366 {
3367 	struct pmap_scan_info *info = data;
3368 	if (pv->pv_pindex < info->sva_pd_pindex)
3369 		return(-1);
3370 	if (pv->pv_pindex >= info->eva_pd_pindex)
3371 		return(1);
3372 	return(0);
3373 }
3374 
3375 /*
3376  * WARNING! pmap->pm_spin held
3377  */
3378 static int
3379 pmap_scan_callback(pv_entry_t pv, void *data)
3380 {
3381 	struct pmap_scan_info *info = data;
3382 	struct pmap *pmap = info->pmap;
3383 	pv_entry_t pd_pv;	/* A page directory PV */
3384 	pv_entry_t pt_pv;	/* A page table PV */
3385 	pv_entry_t pte_pv;	/* A page table entry PV */
3386 	pt_entry_t *ptep;
3387 	pt_entry_t oldpte;
3388 	vm_offset_t sva;
3389 	vm_offset_t eva;
3390 	vm_offset_t va_next;
3391 	vm_pindex_t pd_pindex;
3392 	int error;
3393 	int generation;
3394 
3395 	/*
3396 	 * Pull the PD pindex from the pv before releasing the spinlock.
3397 	 *
3398 	 * WARNING: pv is faked for kernel pmap scans.
3399 	 */
3400 	pd_pindex = pv->pv_pindex;
3401 	spin_unlock(&pmap->pm_spin);
3402 	pv = NULL;	/* invalid after spinlock unlocked */
3403 
3404 	/*
3405 	 * Calculate the page range within the PD.  SIMPLE pmaps are
3406 	 * direct-mapped for the entire 2^64 address space.  Normal pmaps
3407 	 * reflect the user and kernel address space which requires
3408 	 * canonicalization with regards to converting pd_pindex's back
3409 	 * into addresses.
3410 	 */
3411 	sva = (pd_pindex - NUPTE_TOTAL - NUPT_TOTAL) << PDPSHIFT;
3412 	if ((pmap->pm_flags & PMAP_FLAG_SIMPLE) == 0 &&
3413 	    (sva & PML4_SIGNMASK)) {
3414 		sva |= PML4_SIGNMASK;
3415 	}
3416 	eva = sva + NBPDP;	/* can overflow */
3417 	if (sva < info->sva)
3418 		sva = info->sva;
3419 	if (eva < info->sva || eva > info->eva)
3420 		eva = info->eva;
3421 
3422 	/*
3423 	 * NOTE: kernel mappings do not track page table pages, only
3424 	 * 	 terminal pages.
3425 	 *
3426 	 * NOTE: Locks must be ordered bottom-up. pte,pt,pd,pdp,pml4.
3427 	 *	 However, for the scan to be efficient we try to
3428 	 *	 cache items top-down.
3429 	 */
3430 	pd_pv = NULL;
3431 	pt_pv = NULL;
3432 
3433 	for (; sva < eva; sva = va_next) {
3434 		if (sva >= VM_MAX_USER_ADDRESS) {
3435 			if (pt_pv) {
3436 				pv_put(pt_pv);
3437 				pt_pv = NULL;
3438 			}
3439 			goto kernel_skip;
3440 		}
3441 
3442 		/*
3443 		 * PD cache (degenerate case if we skip).  It is possible
3444 		 * for the PD to not exist due to races.  This is ok.
3445 		 */
3446 		if (pd_pv == NULL) {
3447 			pd_pv = pv_get(pmap, pmap_pd_pindex(sva));
3448 		} else if (pd_pv->pv_pindex != pmap_pd_pindex(sva)) {
3449 			pv_put(pd_pv);
3450 			pd_pv = pv_get(pmap, pmap_pd_pindex(sva));
3451 		}
3452 		if (pd_pv == NULL) {
3453 			va_next = (sva + NBPDP) & ~PDPMASK;
3454 			if (va_next < sva)
3455 				va_next = eva;
3456 			continue;
3457 		}
3458 
3459 		/*
3460 		 * PT cache
3461 		 */
3462 		if (pt_pv == NULL) {
3463 			if (pd_pv) {
3464 				pv_put(pd_pv);
3465 				pd_pv = NULL;
3466 			}
3467 			pt_pv = pv_get(pmap, pmap_pt_pindex(sva));
3468 		} else if (pt_pv->pv_pindex != pmap_pt_pindex(sva)) {
3469 			if (pd_pv) {
3470 				pv_put(pd_pv);
3471 				pd_pv = NULL;
3472 			}
3473 			pv_put(pt_pv);
3474 			pt_pv = pv_get(pmap, pmap_pt_pindex(sva));
3475 		}
3476 
3477 		/*
3478 		 * If pt_pv is NULL we either have a shared page table
3479 		 * page and must issue a callback specific to that case,
3480 		 * or there is no page table page.
3481 		 *
3482 		 * Either way we can skip the page table page.
3483 		 */
3484 		if (pt_pv == NULL) {
3485 			/*
3486 			 * Possible unmanaged (shared from another pmap)
3487 			 * page table page.
3488 			 */
3489 			if (pd_pv == NULL)
3490 				pd_pv = pv_get(pmap, pmap_pd_pindex(sva));
3491 			KKASSERT(pd_pv != NULL);
3492 			ptep = pv_pte_lookup(pd_pv, pmap_pt_index(sva));
3493 			if (*ptep & pmap->pmap_bits[PG_V_IDX]) {
3494 				info->func(pmap, info, NULL, pd_pv, 1,
3495 					   sva, ptep, info->arg);
3496 			}
3497 
3498 			/*
3499 			 * Done, move to next page table page.
3500 			 */
3501 			va_next = (sva + NBPDR) & ~PDRMASK;
3502 			if (va_next < sva)
3503 				va_next = eva;
3504 			continue;
3505 		}
3506 
3507 		/*
3508 		 * From this point in the loop testing pt_pv for non-NULL
3509 		 * means we are in UVM, else if it is NULL we are in KVM.
3510 		 *
3511 		 * Limit our scan to either the end of the va represented
3512 		 * by the current page table page, or to the end of the
3513 		 * range being removed.
3514 		 */
3515 kernel_skip:
3516 		va_next = (sva + NBPDR) & ~PDRMASK;
3517 		if (va_next < sva)
3518 			va_next = eva;
3519 		if (va_next > eva)
3520 			va_next = eva;
3521 
3522 		/*
3523 		 * Scan the page table for pages.  Some pages may not be
3524 		 * managed (might not have a pv_entry).
3525 		 *
3526 		 * There is no page table management for kernel pages so
3527 		 * pt_pv will be NULL in that case, but otherwise pt_pv
3528 		 * is non-NULL, locked, and referenced.
3529 		 */
3530 
3531 		/*
3532 		 * At this point a non-NULL pt_pv means a UVA, and a NULL
3533 		 * pt_pv means a KVA.
3534 		 */
3535 		if (pt_pv)
3536 			ptep = pv_pte_lookup(pt_pv, pmap_pte_index(sva));
3537 		else
3538 			ptep = vtopte(sva);
3539 
3540 		while (sva < va_next) {
3541 			/*
3542 			 * Acquire the related pte_pv, if any.  If *ptep == 0
3543 			 * the related pte_pv should not exist, but if *ptep
3544 			 * is not zero the pte_pv may or may not exist (e.g.
3545 			 * will not exist for an unmanaged page).
3546 			 *
3547 			 * However a multitude of races are possible here.
3548 			 *
3549 			 * In addition, the (pt_pv, pte_pv) lock order is
3550 			 * backwards, so we have to be careful in acquiring
3551 			 * a properly locked pte_pv.
3552 			 */
3553 			generation = pmap->pm_generation;
3554 			if (pt_pv) {
3555 				pte_pv = pv_get_try(pmap, pmap_pte_pindex(sva),
3556 						    &error);
3557 				if (error) {
3558 					if (pd_pv) {
3559 						pv_put(pd_pv);
3560 						pd_pv = NULL;
3561 					}
3562 					pv_put(pt_pv);	 /* must be non-NULL */
3563 					pt_pv = NULL;
3564 					pv_lock(pte_pv); /* safe to block now */
3565 					pv_put(pte_pv);
3566 					pte_pv = NULL;
3567 					pt_pv = pv_get(pmap,
3568 						       pmap_pt_pindex(sva));
3569 					/*
3570 					 * pt_pv reloaded, need new ptep
3571 					 */
3572 					KKASSERT(pt_pv != NULL);
3573 					ptep = pv_pte_lookup(pt_pv,
3574 							pmap_pte_index(sva));
3575 					continue;
3576 				}
3577 			} else {
3578 				pte_pv = pv_get(pmap, pmap_pte_pindex(sva));
3579 			}
3580 
3581 			/*
3582 			 * Ok, if *ptep == 0 we had better NOT have a pte_pv.
3583 			 */
3584 			oldpte = *ptep;
3585 			if (oldpte == 0) {
3586 				if (pte_pv) {
3587 					kprintf("Unexpected non-NULL pte_pv "
3588 						"%p pt_pv %p "
3589 						"*ptep = %016lx/%016lx\n",
3590 						pte_pv, pt_pv, *ptep, oldpte);
3591 					panic("Unexpected non-NULL pte_pv");
3592 				}
3593 				sva += PAGE_SIZE;
3594 				++ptep;
3595 				continue;
3596 			}
3597 
3598 			/*
3599 			 * Ready for the callback.  The locked pte_pv (if any)
3600 			 * is consumed by the callback.  pte_pv will exist if
3601 			 * the page is managed, and will not exist if it
3602 			 * isn't.
3603 			 */
3604 			if (pte_pv) {
3605 				KASSERT((oldpte & (pmap->pmap_bits[PG_MANAGED_IDX] | pmap->pmap_bits[PG_V_IDX])) ==
3606 				    (pmap->pmap_bits[PG_MANAGED_IDX] | pmap->pmap_bits[PG_V_IDX]),
3607 				    ("badC *ptep %016lx/%016lx sva %016lx "
3608 				    "pte_pv %p pm_generation %d/%d",
3609 				    *ptep, oldpte, sva, pte_pv,
3610 				    generation, pmap->pm_generation));
3611 				info->func(pmap, info, pte_pv, pt_pv, 0,
3612 				    sva, ptep, info->arg);
3613 			} else {
3614 				/*
3615 				 * Check for insertion race.  Since there is no
3616 				 * pte_pv to guard us it is possible for us
3617 				 * to race another thread doing an insertion.
3618 				 * Our lookup misses the pte_pv but our *ptep
3619 				 * check sees the inserted pte.
3620 				 *
3621 				 * XXX panic case seems to occur within a
3622 				 * vm_fork() of /bin/sh, which frankly
3623 				 * shouldn't happen since no other threads
3624 				 * should be inserting to our pmap in that
3625 				 * situation.  Removing, possibly.  Inserting,
3626 				 * shouldn't happen.
3627 				 */
3628 				if ((oldpte & pmap->pmap_bits[PG_MANAGED_IDX]) &&
3629 				    pt_pv) {
3630 					pte_pv = pv_find(pmap,
3631 							 pmap_pte_pindex(sva));
3632 					if (pte_pv) {
3633 						pv_drop(pte_pv);
3634 						kprintf("pmap_scan: RACE2 "
3635 							"%016jx, %016lx\n",
3636 							sva, oldpte);
3637 						continue;
3638 					}
3639 				}
3640 
3641 				/*
3642 				 * Didn't race
3643 				 */
3644 				KASSERT((oldpte & (pmap->pmap_bits[PG_MANAGED_IDX] | pmap->pmap_bits[PG_V_IDX])) ==
3645 				    pmap->pmap_bits[PG_V_IDX],
3646 				    ("badD *ptep %016lx/%016lx sva %016lx "
3647 				    "pte_pv NULL pm_generation %d/%d",
3648 				     *ptep, oldpte, sva,
3649 				     generation, pmap->pm_generation));
3650 				info->func(pmap, info, NULL, pt_pv, 0,
3651 				    sva, ptep, info->arg);
3652 			}
3653 			pte_pv = NULL;
3654 			sva += PAGE_SIZE;
3655 			++ptep;
3656 		}
3657 		lwkt_yield();
3658 	}
3659 	if (pd_pv) {
3660 		pv_put(pd_pv);
3661 		pd_pv = NULL;
3662 	}
3663 	if (pt_pv) {
3664 		pv_put(pt_pv);
3665 		pt_pv = NULL;
3666 	}
3667 	lwkt_yield();
3668 
3669 	/*
3670 	 * Relock before returning.
3671 	 */
3672 	spin_lock(&pmap->pm_spin);
3673 	return (0);
3674 }
3675 
3676 void
3677 pmap_remove(struct pmap *pmap, vm_offset_t sva, vm_offset_t eva)
3678 {
3679 	struct pmap_scan_info info;
3680 
3681 	info.pmap = pmap;
3682 	info.sva = sva;
3683 	info.eva = eva;
3684 	info.func = pmap_remove_callback;
3685 	info.arg = NULL;
3686 	info.doinval = 1;	/* normal remove requires pmap inval */
3687 	pmap_scan(&info);
3688 }
3689 
3690 static void
3691 pmap_remove_noinval(struct pmap *pmap, vm_offset_t sva, vm_offset_t eva)
3692 {
3693 	struct pmap_scan_info info;
3694 
3695 	info.pmap = pmap;
3696 	info.sva = sva;
3697 	info.eva = eva;
3698 	info.func = pmap_remove_callback;
3699 	info.arg = NULL;
3700 	info.doinval = 0;	/* caller handles invalidation */
3701 	pmap_scan(&info);
3702 }
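
/*
 * Example (illustrative sketch only, not part of the original source):
 * pmap_scan() is driven by filling in a pmap_scan_info and supplying a
 * callback.  The hypothetical callback below merely counts entries; as
 * with pmap_protect_callback() it must release the locked pte_pv it is
 * handed (pte_pv is NULL for unmanaged entries).
 */
#if 0
static void
pmap_count_callback(pmap_t pmap, struct pmap_scan_info *info,
		    pv_entry_t pte_pv, pv_entry_t pt_pv, int sharept,
		    vm_offset_t va, pt_entry_t *ptep, void *arg)
{
	long *countp = arg;		/* hypothetical accumulator */

	++*countp;
	if (pte_pv)
		pv_put(pte_pv);		/* callback consumes the locked pv */
}

static long
pmap_count_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
	struct pmap_scan_info info;
	long count = 0;

	info.pmap = pmap;
	info.sva = sva;
	info.eva = eva;
	info.func = pmap_count_callback;
	info.arg = &count;
	info.doinval = 0;	/* read-only scan, no invalidation needed */
	pmap_scan(&info);
	return (count);
}
#endif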
3703 
3704 static void
3705 pmap_remove_callback(pmap_t pmap, struct pmap_scan_info *info,
3706 		     pv_entry_t pte_pv, pv_entry_t pt_pv, int sharept,
3707 		     vm_offset_t va, pt_entry_t *ptep, void *arg __unused)
3708 {
3709 	pt_entry_t pte;
3710 
3711 	if (pte_pv) {
3712 		/*
3713 		 * This will also drop pt_pv's wire_count. Note that
3714 		 * terminal pages are not wired based on mmu presence.
3715 		 */
3716 		if (info->doinval)
3717 			pmap_remove_pv_pte(pte_pv, pt_pv, &info->inval);
3718 		else
3719 			pmap_remove_pv_pte(pte_pv, pt_pv, NULL);
3720 		pmap_remove_pv_page(pte_pv);
3721 		pv_free(pte_pv);
3722 	} else if (sharept == 0) {
3723 		/*
3724 		 * Unmanaged page
3725 		 *
3726 		 * pt_pv's wire_count is still bumped by unmanaged pages
3727 		 * so we must decrement it manually.
3728 		 */
3729 		if (info->doinval)
3730 			pmap_inval_interlock(&info->inval, pmap, va);
3731 		pte = pte_load_clear(ptep);
3732 		if (info->doinval)
3733 			pmap_inval_deinterlock(&info->inval, pmap);
3734 		if (pte & pmap->pmap_bits[PG_W_IDX])
3735 			atomic_add_long(&pmap->pm_stats.wired_count, -1);
3736 		atomic_add_long(&pmap->pm_stats.resident_count, -1);
3737 		if (vm_page_unwire_quick(pt_pv->pv_m))
3738 			panic("pmap_remove: insufficient wirecount");
3739 	} else {
3740 		/*
3741 		 * Unmanaged page table, pt_pv is actually the pd_pv
3742 		 * for our pmap (not the share object pmap).
3743 		 *
3744 		 * We have to unwire the target page table page and we
3745 		 * have to unwire our page directory page.
3746 		 */
3747 		if (info->doinval)
3748 			pmap_inval_interlock(&info->inval, pmap, va);
3749 		pte = pte_load_clear(ptep);
3750 		if (info->doinval)
3751 			pmap_inval_deinterlock(&info->inval, pmap);
3752 		atomic_add_long(&pmap->pm_stats.resident_count, -1);
3753 		KKASSERT((pte & pmap->pmap_bits[PG_DEVICE_IDX]) == 0);
3754 		if (vm_page_unwire_quick(PHYS_TO_VM_PAGE(pte & PG_FRAME)))
3755 			panic("pmap_remove: shared pgtable1 bad wirecount");
3756 		if (vm_page_unwire_quick(pt_pv->pv_m))
3757 			panic("pmap_remove: shared pgtable2 bad wirecount");
3758 	}
3759 }
3760 
3761 /*
3762  * Removes this physical page from all physical maps in which it resides.
3763  * Reflects back modify bits to the pager.
3764  *
3765  * This routine may not be called from an interrupt.
3766  */
3767 static
3768 void
3769 pmap_remove_all(vm_page_t m)
3770 {
3771 	struct pmap_inval_info info;
3772 	pv_entry_t pv;
3773 
3774 	if (!pmap_initialized /* || (m->flags & PG_FICTITIOUS)*/)
3775 		return;
3776 
3777 	pmap_inval_init(&info);
3778 	vm_page_spin_lock(m);
3779 	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
3780 		KKASSERT(pv->pv_m == m);
3781 		if (pv_hold_try(pv)) {
3782 			vm_page_spin_unlock(m);
3783 		} else {
3784 			vm_page_spin_unlock(m);
3785 			pv_lock(pv);
3786 		}
3787 		if (pv->pv_m != m) {
3788 			pv_put(pv);
3789 			vm_page_spin_lock(m);
3790 			continue;
3791 		}
3792 		/*
3793 		 * Holding no spinlocks, pv is locked.
3794 		 */
3795 		pmap_remove_pv_pte(pv, NULL, &info);
3796 		pmap_remove_pv_page(pv);
3797 		pv_free(pv);
3798 		vm_page_spin_lock(m);
3799 	}
3800 	KKASSERT((m->flags & (PG_MAPPED|PG_WRITEABLE)) == 0);
3801 	vm_page_spin_unlock(m);
3802 	pmap_inval_done(&info);
3803 }
3804 
3805 /*
3806  * Set the physical protection on the specified range of this map
3807  * as requested.  This function is typically only used for debug watchpoints
3808  * and COW pages.
3809  *
3810  * This function may not be called from an interrupt if the map is
3811  * not the kernel_pmap.
3812  *
3813  * NOTE!  For shared page table pages we just unmap the page.
3814  */
3815 void
3816 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
3817 {
3818 	struct pmap_scan_info info;
3819 	/* JG review for NX */
3820 
3821 	if (pmap == NULL)
3822 		return;
3823 	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
3824 		pmap_remove(pmap, sva, eva);
3825 		return;
3826 	}
3827 	if (prot & VM_PROT_WRITE)
3828 		return;
3829 	info.pmap = pmap;
3830 	info.sva = sva;
3831 	info.eva = eva;
3832 	info.func = pmap_protect_callback;
3833 	info.arg = &prot;
3834 	info.doinval = 1;
3835 	pmap_scan(&info);
3836 }
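
/*
 * Example (sketch): typical pmap_protect() calls.  Downgrading a range to
 * read-only keeps the mappings but clears PG_RW, while a protection
 * lacking VM_PROT_READ falls through to pmap_remove() as coded above.
 * The pmap/sva/eva values are placeholders.
 */
#if 0
	pmap_protect(pmap, sva, eva, VM_PROT_READ);	/* write-protect range */
	pmap_protect(pmap, sva, eva, VM_PROT_NONE);	/* equivalent to remove */
#endif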
3837 
3838 static
3839 void
3840 pmap_protect_callback(pmap_t pmap, struct pmap_scan_info *info,
3841 		      pv_entry_t pte_pv, pv_entry_t pt_pv, int sharept,
3842 		      vm_offset_t va, pt_entry_t *ptep, void *arg __unused)
3843 {
3844 	pt_entry_t pbits;
3845 	pt_entry_t cbits;
3846 	pt_entry_t pte;
3847 	vm_page_t m;
3848 
3849 	/*
3850 	 * XXX non-optimal.
3851 	 */
3852 	pmap_inval_interlock(&info->inval, pmap, va);
3853 again:
3854 	pbits = *ptep;
3855 	cbits = pbits;
3856 	if (pte_pv) {
3857 		m = NULL;
3858 		if (pbits & pmap->pmap_bits[PG_A_IDX]) {
3859 			if ((pbits & pmap->pmap_bits[PG_DEVICE_IDX]) == 0) {
3860 				m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
3861 				KKASSERT(m == pte_pv->pv_m);
3862 				vm_page_flag_set(m, PG_REFERENCED);
3863 			}
3864 			cbits &= ~pmap->pmap_bits[PG_A_IDX];
3865 		}
3866 		if (pbits & pmap->pmap_bits[PG_M_IDX]) {
3867 			if (pmap_track_modified(pte_pv->pv_pindex)) {
3868 				if ((pbits & pmap->pmap_bits[PG_DEVICE_IDX]) == 0) {
3869 					if (m == NULL) {
3870 						m = PHYS_TO_VM_PAGE(pbits &
3871 								    PG_FRAME);
3872 					}
3873 					vm_page_dirty(m);
3874 				}
3875 				cbits &= ~pmap->pmap_bits[PG_M_IDX];
3876 			}
3877 		}
3878 	} else if (sharept) {
3879 		/*
3880 		 * Unmanaged page table, pt_pv is actually the pd_pv
3881 		 * for our pmap (not the share object pmap).
3882 		 *
3883 		 * When asked to protect something in a shared page table
3884 		 * page we just unmap the page table page.  We have to
3885 		 * invalidate the tlb in this situation.
3886 		 *
3887 		 * XXX Warning, shared page tables will not be used for
3888 		 * OBJT_DEVICE or OBJT_MGTDEVICE (PG_FICTITIOUS) mappings
3889 		 * so PHYS_TO_VM_PAGE() should be safe here.
3890 		 */
3891 		pte = pte_load_clear(ptep);
3892 		pmap_inval_invltlb(&info->inval);
3893 		if (vm_page_unwire_quick(PHYS_TO_VM_PAGE(pte & PG_FRAME)))
3894 			panic("pmap_protect: pgtable1 pg bad wirecount");
3895 		if (vm_page_unwire_quick(pt_pv->pv_m))
3896 			panic("pmap_protect: pgtable2 pg bad wirecount");
3897 		ptep = NULL;
3898 	}
3899 	/* else unmanaged page, adjust bits, no wire changes */
3900 
3901 	if (ptep) {
3902 		cbits &= ~pmap->pmap_bits[PG_RW_IDX];
3903 		if (pbits != cbits && !atomic_cmpset_long(ptep, pbits, cbits)) {
3904 			goto again;
3905 		}
3906 	}
3907 	pmap_inval_deinterlock(&info->inval, pmap);
3908 	if (pte_pv)
3909 		pv_put(pte_pv);
3910 }
3911 
3912 /*
3913  * Insert the vm_page (m) at the virtual address (va), replacing any prior
3914  * mapping at that address.  Set protection and wiring as requested.
3915  *
3916  * If entry is non-NULL we check to see if the SEG_SIZE optimization is
3917  * possible.  If it is we enter the page into the appropriate shared pmap
3918  * hanging off the related VM object instead of the passed pmap, then we
3919  * share the page table page from the VM object's pmap into the current pmap.
3920  *
3921  * NOTE: This routine MUST insert the page into the pmap now, it cannot
3922  *	 lazy-evaluate.
3923  */
3924 void
3925 pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
3926 	   boolean_t wired, vm_map_entry_t entry)
3927 {
3928 	pmap_inval_info info;
3929 	pv_entry_t pt_pv;	/* page table */
3930 	pv_entry_t pte_pv;	/* page table entry */
3931 	pt_entry_t *ptep;
3932 	vm_paddr_t opa;
3933 	pt_entry_t origpte, newpte;
3934 	vm_paddr_t pa;
3935 
3936 	if (pmap == NULL)
3937 		return;
3938 	va = trunc_page(va);
3939 #ifdef PMAP_DIAGNOSTIC
3940 	if (va >= KvaEnd)
3941 		panic("pmap_enter: toobig");
3942 	if ((va >= UPT_MIN_ADDRESS) && (va < UPT_MAX_ADDRESS))
3943 		panic("pmap_enter: invalid to pmap_enter page table "
3944 		      "pages (va: 0x%lx)", va);
3945 #endif
3946 	if (va < UPT_MAX_ADDRESS && pmap == &kernel_pmap) {
3947 		kprintf("Warning: pmap_enter called on UVA with "
3948 			"kernel_pmap\n");
3949 #ifdef DDB
3950 		db_print_backtrace();
3951 #endif
3952 	}
3953 	if (va >= UPT_MAX_ADDRESS && pmap != &kernel_pmap) {
3954 		kprintf("Warning: pmap_enter called on KVA without "
3955 			"kernel_pmap\n");
3956 #ifdef DDB
3957 		db_print_backtrace();
3958 #endif
3959 	}
3960 
3961 	/*
3962 	 * Get locked PV entries for our new page table entry (pte_pv)
3963 	 * and for its parent page table (pt_pv).  We need the parent
3964 	 * so we can resolve the location of the ptep.
3965 	 *
3966 	 * Only hardware MMU actions can modify the ptep out from
3967 	 * under us.
3968 	 *
3969 	 * if (m) is fictitious or unmanaged we do not create a managing
3970 	 * pte_pv for it.  Any pre-existing page's management state must
3971 	 * match (avoiding code complexity).
3972 	 *
3973 	 * If the pmap is still being initialized we assume existing
3974 	 * page tables.
3975 	 *
3976 	 * Kernel mappings do not track page table pages (i.e. pt_pv).
3977 	 */
3978 	if (pmap_initialized == FALSE) {
3979 		pte_pv = NULL;
3980 		pt_pv = NULL;
3981 		ptep = vtopte(va);
3982 		origpte = *ptep;
3983 	} else if (m->flags & (/*PG_FICTITIOUS |*/ PG_UNMANAGED)) { /* XXX */
3984 		pte_pv = NULL;
3985 		if (va >= VM_MAX_USER_ADDRESS) {
3986 			pt_pv = NULL;
3987 			ptep = vtopte(va);
3988 		} else {
3989 			pt_pv = pmap_allocpte_seg(pmap, pmap_pt_pindex(va),
3990 						  NULL, entry, va);
3991 			ptep = pv_pte_lookup(pt_pv, pmap_pte_index(va));
3992 		}
3993 		origpte = *ptep;
3994 		cpu_ccfence();
3995 		KKASSERT(origpte == 0 ||
3996 			 (origpte & pmap->pmap_bits[PG_MANAGED_IDX]) == 0);
3997 	} else {
3998 		if (va >= VM_MAX_USER_ADDRESS) {
3999 			/*
4000 			 * Kernel map, pv_entry-tracked.
4001 			 */
4002 			pt_pv = NULL;
4003 			pte_pv = pmap_allocpte(pmap, pmap_pte_pindex(va), NULL);
4004 			ptep = vtopte(va);
4005 		} else {
4006 			/*
4007 			 * User map
4008 			 */
4009 			pte_pv = pmap_allocpte_seg(pmap, pmap_pte_pindex(va),
4010 						   &pt_pv, entry, va);
4011 			ptep = pv_pte_lookup(pt_pv, pmap_pte_index(va));
4012 		}
4013 		origpte = *ptep;
4014 		cpu_ccfence();
4015 		KKASSERT(origpte == 0 ||
4016 			 (origpte & pmap->pmap_bits[PG_MANAGED_IDX]));
4017 	}
4018 
4019 	pa = VM_PAGE_TO_PHYS(m);
4020 	opa = origpte & PG_FRAME;
4021 
4022 	newpte = (pt_entry_t)(pa | pte_prot(pmap, prot) |
4023 		 pmap->pmap_bits[PG_V_IDX] | pmap->pmap_bits[PG_A_IDX]);
4024 	if (wired)
4025 		newpte |= pmap->pmap_bits[PG_W_IDX];
4026 	if (va < VM_MAX_USER_ADDRESS)
4027 		newpte |= pmap->pmap_bits[PG_U_IDX];
4028 	if (pte_pv)
4029 		newpte |= pmap->pmap_bits[PG_MANAGED_IDX];
4030 //	if (pmap == &kernel_pmap)
4031 //		newpte |= pgeflag;
4032 	newpte |= pmap->pmap_cache_bits[m->pat_mode];
4033 	if (m->flags & PG_FICTITIOUS)
4034 		newpte |= pmap->pmap_bits[PG_DEVICE_IDX];
4035 
4036 	/*
4037 	 * It is possible for multiple faults to occur in threaded
4038 	 * environments; the existing pte might be correct.
4039 	 */
4040 	if (((origpte ^ newpte) & ~(pt_entry_t)(pmap->pmap_bits[PG_M_IDX] |
4041 	    pmap->pmap_bits[PG_A_IDX])) == 0)
4042 		goto done;
4043 
4044 	if ((prot & VM_PROT_NOSYNC) == 0)
4045 		pmap_inval_init(&info);
4046 
4047 	/*
4048 	 * Ok, either the address changed or the protection or wiring
4049 	 * changed.
4050 	 *
4051 	 * Clear the current entry, interlocking the removal.  For managed
4052 	 * pte's this will also flush the modified state to the vm_page.
4053 	 * Atomic ops are mandatory in order to ensure that PG_M events are
4054 	 * not lost during any transition.
4055 	 *
4056 	 * WARNING: The caller has busied the new page but not the original
4057 	 *	    vm_page which we are trying to replace.  Because we hold
4058 	 *	    the pte_pv lock, but have not busied the page, PG bits
4059 	 *	    can be cleared out from under us.
4060 	 */
4061 	if (opa) {
4062 		if (pte_pv) {
4063 			/*
4064 			 * pmap_remove_pv_pte() unwires pt_pv and assumes
4065 			 * we will free pte_pv, but since we are reusing
4066 			 * pte_pv we want to retain the wire count.
4067 			 *
4068 			 * pt_pv won't exist for a kernel page (managed or
4069 			 * otherwise).
4070 			 */
4071 			if (pt_pv)
4072 				vm_page_wire_quick(pt_pv->pv_m);
4073 			if (prot & VM_PROT_NOSYNC)
4074 				pmap_remove_pv_pte(pte_pv, pt_pv, NULL);
4075 			else
4076 				pmap_remove_pv_pte(pte_pv, pt_pv, &info);
4077 			if (pte_pv->pv_m)
4078 				pmap_remove_pv_page(pte_pv);
4079 		} else if (prot & VM_PROT_NOSYNC) {
4080 			/*
4081 			 * Unmanaged page, NOSYNC (no mmu sync) requested.
4082 			 *
4083 			 * Leave wire count on PT page intact.
4084 			 */
4085 			(void)pte_load_clear(ptep);
4086 			cpu_invlpg((void *)va);
4087 			atomic_add_long(&pmap->pm_stats.resident_count, -1);
4088 		} else {
4089 			/*
4090 			 * Unmanaged page, normal enter.
4091 			 *
4092 			 * Leave wire count on PT page intact.
4093 			 */
4094 			pmap_inval_interlock(&info, pmap, va);
4095 			(void)pte_load_clear(ptep);
4096 			pmap_inval_deinterlock(&info, pmap);
4097 			atomic_add_long(&pmap->pm_stats.resident_count, -1);
4098 		}
4099 		KKASSERT(*ptep == 0);
4100 	}
4101 
4102 	if (pte_pv) {
4103 		/*
4104 		 * Enter on the PV list if part of our managed memory.
4105 		 * Wiring of the PT page is already handled.
4106 		 */
4107 		KKASSERT(pte_pv->pv_m == NULL);
4108 		vm_page_spin_lock(m);
4109 		pte_pv->pv_m = m;
4110 		pmap_page_stats_adding(m);
4111 		TAILQ_INSERT_TAIL(&m->md.pv_list, pte_pv, pv_list);
4112 		vm_page_flag_set(m, PG_MAPPED);
4113 		vm_page_spin_unlock(m);
4114 	} else if (pt_pv && opa == 0) {
4115 		/*
4116 		 * We have to adjust the wire count on the PT page ourselves
4117 		 * for unmanaged entries.  If opa was non-zero we retained
4118 		 * the existing wire count from the removal.
4119 		 */
4120 		vm_page_wire_quick(pt_pv->pv_m);
4121 	}
4122 
4123 	/*
4124 	 * Kernel VMAs (pt_pv == NULL) require pmap invalidation interlocks.
4125 	 *
4126 	 * User VMAs do not because those will be zero->non-zero, so no
4127 	 * stale entries to worry about at this point.
4128 	 *
4129 	 * For KVM there appear to still be issues.  Theoretically we
4130 	 * should be able to scrap the interlocks entirely but we
4131 	 * get crashes.
4132 	 */
4133 	if ((prot & VM_PROT_NOSYNC) == 0 && pt_pv == NULL)
4134 		pmap_inval_interlock(&info, pmap, va);
4135 
4136 	/*
4137 	 * Set the pte
4138 	 */
4139 	*(volatile pt_entry_t *)ptep = newpte;
4140 
4141 	if ((prot & VM_PROT_NOSYNC) == 0 && pt_pv == NULL)
4142 		pmap_inval_deinterlock(&info, pmap);
4143 	else if (pt_pv == NULL)
4144 		cpu_invlpg((void *)va);
4145 
4146 	if (wired) {
4147 		if (pte_pv) {
4148 			atomic_add_long(&pte_pv->pv_pmap->pm_stats.wired_count,
4149 					1);
4150 		} else {
4151 			atomic_add_long(&pmap->pm_stats.wired_count, 1);
4152 		}
4153 	}
4154 	if (newpte & pmap->pmap_bits[PG_RW_IDX])
4155 		vm_page_flag_set(m, PG_WRITEABLE);
4156 
4157 	/*
4158 	 * Unmanaged pages need manual resident_count tracking.
4159 	 */
4160 	if (pte_pv == NULL && pt_pv)
4161 		atomic_add_long(&pt_pv->pv_pmap->pm_stats.resident_count, 1);
4162 
4163 	/*
4164 	 * Cleanup
4165 	 */
4166 	if ((prot & VM_PROT_NOSYNC) == 0 || pte_pv == NULL)
4167 		pmap_inval_done(&info);
4168 done:
4169 	KKASSERT((newpte & pmap->pmap_bits[PG_MANAGED_IDX]) == 0 ||
4170 		 (m->flags & PG_MAPPED));
4171 
4172 	/*
4173 	 * Cleanup the pv entry, allowing other accessors.
4174 	 */
4175 	if (pte_pv)
4176 		pv_put(pte_pv);
4177 	if (pt_pv)
4178 		pv_put(pt_pv);
4179 }
4180 
4181 /*
4182  * This code works like pmap_enter() but assumes VM_PROT_READ and not-wired.
4183  * This code also assumes that the pmap has no pre-existing entry for this
4184  * VA.
4185  *
4186  * This code currently may only be used on user pmaps, not kernel_pmap.
4187  */
4188 void
4189 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m)
4190 {
4191 	pmap_enter(pmap, va, m, VM_PROT_READ, FALSE, NULL);
4192 }
4193 
4194 /*
4195  * Make a temporary mapping for a physical address.  This is only intended
4196  * to be used for panic dumps.
4197  *
4198  * The caller is responsible for calling smp_invltlb().
4199  */
4200 void *
4201 pmap_kenter_temporary(vm_paddr_t pa, long i)
4202 {
4203 	pmap_kenter_quick((vm_offset_t)crashdumpmap + (i * PAGE_SIZE), pa);
4204 	return ((void *)crashdumpmap);
4205 }
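
/*
 * Example (sketch): a dump-style loop mapping successive physical pages
 * through crashdumpmap, followed by the smp_invltlb() the caller is
 * responsible for.  'npages' and 'pa_base' are hypothetical.
 */
#if 0
	void *va;
	long i;

	for (i = 0; i < npages; ++i)
		va = pmap_kenter_temporary(pa_base + i * PAGE_SIZE, i);
	/* 'va' is the crashdumpmap base; pages appear at successive offsets */
	smp_invltlb();
#endif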
4206 
4207 #define MAX_INIT_PT (96)
4208 
4209 /*
4210  * This routine preloads the ptes for a given object into the specified pmap.
4211  * This eliminates the blast of soft faults on process startup and
4212  * immediately after an mmap.
4213  */
4214 static int pmap_object_init_pt_callback(vm_page_t p, void *data);
4215 
4216 void
4217 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_prot_t prot,
4218 		    vm_object_t object, vm_pindex_t pindex,
4219 		    vm_size_t size, int limit)
4220 {
4221 	struct rb_vm_page_scan_info info;
4222 	struct lwp *lp;
4223 	vm_size_t psize;
4224 
4225 	/*
4226 	 * We can't preinit if read access isn't set or there is no pmap
4227 	 * or object.
4228 	 */
4229 	if ((prot & VM_PROT_READ) == 0 || pmap == NULL || object == NULL)
4230 		return;
4231 
4232 	/*
4233 	 * We can't preinit if the pmap is not the current pmap
4234 	 */
4235 	lp = curthread->td_lwp;
4236 	if (lp == NULL || pmap != vmspace_pmap(lp->lwp_vmspace))
4237 		return;
4238 
4239 	/*
4240 	 * Misc additional checks
4241 	 */
4242 	psize = x86_64_btop(size);
4243 
4244 	if ((object->type != OBJT_VNODE) ||
4245 		((limit & MAP_PREFAULT_PARTIAL) && (psize > MAX_INIT_PT) &&
4246 			(object->resident_page_count > MAX_INIT_PT))) {
4247 		return;
4248 	}
4249 
4250 	if (pindex + psize > object->size) {
4251 		if (object->size < pindex)
4252 			return;
4253 		psize = object->size - pindex;
4254 	}
4255 
4256 	if (psize == 0)
4257 		return;
4258 
4259 	/*
4260 	 * If everything is segment-aligned do not pre-init here.  Instead
4261 	 * allow the normal vm_fault path to pass a segment hint to
4262 	 * pmap_enter() which will then use an object-referenced shared
4263 	 * page table page.
4264 	 */
4265 	if ((addr & SEG_MASK) == 0 &&
4266 	    (ctob(psize) & SEG_MASK) == 0 &&
4267 	    (ctob(pindex) & SEG_MASK) == 0) {
4268 		return;
4269 	}
4270 
4271 	/*
4272 	 * Use a red-black scan to traverse the requested range and load
4273 	 * any valid pages found into the pmap.
4274 	 *
4275 	 * We cannot safely scan the object's memq without holding the
4276 	 * object token.
4277 	 */
4278 	info.start_pindex = pindex;
4279 	info.end_pindex = pindex + psize - 1;
4280 	info.limit = limit;
4281 	info.mpte = NULL;
4282 	info.addr = addr;
4283 	info.pmap = pmap;
4284 
4285 	vm_object_hold_shared(object);
4286 	vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
4287 				pmap_object_init_pt_callback, &info);
4288 	vm_object_drop(object);
4289 }
4290 
4291 static
4292 int
4293 pmap_object_init_pt_callback(vm_page_t p, void *data)
4294 {
4295 	struct rb_vm_page_scan_info *info = data;
4296 	vm_pindex_t rel_index;
4297 
4298 	/*
4299 	 * Don't allow an madvise to blow away our really
4300 	 * free pages by allocating pv entries.
4301 	 */
4302 	if ((info->limit & MAP_PREFAULT_MADVISE) &&
4303 		vmstats.v_free_count < vmstats.v_free_reserved) {
4304 		    return(-1);
4305 	}
4306 
4307 	/*
4308 	 * Ignore list markers and ignore pages we cannot instantly
4309 	 * busy (while holding the object token).
4310 	 */
4311 	if (p->flags & PG_MARKER)
4312 		return 0;
4313 	if (vm_page_busy_try(p, TRUE))
4314 		return 0;
4315 	if (((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
4316 	    (p->flags & PG_FICTITIOUS) == 0) {
4317 		if ((p->queue - p->pc) == PQ_CACHE)
4318 			vm_page_deactivate(p);
4319 		rel_index = p->pindex - info->start_pindex;
4320 		pmap_enter_quick(info->pmap,
4321 				 info->addr + x86_64_ptob(rel_index), p);
4322 	}
4323 	vm_page_wakeup(p);
4324 	lwkt_yield();
4325 	return(0);
4326 }
4327 
4328 /*
4329  * Return TRUE if the pmap is in shape to trivially pre-fault the specified
4330  * address.
4331  *
4332  * Returns FALSE if it would be non-trivial or if a pte is already loaded
4333  * into the slot.
4334  *
4335  * XXX This is safe only because page table pages are not freed.
4336  */
4337 int
4338 pmap_prefault_ok(pmap_t pmap, vm_offset_t addr)
4339 {
4340 	pt_entry_t *pte;
4341 
4342 	/*spin_lock(&pmap->pm_spin);*/
4343 	if ((pte = pmap_pte(pmap, addr)) != NULL) {
4344 		if (*pte & pmap->pmap_bits[PG_V_IDX]) {
4345 			/*spin_unlock(&pmap->pm_spin);*/
4346 			return FALSE;
4347 		}
4348 	}
4349 	/*spin_unlock(&pmap->pm_spin);*/
4350 	return TRUE;
4351 }
4352 
4353 /*
4354  * Change the wiring attribute for a pmap/va pair.  The mapping must already
4355  * exist in the pmap.  The mapping may or may not be managed.
4356  */
4357 void
4358 pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired,
4359 		   vm_map_entry_t entry)
4360 {
4361 	pt_entry_t *ptep;
4362 	pv_entry_t pv;
4363 
4364 	if (pmap == NULL)
4365 		return;
4366 	lwkt_gettoken(&pmap->pm_token);
4367 	pv = pmap_allocpte_seg(pmap, pmap_pt_pindex(va), NULL, entry, va);
4368 	ptep = pv_pte_lookup(pv, pmap_pte_index(va));
4369 
4370 	if (wired && !pmap_pte_w(pmap, ptep))
4371 		atomic_add_long(&pv->pv_pmap->pm_stats.wired_count, 1);
4372 	else if (!wired && pmap_pte_w(pmap, ptep))
4373 		atomic_add_long(&pv->pv_pmap->pm_stats.wired_count, -1);
4374 
4375 	/*
4376 	 * Wiring is not a hardware characteristic so there is no need to
4377 	 * invalidate TLB.  However, in an SMP environment we must use
4378 	 * a locked bus cycle to update the pte (if we are not using
4379 	 * the pmap_inval_*() API that is)... it's ok to do this for simple
4380 	 * wiring changes.
4381 	 */
4382 	if (wired)
4383 		atomic_set_long(ptep, pmap->pmap_bits[PG_W_IDX]);
4384 	else
4385 		atomic_clear_long(ptep, pmap->pmap_bits[PG_W_IDX]);
4386 	pv_put(pv);
4387 	lwkt_reltoken(&pmap->pm_token);
4388 }
4389 
4390 
4391 
4392 /*
4393  * Copy the range specified by src_addr/len from the source map to
4394  * the range dst_addr/len in the destination map.
4395  *
4396  * This routine is only advisory and need not do anything.
4397  */
4398 void
4399 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
4400 	  vm_size_t len, vm_offset_t src_addr)
4401 {
4402 }
4403 
4404 /*
4405  * pmap_zero_page:
4406  *
4407  *	Zero the specified physical page.
4408  *
4409  *	This function may be called from an interrupt and no locking is
4410  *	required.
4411  */
4412 void
4413 pmap_zero_page(vm_paddr_t phys)
4414 {
4415 	vm_offset_t va = PHYS_TO_DMAP(phys);
4416 
4417 	pagezero((void *)va);
4418 }
4419 
4420 /*
4421  * pmap_page_assertzero:
4422  *
4423  *	Assert that a page is empty, panic if it isn't.
4424  */
4425 void
4426 pmap_page_assertzero(vm_paddr_t phys)
4427 {
4428 	vm_offset_t va = PHYS_TO_DMAP(phys);
4429 	size_t i;
4430 
4431 	for (i = 0; i < PAGE_SIZE; i += sizeof(long)) {
4432 		if (*(long *)((char *)va + i) != 0) {
4433 			panic("pmap_page_assertzero() @ %p not zero!",
4434 			      (void *)(intptr_t)va);
4435 		}
4436 	}
4437 }
4438 
4439 /*
4440  * pmap_zero_page_area:
4441  *
4442  *	Zero part of a physical page by mapping it into memory and clearing
4443  *	its contents with bzero.
4444  *
4445  *	off and size may not cover an area beyond a single hardware page.
4446  */
4447 void
4448 pmap_zero_page_area(vm_paddr_t phys, int off, int size)
4449 {
4450 	vm_offset_t virt = PHYS_TO_DMAP(phys);
4451 
4452 	bzero((char *)virt + off, size);
4453 }
4454 
4455 /*
4456  * pmap_copy_page:
4457  *
4458  *	Copy the physical page from the source PA to the target PA.
4459  *	This function may be called from an interrupt.  No locking
4460  *	is required.
4461  */
4462 void
4463 pmap_copy_page(vm_paddr_t src, vm_paddr_t dst)
4464 {
4465 	vm_offset_t src_virt, dst_virt;
4466 
4467 	src_virt = PHYS_TO_DMAP(src);
4468 	dst_virt = PHYS_TO_DMAP(dst);
4469 	bcopy((void *)src_virt, (void *)dst_virt, PAGE_SIZE);
4470 }
4471 
4472 /*
4473  * pmap_copy_page_frag:
4474  *
4475  *	Copy the physical page from the source PA to the target PA.
4476  *	This function may be called from an interrupt.  No locking
4477  *	is required.
4478  */
4479 void
4480 pmap_copy_page_frag(vm_paddr_t src, vm_paddr_t dst, size_t bytes)
4481 {
4482 	vm_offset_t src_virt, dst_virt;
4483 
4484 	src_virt = PHYS_TO_DMAP(src);
4485 	dst_virt = PHYS_TO_DMAP(dst);
4486 
4487 	bcopy((char *)src_virt + (src & PAGE_MASK),
4488 	      (char *)dst_virt + (dst & PAGE_MASK),
4489 	      bytes);
4490 }
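
/*
 * Example (sketch): the intra-page byte offsets for pmap_copy_page_frag()
 * ride in the low bits of the physical addresses.  This hypothetical call
 * copies 256 bytes starting at offset 512 of the source page to offset 0
 * of the destination page.
 */
#if 0
	pmap_copy_page_frag(src_pa + 512, dst_pa, 256);
#endif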
4491 
4492 /*
4493  * Returns true if the pmap's pv is one of the first 16 pvs linked to from
4494  * this page.  This count may be changed upwards or downwards in the future;
4495  * it is only necessary that true be returned for a small subset of pmaps
4496  * for proper page aging.
4497  */
4498 boolean_t
4499 pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
4500 {
4501 	pv_entry_t pv;
4502 	int loops = 0;
4503 
4504 	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
4505 		return FALSE;
4506 
4507 	vm_page_spin_lock(m);
4508 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
4509 		if (pv->pv_pmap == pmap) {
4510 			vm_page_spin_unlock(m);
4511 			return TRUE;
4512 		}
4513 		loops++;
4514 		if (loops >= 16)
4515 			break;
4516 	}
4517 	vm_page_spin_unlock(m);
4518 	return (FALSE);
4519 }
4520 
4521 /*
4522  * Remove all pages from the specified address space; this aids process
4523  * exit speed.  Also, this code may be special cased for the current
4524  * process only.
4525  */
4526 void
4527 pmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
4528 {
4529 	pmap_remove_noinval(pmap, sva, eva);
4530 	cpu_invltlb();
4531 }
4532 
4533 /*
4534  * pmap_testbit tests bits in pte's.  Note that the testbit/clearbit
4535  * routines are inline, and a lot of things compile-time evaluate.
4536  */
4537 static
4538 boolean_t
4539 pmap_testbit(vm_page_t m, int bit)
4540 {
4541 	pv_entry_t pv;
4542 	pt_entry_t *pte;
4543 	pmap_t pmap;
4544 
4545 	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
4546 		return FALSE;
4547 
4548 	if (TAILQ_FIRST(&m->md.pv_list) == NULL)
4549 		return FALSE;
4550 	vm_page_spin_lock(m);
4551 	if (TAILQ_FIRST(&m->md.pv_list) == NULL) {
4552 		vm_page_spin_unlock(m);
4553 		return FALSE;
4554 	}
4555 
4556 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
4557 
4558 #if defined(PMAP_DIAGNOSTIC)
4559 		if (pv->pv_pmap == NULL) {
4560 			kprintf("Null pmap (tb) at pindex: %"PRIu64"\n",
4561 			    pv->pv_pindex);
4562 			continue;
4563 		}
4564 #endif
4565 		pmap = pv->pv_pmap;
4566 
4567 		/*
4568 		 * If the bit being tested is the modified bit, then
4569 		 * mark clean_map and ptes as never
4570 		 * modified.
4571 		 *
4572 		 * WARNING!  Because we do not lock the pv, *pte can be in a
4573 		 *	     state of flux.  Despite this the value of *pte
4574 		 *	     will still be related to the vm_page in some way
4575 		 *	     because the pv cannot be destroyed as long as we
4576 		 *	     hold the vm_page spin lock.
4577 		 */
4578 		if (bit == PG_A_IDX || bit == PG_M_IDX) {
4579 				//& (pmap->pmap_bits[PG_A_IDX] | pmap->pmap_bits[PG_M_IDX])) {
4580 			if (!pmap_track_modified(pv->pv_pindex))
4581 				continue;
4582 		}
4583 
4584 		pte = pmap_pte_quick(pv->pv_pmap, pv->pv_pindex << PAGE_SHIFT);
4585 		if (*pte & pmap->pmap_bits[bit]) {
4586 			vm_page_spin_unlock(m);
4587 			return TRUE;
4588 		}
4589 	}
4590 	vm_page_spin_unlock(m);
4591 	return (FALSE);
4592 }
4593 
4594 /*
4595  * This routine is used to modify bits in ptes.  Only one bit should be
4596  * specified.  PG_RW requires special handling.
4597  *
4598  * Caller must NOT hold any spin locks
4599  */
4600 static __inline
4601 void
4602 pmap_clearbit(vm_page_t m, int bit_index)
4603 {
4604 	struct pmap_inval_info info;
4605 	pv_entry_t pv;
4606 	pt_entry_t *pte;
4607 	pt_entry_t pbits;
4608 	pmap_t pmap;
4609 
4610 	if (bit_index == PG_RW_IDX)
4611 		vm_page_flag_clear(m, PG_WRITEABLE);
4612 	if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) {
4613 		return;
4614 	}
4615 
4616 	/*
4617 	 * PG_M or PG_A case
4618 	 *
4619 	 * Loop over all current mappings, setting/clearing as appropriate.
4620 	 * If setting RO do we need to clear the VAC?
4621 	 *
4622 	 * NOTE: When clearing PG_M we could also (not implemented) drop
4623 	 *	 through to the PG_RW code and clear PG_RW too, forcing
4624 	 *	 a fault on write to redetect PG_M for virtual kernels, but
4625 	 *	 it isn't necessary since virtual kernels invalidate the
4626 	 *	 pte when they clear the VPTE_M bit in their virtual page
4627 	 *	 tables.
4628 	 *
4629 	 * NOTE: Does not re-dirty the page when clearing only PG_M.
4630 	 *
4631 	 * NOTE: Because we do not lock the pv, *pte can be in a state of
4632 	 *	 flux.  Despite this the value of *pte is still somewhat
4633 	 *	 related to the vm_page while we hold its spin lock.
4634 	 *
4635 	 *	 *pte can be zero due to this race.  Since we are clearing
4636 	 *	 bits we basically do no harm when this race occurs.
4637 	 */
4638 	if (bit_index != PG_RW_IDX) {
4639 		vm_page_spin_lock(m);
4640 		TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
4641 #if defined(PMAP_DIAGNOSTIC)
4642 			if (pv->pv_pmap == NULL) {
4643 				kprintf("Null pmap (cb) at pindex: %"PRIu64"\n",
4644 				    pv->pv_pindex);
4645 				continue;
4646 			}
4647 #endif
4648 			pmap = pv->pv_pmap;
4649 			pte = pmap_pte_quick(pv->pv_pmap,
4650 					     pv->pv_pindex << PAGE_SHIFT);
4651 			pbits = *pte;
4652 			if (pbits & pmap->pmap_bits[bit_index])
4653 				atomic_clear_long(pte, pmap->pmap_bits[bit_index]);
4654 		}
4655 		vm_page_spin_unlock(m);
4656 		return;
4657 	}
4658 
4659 	/*
4660 	 * Clear PG_RW.  Also clears PG_M and marks the page dirty if PG_M
4661 	 * was set.
4662 	 */
4663 	pmap_inval_init(&info);
4664 
4665 restart:
4666 	vm_page_spin_lock(m);
4667 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
4668 		/*
4669 		 * don't write protect pager mappings
4670 		 */
4671 		if (!pmap_track_modified(pv->pv_pindex))
4672 			continue;
4673 
4674 #if defined(PMAP_DIAGNOSTIC)
4675 		if (pv->pv_pmap == NULL) {
4676 			kprintf("Null pmap (cb) at pindex: %"PRIu64"\n",
4677 			    pv->pv_pindex);
4678 			continue;
4679 		}
4680 #endif
4681 		pmap = pv->pv_pmap;
4682 		/*
4683 		 * Skip pages which do not have PG_RW set.
4684 		 */
4685 		pte = pmap_pte_quick(pv->pv_pmap, pv->pv_pindex << PAGE_SHIFT);
4686 		if ((*pte & pmap->pmap_bits[PG_RW_IDX]) == 0)
4687 			continue;
4688 
4689 		/*
4690 		 * Lock the PV
4691 		 */
4692 		if (pv_hold_try(pv)) {
4693 			vm_page_spin_unlock(m);
4694 		} else {
4695 			vm_page_spin_unlock(m);
4696 			pv_lock(pv);	/* held, now do a blocking lock */
4697 		}
4698 		if (pv->pv_pmap != pmap || pv->pv_m != m) {
4699 			pv_put(pv);	/* and release */
4700 			goto restart;	/* anything could have happened */
4701 		}
4702 		pmap_inval_interlock(&info, pmap,
4703 				     (vm_offset_t)pv->pv_pindex << PAGE_SHIFT);
4704 		KKASSERT(pv->pv_pmap == pmap);
4705 		for (;;) {
4706 			pbits = *pte;
4707 			cpu_ccfence();
4708 			if (atomic_cmpset_long(pte, pbits, pbits &
4709 			    ~(pmap->pmap_bits[PG_RW_IDX] |
4710 			    pmap->pmap_bits[PG_M_IDX]))) {
4711 				break;
4712 			}
4713 		}
4714 		pmap_inval_deinterlock(&info, pmap);
4715 		vm_page_spin_lock(m);
4716 
4717 		/*
4718 		 * If PG_M was found to be set while we were clearing PG_RW
4719 		 * we also clear PG_M (done above) and mark the page dirty.
4720 		 * Callers expect this behavior.
4721 		 */
4722 		if (pbits & pmap->pmap_bits[PG_M_IDX])
4723 			vm_page_dirty(m);
4724 		pv_put(pv);
4725 	}
4726 	vm_page_spin_unlock(m);
4727 	pmap_inval_done(&info);
4728 }
4729 
4730 /*
4731  * Lower the permission for all mappings to a given page.
4732  *
4733  * Page must be busied by caller.  Because page is busied by caller this
4734  * should not be able to race a pmap_enter().
4735  */
4736 void
4737 pmap_page_protect(vm_page_t m, vm_prot_t prot)
4738 {
4739 	/* JG NX support? */
4740 	if ((prot & VM_PROT_WRITE) == 0) {
4741 		if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) {
4742 			/*
4743 			 * NOTE: pmap_clearbit(.. PG_RW) also clears
4744 			 *	 the PG_WRITEABLE flag in (m).
4745 			 */
4746 			pmap_clearbit(m, PG_RW_IDX);
4747 		} else {
4748 			pmap_remove_all(m);
4749 		}
4750 	}
4751 }
4752 
4753 vm_paddr_t
4754 pmap_phys_address(vm_pindex_t ppn)
4755 {
4756 	return (x86_64_ptob(ppn));
4757 }
4758 
4759 /*
4760  * Return a count of reference bits for a page, clearing those bits.
4761  * It is not necessary for every reference bit to be cleared, but it
4762  * is necessary that 0 only be returned when there are truly no
4763  * reference bits set.
4764  *
4765  * XXX: The exact number of bits to check and clear is a matter that
4766  * should be tested and standardized at some point in the future for
4767  * optimal aging of shared pages.
4768  *
4769  * This routine may not block.
4770  */
4771 int
4772 pmap_ts_referenced(vm_page_t m)
4773 {
4774 	pv_entry_t pv;
4775 	pt_entry_t *pte;
4776 	pmap_t pmap;
4777 	int rtval = 0;
4778 
4779 	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
4780 		return (rtval);
4781 
4782 	vm_page_spin_lock(m);
4783 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
4784 		if (!pmap_track_modified(pv->pv_pindex))
4785 			continue;
4786 		pmap = pv->pv_pmap;
4787 		pte = pmap_pte_quick(pv->pv_pmap, pv->pv_pindex << PAGE_SHIFT);
4788 		if (pte && (*pte & pmap->pmap_bits[PG_A_IDX])) {
4789 			atomic_clear_long(pte, pmap->pmap_bits[PG_A_IDX]);
4790 			rtval++;
4791 			if (rtval > 4)
4792 				break;
4793 		}
4794 	}
4795 	vm_page_spin_unlock(m);
4796 	return (rtval);
4797 }
4798 
4799 /*
4800  *	pmap_is_modified:
4801  *
4802  *	Return whether or not the specified physical page was modified
4803  *	in any physical maps.
4804  */
4805 boolean_t
4806 pmap_is_modified(vm_page_t m)
4807 {
4808 	boolean_t res;
4809 
4810 	res = pmap_testbit(m, PG_M_IDX);
4811 	return (res);
4812 }
4813 
4814 /*
4815  *	Clear the modify bits on the specified physical page.
4816  */
4817 void
4818 pmap_clear_modify(vm_page_t m)
4819 {
4820 	pmap_clearbit(m, PG_M_IDX);
4821 }
4822 
4823 /*
4824  *	pmap_clear_reference:
4825  *
4826  *	Clear the reference bit on the specified physical page.
4827  */
4828 void
4829 pmap_clear_reference(vm_page_t m)
4830 {
4831 	pmap_clearbit(m, PG_A_IDX);
4832 }
4833 
4834 /*
4835  * Miscellaneous support routines follow
4836  */
4837 
4838 static
4839 void
4840 i386_protection_init(void)
4841 {
4842 	int *kp, prot;
4843 
4844 	/* JG NX support may go here; No VM_PROT_EXECUTE ==> set NX bit  */
4845 	kp = protection_codes;
4846 	for (prot = 0; prot < PROTECTION_CODES_SIZE; prot++) {
4847 		switch (prot) {
4848 		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
4849 			/*
4850 			 * Read access is also 0. There isn't any execute bit,
4851 			 * so just make it readable.
4852 			 */
4853 		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
4854 		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
4855 		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
4856 			*kp++ = 0;
4857 			break;
4858 		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
4859 		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
4860 		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
4861 		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
4862 			*kp++ = pmap_bits_default[PG_RW_IDX];
4863 			break;
4864 		}
4865 	}
4866 }
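
/*
 * Example (sketch): how the protection_codes[] table built above is
 * consumed.  pmap_enter() translates a VM_PROT_* combination into PTE
 * bits via pte_prot(); the lookup presumably reduces to an array index
 * as illustrated here.
 */
#if 0
	pt_entry_t rw_bits;

	rw_bits = protection_codes[VM_PROT_READ | VM_PROT_WRITE];
	/* rw_bits == pmap_bits_default[PG_RW_IDX] after the init above */
#endif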
4867 
4868 /*
4869  * Map a set of physical memory pages into the kernel virtual
4870  * address space. Return a pointer to where it is mapped. This
4871  * routine is intended to be used for mapping device memory,
4872  * NOT real memory.
4873  *
4874  * NOTE: We can't use pgeflag unless we invalidate the pages one at
4875  *	 a time.
4876  *
4877  * NOTE: The PAT attributes {WRITE_BACK, WRITE_THROUGH, UNCACHED, UNCACHEABLE}
4878  *	 work whether the cpu supports PAT or not.  The remaining PAT
4879  *	 attributes {WRITE_PROTECTED, WRITE_COMBINING} only work if the cpu
4880  *	 supports PAT.
4881  */
4882 void *
4883 pmap_mapdev(vm_paddr_t pa, vm_size_t size)
4884 {
4885 	return(pmap_mapdev_attr(pa, size, PAT_WRITE_BACK));
4886 }
4887 
4888 void *
4889 pmap_mapdev_uncacheable(vm_paddr_t pa, vm_size_t size)
4890 {
4891 	return(pmap_mapdev_attr(pa, size, PAT_UNCACHEABLE));
4892 }
4893 
4894 void *
4895 pmap_mapbios(vm_paddr_t pa, vm_size_t size)
4896 {
4897 	return (pmap_mapdev_attr(pa, size, PAT_WRITE_BACK));
4898 }
4899 
4900 /*
4901  * Map a set of physical memory pages into the kernel virtual
4902  * address space. Return a pointer to where it is mapped. This
4903  * routine is intended to be used for mapping device memory,
4904  * NOT real memory.
4905  */
4906 void *
4907 pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
4908 {
4909 	vm_offset_t va, tmpva, offset;
4910 	pt_entry_t *pte;
4911 	vm_size_t tmpsize;
4912 
4913 	offset = pa & PAGE_MASK;
4914 	size = roundup(offset + size, PAGE_SIZE);
4915 
4916 	va = kmem_alloc_nofault(&kernel_map, size, PAGE_SIZE);
4917 	if (va == 0)
4918 		panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
4919 
4920 	pa = pa & ~PAGE_MASK;
4921 	for (tmpva = va, tmpsize = size; tmpsize > 0;) {
4922 		pte = vtopte(tmpva);
4923 		*pte = pa |
4924 		    kernel_pmap.pmap_bits[PG_RW_IDX] |
4925 		    kernel_pmap.pmap_bits[PG_V_IDX] | /* pgeflag | */
4926 		    kernel_pmap.pmap_cache_bits[mode];
4927 		tmpsize -= PAGE_SIZE;
4928 		tmpva += PAGE_SIZE;
4929 		pa += PAGE_SIZE;
4930 	}
4931 	pmap_invalidate_range(&kernel_pmap, va, va + size);
4932 	pmap_invalidate_cache_range(va, va + size);
4933 
4934 	return ((void *)(va + offset));
4935 }
4936 
4937 void
4938 pmap_unmapdev(vm_offset_t va, vm_size_t size)
4939 {
4940 	vm_offset_t base, offset;
4941 
4942 	base = va & ~PAGE_MASK;
4943 	offset = va & PAGE_MASK;
4944 	size = roundup(offset + size, PAGE_SIZE);
4945 	pmap_qremove(va, size >> PAGE_SHIFT);
4946 	kmem_free(&kernel_map, base, size);
4947 }
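
/*
 * Example (sketch): pairing pmap_mapdev_uncacheable() with pmap_unmapdev()
 * for a device register window.  The physical base below is hypothetical.
 */
#if 0
static void
pmap_mapdev_example(void)
{
	void *regs;

	regs = pmap_mapdev_uncacheable(0xfed00000UL, PAGE_SIZE);
	/* ... access device registers through 'regs' ... */
	pmap_unmapdev((vm_offset_t)regs, PAGE_SIZE);
}
#endif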
4948 
4949 /*
4950  * Sets the memory attribute for the specified page.
4951  */
4952 void
4953 pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
4954 {
4955 
4956     m->pat_mode = ma;
4957 
4958     /*
4959      * If "m" is a normal page, update its direct mapping.  This update
4960      * can be relied upon to perform any cache operations that are
4961      * required for data coherence.
4962      */
4963     if ((m->flags & PG_FICTITIOUS) == 0)
4964         pmap_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)), PAGE_SIZE,
4965         m->pat_mode);
4966 }
4967 
4968 /*
4969  * Change the PAT attribute on an existing kernel memory map.  Caller
4970  * must ensure that the virtual memory in question is not accessed
4971  * during the adjustment.
4972  */
4973 void
4974 pmap_change_attr(vm_offset_t va, vm_size_t count, int mode)
4975 {
4976 	pt_entry_t *pte;
4977 	vm_offset_t base;
4978 	int changed = 0;
4979 
4980 	if (va == 0)
4981 		panic("pmap_change_attr: va is NULL");
4982 	base = trunc_page(va);
4983 
4984 	while (count) {
4985 		pte = vtopte(va);
4986 		*pte = (*pte & ~(pt_entry_t)(kernel_pmap.pmap_cache_mask)) |
4987 		       kernel_pmap.pmap_cache_bits[mode];
4988 		--count;
4989 		va += PAGE_SIZE;
4990 	}
4991 
4992 	changed = 1;	/* XXX: not optimal */
4993 
4994 	/*
4995 	 * Flush CPU caches if required to make sure any data isn't cached that
4996 	 * shouldn't be, etc.
4997 	 */
4998 	if (changed) {
4999 		pmap_invalidate_range(&kernel_pmap, base, va);
5000 		pmap_invalidate_cache_range(base, va);
5001 	}
5002 }
5003 
5004 /*
5005  * perform the pmap work for mincore
5006  */
5007 int
5008 pmap_mincore(pmap_t pmap, vm_offset_t addr)
5009 {
5010 	pt_entry_t *ptep, pte;
5011 	vm_page_t m;
5012 	int val = 0;
5013 
5014 	lwkt_gettoken(&pmap->pm_token);
5015 	ptep = pmap_pte(pmap, addr);
5016 
5017 	if (ptep && (pte = *ptep) != 0) {
5018 		vm_offset_t pa;
5019 
5020 		val = MINCORE_INCORE;
5021 		if ((pte & pmap->pmap_bits[PG_MANAGED_IDX]) == 0)
5022 			goto done;
5023 
5024 		pa = pte & PG_FRAME;
5025 
5026 		if (pte & pmap->pmap_bits[PG_DEVICE_IDX])
5027 			m = NULL;
5028 		else
5029 			m = PHYS_TO_VM_PAGE(pa);
5030 
5031 		/*
5032 		 * Modified by us
5033 		 */
5034 		if (pte & pmap->pmap_bits[PG_M_IDX])
5035 			val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER;
5036 		/*
5037 		 * Modified by someone
5038 		 */
5039 		else if (m && (m->dirty || pmap_is_modified(m)))
5040 			val |= MINCORE_MODIFIED_OTHER;
5041 		/*
5042 		 * Referenced by us
5043 		 */
5044 		if (pte & pmap->pmap_bits[PG_A_IDX])
5045 			val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER;
5046 
5047 		/*
5048 		 * Referenced by someone
5049 		 */
5050 		else if (m && ((m->flags & PG_REFERENCED) ||
5051 				pmap_ts_referenced(m))) {
5052 			val |= MINCORE_REFERENCED_OTHER;
5053 			vm_page_flag_set(m, PG_REFERENCED);
5054 		}
5055 	}
5056 done:
5057 	lwkt_reltoken(&pmap->pm_token);
5058 
5059 	return val;
5060 }
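
/*
 * Example (sketch): decoding the value returned by pmap_mincore() the way
 * the mincore(2) path would, using the MINCORE_* flags from <sys/mman.h>.
 * 'pmap' and 'addr' are placeholders.
 */
#if 0
	int val = pmap_mincore(pmap, addr);

	if (val & MINCORE_INCORE) {
		if (val & MINCORE_MODIFIED)
			;	/* modified through this pmap */
		else if (val & MINCORE_MODIFIED_OTHER)
			;	/* modified through some other mapping */
	}
#endif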
5061 
5062 /*
5063  * Replace p->p_vmspace with a new one.  If adjrefs is non-zero the new
5064  * vmspace will be ref'd and the old one will be deref'd.
5065  *
5066  * The vmspace for all lwps associated with the process will be adjusted
5067  * and cr3 will be reloaded if any lwp is the current lwp.
5068  *
5069  * The process must hold the vmspace->vm_map.token for oldvm and newvm
5070  */
5071 void
5072 pmap_replacevm(struct proc *p, struct vmspace *newvm, int adjrefs)
5073 {
5074 	struct vmspace *oldvm;
5075 	struct lwp *lp;
5076 
5077 	oldvm = p->p_vmspace;
5078 	if (oldvm != newvm) {
5079 		if (adjrefs)
5080 			sysref_get(&newvm->vm_sysref);
5081 		p->p_vmspace = newvm;
5082 		KKASSERT(p->p_nthreads == 1);
5083 		lp = RB_ROOT(&p->p_lwp_tree);
5084 		pmap_setlwpvm(lp, newvm);
5085 		if (adjrefs)
5086 			sysref_put(&oldvm->vm_sysref);
5087 	}
5088 }
5089 
5090 /*
5091  * Set the vmspace for a LWP.  The vmspace is almost universally set the
5092  * same as the process vmspace, but virtual kernels need to swap out contexts
5093  * on a per-lwp basis.
5094  *
5095  * Caller does not necessarily hold any vmspace tokens.  Caller must control
5096  * the lwp (typically be in the context of the lwp).  We use a critical
5097  * section to protect against statclock and hardclock (statistics collection).
5098  */
5099 void
5100 pmap_setlwpvm(struct lwp *lp, struct vmspace *newvm)
5101 {
5102 	struct vmspace *oldvm;
5103 	struct pmap *pmap;
5104 
5105 	oldvm = lp->lwp_vmspace;
5106 
5107 	if (oldvm != newvm) {
5108 		crit_enter();
5109 		lp->lwp_vmspace = newvm;
5110 		if (curthread->td_lwp == lp) {
5111 			pmap = vmspace_pmap(newvm);
5112 			atomic_set_cpumask(&pmap->pm_active, mycpu->gd_cpumask);
5113 			if (pmap->pm_active & CPUMASK_LOCK)
5114 				pmap_interlock_wait(newvm);
5115 #if defined(SWTCH_OPTIM_STATS)
5116 			tlb_flush_count++;
5117 #endif
5118 			if (pmap->pmap_bits[TYPE_IDX] == REGULAR_PMAP) {
5119 				curthread->td_pcb->pcb_cr3 = vtophys(pmap->pm_pml4);
5120 			} else if (pmap->pmap_bits[TYPE_IDX] == EPT_PMAP) {
5121 				curthread->td_pcb->pcb_cr3 = KPML4phys;
5122 			} else {
5123 				panic("pmap_setlwpvm: unknown pmap type\n");
5124 			}
5125 			load_cr3(curthread->td_pcb->pcb_cr3);
5126 			pmap = vmspace_pmap(oldvm);
5127 			atomic_clear_cpumask(&pmap->pm_active, mycpu->gd_cpumask);
5128 		}
5129 		crit_exit();
5130 	}
5131 }
5132 
5133 /*
5134  * Called when switching to a locked pmap, used to interlock against pmaps
5135  * undergoing modifications to prevent us from activating the MMU for the
5136  * target pmap until all such modifications have completed.  We have to do
5137  * this because the thread making the modifications has already set up its
5138  * SMP synchronization mask.
5139  *
5140  * This function cannot sleep!
5141  *
5142  * No requirements.
5143  */
5144 void
5145 pmap_interlock_wait(struct vmspace *vm)
5146 {
5147 	struct pmap *pmap = &vm->vm_pmap;
5148 
5149 	if (pmap->pm_active & CPUMASK_LOCK) {
5150 		crit_enter();
5151 		KKASSERT(curthread->td_critcount >= 2);
5152 		DEBUG_PUSH_INFO("pmap_interlock_wait");
5153 		while (pmap->pm_active & CPUMASK_LOCK) {
5154 			cpu_ccfence();
5155 			lwkt_process_ipiq();
5156 		}
5157 		DEBUG_POP_INFO();
5158 		crit_exit();
5159 	}
5160 }
5161 
5162 vm_offset_t
5163 pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size)
5164 {
5165 
5166 	if ((obj == NULL) || (size < NBPDR) ||
5167 	    ((obj->type != OBJT_DEVICE) && (obj->type != OBJT_MGTDEVICE))) {
5168 		return addr;
5169 	}
5170 
5171 	addr = (addr + (NBPDR - 1)) & ~(NBPDR - 1);
5172 	return addr;
5173 }
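
/*
 * Example (sketch): the rounding in pmap_addr_hint() aligns the hint to
 * the next segment boundary.  Assuming NBPDR is 2MB on x86-64, an input
 * of 0x00007f0000101000 rounds up to 0x00007f0000200000.
 */
#if 0
	vm_offset_t hint;

	hint = (0x00007f0000101000UL + (NBPDR - 1)) & ~(NBPDR - 1);
	/* hint == 0x00007f0000200000 */
#endif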
5174 
5175 /*
5176  * Used by kmalloc/kfree, page already exists at va
5177  */
5178 vm_page_t
5179 pmap_kvtom(vm_offset_t va)
5180 {
5181 	pt_entry_t *ptep = vtopte(va);
5182 
5183 	KKASSERT((*ptep & kernel_pmap.pmap_bits[PG_DEVICE_IDX]) == 0);
5184 	return(PHYS_TO_VM_PAGE(*ptep & PG_FRAME));
5185 }
5186 
5187 /*
5188  * Initialize machine-specific shared page directory support.  This
5189  * is executed when a VM object is created.
5190  */
5191 void
5192 pmap_object_init(vm_object_t object)
5193 {
5194 	object->md.pmap_rw = NULL;
5195 	object->md.pmap_ro = NULL;
5196 }
5197 
5198 /*
5199  * Clean up machine-specific shared page directory support.  This
5200  * is executed when a VM object is destroyed.
5201  */
5202 void
5203 pmap_object_free(vm_object_t object)
5204 {
5205 	pmap_t pmap;
5206 
5207 	if ((pmap = object->md.pmap_rw) != NULL) {
5208 		object->md.pmap_rw = NULL;
5209 		pmap_remove_noinval(pmap,
5210 				  VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS);
5211 		pmap->pm_active = 0;
5212 		pmap_release(pmap);
5213 		pmap_puninit(pmap);
5214 		kfree(pmap, M_OBJPMAP);
5215 	}
5216 	if ((pmap = object->md.pmap_ro) != NULL) {
5217 		object->md.pmap_ro = NULL;
5218 		pmap_remove_noinval(pmap,
5219 				  VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS);
5220 		pmap->pm_active = 0;
5221 		pmap_release(pmap);
5222 		pmap_puninit(pmap);
5223 		kfree(pmap, M_OBJPMAP);
5224 	}
5225 }
5226