xref: /dragonfly/sys/platform/pc64/x86_64/pmap.c (revision 44753b81)
1 /*
2  * Copyright (c) 1991 Regents of the University of California.
3  * Copyright (c) 1994 John S. Dyson
4  * Copyright (c) 1994 David Greenman
5  * Copyright (c) 2003 Peter Wemm
6  * Copyright (c) 2005-2008 Alan L. Cox <alc@cs.rice.edu>
7  * Copyright (c) 2008, 2009 The DragonFly Project.
8  * Copyright (c) 2008, 2009 Jordan Gordeev.
9  * Copyright (c) 2011-2012 Matthew Dillon
10  * All rights reserved.
11  *
12  * This code is derived from software contributed to Berkeley by
13  * the Systems Programming Group of the University of Utah Computer
14  * Science Department and William Jolitz of UUNET Technologies Inc.
15  *
16  * Redistribution and use in source and binary forms, with or without
17  * modification, are permitted provided that the following conditions
18  * are met:
19  * 1. Redistributions of source code must retain the above copyright
20  *    notice, this list of conditions and the following disclaimer.
21  * 2. Redistributions in binary form must reproduce the above copyright
22  *    notice, this list of conditions and the following disclaimer in the
23  *    documentation and/or other materials provided with the distribution.
24  * 3. All advertising materials mentioning features or use of this software
25  *    must display the following acknowledgement:
26  *	This product includes software developed by the University of
27  *	California, Berkeley and its contributors.
28  * 4. Neither the name of the University nor the names of its contributors
29  *    may be used to endorse or promote products derived from this software
30  *    without specific prior written permission.
31  *
32  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
33  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
34  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
35  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
36  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
37  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
38  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
39  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
40  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
41  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
42  * SUCH DAMAGE.
43  */
44 /*
45  * Manage physical address maps for x86-64 systems.
46  */
47 
48 #if 0 /* JG */
49 #include "opt_disable_pse.h"
50 #include "opt_pmap.h"
51 #endif
52 #include "opt_msgbuf.h"
53 
54 #include <sys/param.h>
55 #include <sys/kernel.h>
56 #include <sys/proc.h>
57 #include <sys/msgbuf.h>
58 #include <sys/vmmeter.h>
59 #include <sys/mman.h>
60 #include <sys/systm.h>
61 
62 #include <vm/vm.h>
63 #include <vm/vm_param.h>
64 #include <sys/sysctl.h>
65 #include <sys/lock.h>
66 #include <vm/vm_kern.h>
67 #include <vm/vm_page.h>
68 #include <vm/vm_map.h>
69 #include <vm/vm_object.h>
70 #include <vm/vm_extern.h>
71 #include <vm/vm_pageout.h>
72 #include <vm/vm_pager.h>
73 #include <vm/vm_zone.h>
74 
75 #include <sys/user.h>
76 #include <sys/thread2.h>
77 #include <sys/sysref2.h>
78 #include <sys/spinlock2.h>
79 #include <vm/vm_page2.h>
80 
81 #include <machine/cputypes.h>
82 #include <machine/md_var.h>
83 #include <machine/specialreg.h>
84 #include <machine/smp.h>
85 #include <machine_base/apic/apicreg.h>
86 #include <machine/globaldata.h>
87 #include <machine/pmap.h>
88 #include <machine/pmap_inval.h>
89 #include <machine/inttypes.h>
90 
91 #include <ddb/ddb.h>
92 
93 #define PMAP_KEEP_PDIRS
94 #ifndef PMAP_SHPGPERPROC
95 #define PMAP_SHPGPERPROC 2000
96 #endif
97 
98 #if defined(DIAGNOSTIC)
99 #define PMAP_DIAGNOSTIC
100 #endif
101 
102 #define MINPV 2048
103 
104 /*
105  * pmap debugging will report who owns a pv lock when blocking.
106  */
107 #ifdef PMAP_DEBUG
108 
109 #define PMAP_DEBUG_DECL		,const char *func, int lineno
110 #define PMAP_DEBUG_ARGS		, __func__, __LINE__
111 #define PMAP_DEBUG_COPY		, func, lineno
112 
113 #define pv_get(pmap, pindex)		_pv_get(pmap, pindex		\
114 							PMAP_DEBUG_ARGS)
115 #define pv_lock(pv)			_pv_lock(pv			\
116 							PMAP_DEBUG_ARGS)
117 #define pv_hold_try(pv)			_pv_hold_try(pv			\
118 							PMAP_DEBUG_ARGS)
119 #define pv_alloc(pmap, pindex, isnewp)	_pv_alloc(pmap, pindex, isnewp	\
120 							PMAP_DEBUG_ARGS)
121 
122 #else
123 
124 #define PMAP_DEBUG_DECL
125 #define PMAP_DEBUG_ARGS
126 #define PMAP_DEBUG_COPY
127 
128 #define pv_get(pmap, pindex)		_pv_get(pmap, pindex)
129 #define pv_lock(pv)			_pv_lock(pv)
130 #define pv_hold_try(pv)			_pv_hold_try(pv)
131 #define pv_alloc(pmap, pindex, isnewp)	_pv_alloc(pmap, pindex, isnewp)
132 
133 #endif
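/*
 * Example (editor's note): with PMAP_DEBUG defined, a call such as
 * pv_lock(pv) expands to _pv_lock(pv, __func__, __LINE__), so when a
 * pv lock blocks, the debug code can report the call site that holds it.
 */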
134 
135 /*
136  * Get PDEs and PTEs for user/kernel address space
137  */
138 #define pdir_pde(m, v) (m[(vm_offset_t)(v) >> PDRSHIFT])
139 
140 #define pmap_pde_v(pmap, pte)		((*(pd_entry_t *)pte & pmap->pmap_bits[PG_V_IDX]) != 0)
141 #define pmap_pte_w(pmap, pte)		((*(pt_entry_t *)pte & pmap->pmap_bits[PG_W_IDX]) != 0)
142 #define pmap_pte_m(pmap, pte)		((*(pt_entry_t *)pte & pmap->pmap_bits[PG_M_IDX]) != 0)
143 #define pmap_pte_u(pmap, pte)		((*(pt_entry_t *)pte & pmap->pmap_bits[PG_U_IDX]) != 0)
144 #define pmap_pte_v(pmap, pte)		((*(pt_entry_t *)pte & pmap->pmap_bits[PG_V_IDX]) != 0)
145 
146 /*
147  * Given a map and a machine independent protection code,
148  * convert to a vax protection code.
149  */
150 #define pte_prot(m, p)		\
151 	(m->protection_codes[p & (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE)])
152 static int protection_codes[PROTECTION_CODES_SIZE];
153 
154 struct pmap kernel_pmap;
155 static TAILQ_HEAD(,pmap)	pmap_list = TAILQ_HEAD_INITIALIZER(pmap_list);
156 
157 MALLOC_DEFINE(M_OBJPMAP, "objpmap", "pmaps associated with VM objects");
158 
159 vm_paddr_t avail_start;		/* PA of first available physical page */
160 vm_paddr_t avail_end;		/* PA of last available physical page */
161 vm_offset_t virtual2_start;	/* cutout free area prior to kernel start */
162 vm_offset_t virtual2_end;
163 vm_offset_t virtual_start;	/* VA of first avail page (after kernel bss) */
164 vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
165 vm_offset_t KvaStart;		/* VA start of KVA space */
166 vm_offset_t KvaEnd;		/* VA end of KVA space (non-inclusive) */
167 vm_offset_t KvaSize;		/* max size of kernel virtual address space */
168 static boolean_t pmap_initialized = FALSE;	/* Has pmap_init completed? */
169 //static int pgeflag;		/* PG_G or-in */
170 //static int pseflag;		/* PG_PS or-in */
171 uint64_t PatMsr;
172 
173 static int ndmpdp;
174 static vm_paddr_t dmaplimit;
175 static int nkpt;
176 vm_offset_t kernel_vm_end = VM_MIN_KERNEL_ADDRESS;
177 
178 static pt_entry_t pat_pte_index[PAT_INDEX_SIZE];	/* PAT -> PG_ bits */
179 /*static pt_entry_t pat_pde_index[PAT_INDEX_SIZE];*/	/* PAT -> PG_ bits */
180 
181 static uint64_t KPTbase;
182 static uint64_t KPTphys;
183 static uint64_t	KPDphys;	/* phys addr of kernel level 2 */
184 static uint64_t	KPDbase;	/* phys addr of kernel level 2 @ KERNBASE */
185 uint64_t KPDPphys;	/* phys addr of kernel level 3 */
186 uint64_t KPML4phys;	/* phys addr of kernel level 4 */
187 
188 static uint64_t	DMPDphys;	/* phys addr of direct mapped level 2 */
189 static uint64_t	DMPDPphys;	/* phys addr of direct mapped level 3 */
190 
191 /*
192  * Data for the pv entry allocation mechanism
193  */
194 static vm_zone_t pvzone;
195 static struct vm_zone pvzone_store;
196 static struct vm_object pvzone_obj;
197 static int pv_entry_max=0, pv_entry_high_water=0;
198 static int pmap_pagedaemon_waken = 0;
199 static struct pv_entry *pvinit;
200 
201 /*
202  * All those kernel PT submaps that BSD is so fond of
203  */
204 pt_entry_t *CMAP1 = NULL, *ptmmap;
205 caddr_t CADDR1 = NULL, ptvmmap = NULL;
206 static pt_entry_t *msgbufmap;
207 struct msgbuf *msgbufp=NULL;
208 
209 /*
210  * PMAP default PG_* bits. Needed to be able to add
211  * EPT/NPT page table pmap_bits for the VMM module.
212  */
213 uint64_t pmap_bits_default[] = {
214 		REGULAR_PMAP,					/* TYPE_IDX		0 */
215 		X86_PG_V,					/* PG_V_IDX		1 */
216 		X86_PG_RW,					/* PG_RW_IDX		2 */
217 		X86_PG_U,					/* PG_U_IDX		3 */
218 		X86_PG_A,					/* PG_A_IDX		4 */
219 		X86_PG_M,					/* PG_M_IDX		5 */
220 		X86_PG_PS,					/* PG_PS_IDX		6 */
221 		X86_PG_G,					/* PG_G_IDX		7 */
222 		X86_PG_AVAIL1,					/* PG_AVAIL1_IDX	8 */
223 		X86_PG_AVAIL2,					/* PG_AVAIL2_IDX	9 */
224 		X86_PG_AVAIL3,					/* PG_AVAIL3_IDX	10 */
225 		X86_PG_NC_PWT | X86_PG_NC_PCD,			/* PG_N_IDX	11 */
226 };
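/*
 * Editor's illustration of the indirection above: a VMM supplying
 * EPT/NPT semantics would override the per-pmap copy made by
 * pmap_pinit_defaults().  The EPT_* names below are hypothetical,
 * not definitions from this file.
 */
#if 0 /* editor's example */
	pmap->pmap_bits[TYPE_IDX] = EPT_PMAP;
	pmap->pmap_bits[PG_V_IDX] = EPT_PG_READ | EPT_PG_EXECUTE;
#endif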
227 /*
228  * Crashdump maps.
229  */
230 static pt_entry_t *pt_crashdumpmap;
231 static caddr_t crashdumpmap;
232 
233 #ifdef PMAP_DEBUG2
234 static int pmap_enter_debug = 0;
235 SYSCTL_INT(_machdep, OID_AUTO, pmap_enter_debug, CTLFLAG_RW,
236     &pmap_enter_debug, 0, "Debug pmap_enter's");
237 #endif
238 static int pmap_yield_count = 64;
239 SYSCTL_INT(_machdep, OID_AUTO, pmap_yield_count, CTLFLAG_RW,
240     &pmap_yield_count, 0, "Yield during init_pt/release");
241 static int pmap_mmu_optimize = 0;
242 SYSCTL_INT(_machdep, OID_AUTO, pmap_mmu_optimize, CTLFLAG_RW,
243     &pmap_mmu_optimize, 0, "Share page table pages when possible");
244 int pmap_fast_kernel_cpusync = 0;
245 SYSCTL_INT(_machdep, OID_AUTO, pmap_fast_kernel_cpusync, CTLFLAG_RW,
246     &pmap_fast_kernel_cpusync, 0, "Fast kernel pmap TLB synchronization");
247 
248 #define DISABLE_PSE
249 
250 /* Standard user access functions */
251 extern int std_copyinstr (const void *udaddr, void *kaddr, size_t len,
252     size_t *lencopied);
253 extern int std_copyin (const void *udaddr, void *kaddr, size_t len);
254 extern int std_copyout (const void *kaddr, void *udaddr, size_t len);
255 extern int std_fubyte (const void *base);
256 extern int std_subyte (void *base, int byte);
257 extern long std_fuword (const void *base);
258 extern int std_suword (void *base, long word);
259 extern int std_suword32 (void *base, int word);
260 
261 static void pv_hold(pv_entry_t pv);
262 static int _pv_hold_try(pv_entry_t pv
263 				PMAP_DEBUG_DECL);
264 static void pv_drop(pv_entry_t pv);
265 static void _pv_lock(pv_entry_t pv
266 				PMAP_DEBUG_DECL);
267 static void pv_unlock(pv_entry_t pv);
268 static pv_entry_t _pv_alloc(pmap_t pmap, vm_pindex_t pindex, int *isnew
269 				PMAP_DEBUG_DECL);
270 static pv_entry_t _pv_get(pmap_t pmap, vm_pindex_t pindex
271 				PMAP_DEBUG_DECL);
272 static pv_entry_t pv_get_try(pmap_t pmap, vm_pindex_t pindex, int *errorp);
273 static pv_entry_t pv_find(pmap_t pmap, vm_pindex_t pindex);
274 static void pv_put(pv_entry_t pv);
275 static void pv_free(pv_entry_t pv);
276 static void *pv_pte_lookup(pv_entry_t pv, vm_pindex_t pindex);
277 static pv_entry_t pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex,
278 		      pv_entry_t *pvpp);
279 static pv_entry_t pmap_allocpte_seg(pmap_t pmap, vm_pindex_t ptepindex,
280 		      pv_entry_t *pvpp, vm_map_entry_t entry, vm_offset_t va);
281 static void pmap_remove_pv_pte(pv_entry_t pv, pv_entry_t pvp,
282 			pmap_inval_bulk_t *bulk);
283 static vm_page_t pmap_remove_pv_page(pv_entry_t pv);
284 static int pmap_release_pv(pv_entry_t pv, pv_entry_t pvp,
285 			pmap_inval_bulk_t *bulk);
286 
287 struct pmap_scan_info;
288 static void pmap_remove_callback(pmap_t pmap, struct pmap_scan_info *info,
289 		      pv_entry_t pte_pv, pv_entry_t pt_pv, int sharept,
290 		      vm_offset_t va, pt_entry_t *ptep, void *arg __unused);
291 static void pmap_protect_callback(pmap_t pmap, struct pmap_scan_info *info,
292 		      pv_entry_t pte_pv, pv_entry_t pt_pv, int sharept,
293 		      vm_offset_t va, pt_entry_t *ptep, void *arg __unused);
294 
295 static void i386_protection_init (void);
296 static void create_pagetables(vm_paddr_t *firstaddr);
297 static void pmap_remove_all (vm_page_t m);
298 static boolean_t pmap_testbit (vm_page_t m, int bit);
299 
300 static pt_entry_t * pmap_pte_quick (pmap_t pmap, vm_offset_t va);
301 static vm_offset_t pmap_kmem_choose(vm_offset_t addr);
302 
303 static void pmap_pinit_defaults(struct pmap *pmap);
304 
305 static unsigned pdir4mb;
306 
307 static int
308 pv_entry_compare(pv_entry_t pv1, pv_entry_t pv2)
309 {
310 	if (pv1->pv_pindex < pv2->pv_pindex)
311 		return(-1);
312 	if (pv1->pv_pindex > pv2->pv_pindex)
313 		return(1);
314 	return(0);
315 }
316 
317 RB_GENERATE2(pv_entry_rb_tree, pv_entry, pv_entry,
318              pv_entry_compare, vm_pindex_t, pv_pindex);
319 
320 static __inline
321 void
322 pmap_page_stats_adding(vm_page_t m)
323 {
324 	globaldata_t gd = mycpu;
325 
326 	if (TAILQ_EMPTY(&m->md.pv_list)) {
327 		++gd->gd_vmtotal.t_arm;
328 	} else if (TAILQ_FIRST(&m->md.pv_list) ==
329 		   TAILQ_LAST(&m->md.pv_list, md_page_pv_list)) {
330 		++gd->gd_vmtotal.t_armshr;
331 		++gd->gd_vmtotal.t_avmshr;
332 	} else {
333 		++gd->gd_vmtotal.t_avmshr;
334 	}
335 }
336 
337 static __inline
338 void
339 pmap_page_stats_deleting(vm_page_t m)
340 {
341 	globaldata_t gd = mycpu;
342 
343 	if (TAILQ_EMPTY(&m->md.pv_list)) {
344 		--gd->gd_vmtotal.t_arm;
345 	} else if (TAILQ_FIRST(&m->md.pv_list) ==
346 		   TAILQ_LAST(&m->md.pv_list, md_page_pv_list)) {
347 		--gd->gd_vmtotal.t_armshr;
348 		--gd->gd_vmtotal.t_avmshr;
349 	} else {
350 		--gd->gd_vmtotal.t_avmshr;
351 	}
352 }
353 
354 /*
355  * Move the kernel virtual free pointer to the next
356  * 2MB.  This is used to help improve performance
357  * by using a large (2MB) page for much of the kernel
358  * (.text, .data, .bss).
359  */
360 static
361 vm_offset_t
362 pmap_kmem_choose(vm_offset_t addr)
363 {
364 	vm_offset_t newaddr = addr;
365 
366 	newaddr = roundup2(addr, NBPDR);
367 	return newaddr;
368 }
369 
370 /*
371  * pmap_pte_quick:
372  *
373  *	Super fast pmap_pte routine best used when scanning the pv lists.
374  *	This eliminates many coarse-grained invltlb calls.  Note that many of
375  *	the pv list scans are across different pmaps and it is very wasteful
376  *	to do an entire invltlb when checking a single mapping.
377  */
378 static __inline pt_entry_t *pmap_pte(pmap_t pmap, vm_offset_t va);
379 
380 static
381 pt_entry_t *
382 pmap_pte_quick(pmap_t pmap, vm_offset_t va)
383 {
384 	return pmap_pte(pmap, va);
385 }
386 
387 /*
388  * Returns the pindex of a page table entry (representing a terminal page).
389  * There are NUPTE_TOTAL page table entries possible (a huge number)
390  *
391  * x86-64 has a 48-bit address space, where bit 47 is sign-extended out.
392  * We want to properly translate negative KVAs.
393  */
394 static __inline
395 vm_pindex_t
396 pmap_pte_pindex(vm_offset_t va)
397 {
398 	return ((va >> PAGE_SHIFT) & (NUPTE_TOTAL - 1));
399 }
400 
401 /*
402  * Returns the pindex of a page table.
403  */
404 static __inline
405 vm_pindex_t
406 pmap_pt_pindex(vm_offset_t va)
407 {
408 	return (NUPTE_TOTAL + ((va >> PDRSHIFT) & (NUPT_TOTAL - 1)));
409 }
410 
411 /*
412  * Returns the pindex of a page directory.
413  */
414 static __inline
415 vm_pindex_t
416 pmap_pd_pindex(vm_offset_t va)
417 {
418 	return (NUPTE_TOTAL + NUPT_TOTAL +
419 		((va >> PDPSHIFT) & (NUPD_TOTAL - 1)));
420 }
421 
422 static __inline
423 vm_pindex_t
424 pmap_pdp_pindex(vm_offset_t va)
425 {
426 	return (NUPTE_TOTAL + NUPT_TOTAL + NUPD_TOTAL +
427 		((va >> PML4SHIFT) & (NUPDP_TOTAL - 1)));
428 }
429 
430 static __inline
431 vm_pindex_t
432 pmap_pml4_pindex(void)
433 {
434 	return (NUPTE_TOTAL + NUPT_TOTAL + NUPD_TOTAL + NUPDP_TOTAL);
435 }
436 
437 /*
438  * Return various clipped indexes for a given VA
439  *
440  * Returns the index of a pte in a page table, representing a terminal
441  * page.
442  */
443 static __inline
444 vm_pindex_t
445 pmap_pte_index(vm_offset_t va)
446 {
447 	return ((va >> PAGE_SHIFT) & ((1ul << NPTEPGSHIFT) - 1));
448 }
449 
450 /*
451  * Returns the index of a pt in a page directory, representing a page
452  * table.
453  */
454 static __inline
455 vm_pindex_t
456 pmap_pt_index(vm_offset_t va)
457 {
458 	return ((va >> PDRSHIFT) & ((1ul << NPDEPGSHIFT) - 1));
459 }
460 
461 /*
462  * Returns the index of a pd in a page directory page, representing a page
463  * directory.
464  */
465 static __inline
466 vm_pindex_t
467 pmap_pd_index(vm_offset_t va)
468 {
469 	return ((va >> PDPSHIFT) & ((1ul << NPDPEPGSHIFT) - 1));
470 }
471 
472 /*
473  * Returns the index of a pdp in the pml4 table, representing a page
474  * directory page.
475  */
476 static __inline
477 vm_pindex_t
478 pmap_pdp_index(vm_offset_t va)
479 {
480 	return ((va >> PML4SHIFT) & ((1ul << NPML4EPGSHIFT) - 1));
481 }
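/*
 * Worked example (editor's note), assuming the standard x86-64 shifts
 * (PAGE_SHIFT=12, PDRSHIFT=21, PDPSHIFT=30, PML4SHIFT=39) and 512
 * entries per level:
 *
 *	va = 0x0000000040201000
 *	pmap_pte_index(va) = (va >> 12) & 511 = 1
 *	pmap_pt_index(va)  = (va >> 21) & 511 = 1
 *	pmap_pd_index(va)  = (va >> 30) & 511 = 1
 *	pmap_pdp_index(va) = (va >> 39) & 511 = 0
 *
 * The pmap_xx_pindex() functions further above instead place every
 * level in one global PV index space: PTEs occupy [0, NUPTE_TOTAL),
 * PTs the next NUPT_TOTAL slots, then PDs, then PDPs, with the single
 * PML4 pindex last.
 */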
482 
483 /*
484  * Generic procedure to index a pte from a pt, pd, or pdp.
485  *
486  * NOTE: Normally passed pindex as pmap_xx_index().  pmap_xx_pindex() is NOT
487  *	 a page table page index but is instead a PV lookup index.
488  */
489 static
490 void *
491 pv_pte_lookup(pv_entry_t pv, vm_pindex_t pindex)
492 {
493 	pt_entry_t *pte;
494 
495 	pte = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pv->pv_m));
496 	return(&pte[pindex]);
497 }
498 
499 /*
500  * Return pointer to PDP slot in the PML4
501  */
502 static __inline
503 pml4_entry_t *
504 pmap_pdp(pmap_t pmap, vm_offset_t va)
505 {
506 	return (&pmap->pm_pml4[pmap_pdp_index(va)]);
507 }
508 
509 /*
510  * Return pointer to PD slot in the PDP given a pointer to the PDP
511  */
512 static __inline
513 pdp_entry_t *
514 pmap_pdp_to_pd(pml4_entry_t pdp_pte, vm_offset_t va)
515 {
516 	pdp_entry_t *pd;
517 
518 	pd = (pdp_entry_t *)PHYS_TO_DMAP(pdp_pte & PG_FRAME);
519 	return (&pd[pmap_pd_index(va)]);
520 }
521 
522 /*
523  * Return pointer to PD slot in the PDP.
524  */
525 static __inline
526 pdp_entry_t *
527 pmap_pd(pmap_t pmap, vm_offset_t va)
528 {
529 	pml4_entry_t *pdp;
530 
531 	pdp = pmap_pdp(pmap, va);
532 	if ((*pdp & pmap->pmap_bits[PG_V_IDX]) == 0)
533 		return NULL;
534 	return (pmap_pdp_to_pd(*pdp, va));
535 }
536 
537 /*
538  * Return pointer to PT slot in the PD given a pointer to the PD
539  */
540 static __inline
541 pd_entry_t *
542 pmap_pd_to_pt(pdp_entry_t pd_pte, vm_offset_t va)
543 {
544 	pd_entry_t *pt;
545 
546 	pt = (pd_entry_t *)PHYS_TO_DMAP(pd_pte & PG_FRAME);
547 	return (&pt[pmap_pt_index(va)]);
548 }
549 
550 /*
551  * Return pointer to PT slot in the PD
552  *
553  * SIMPLE PMAP NOTE: Simple pmaps (embedded in objects) do not have PDPs,
554  *		     so we cannot lookup the PD via the PDP.  Instead we
555  *		     must look it up via the pmap.
556  */
557 static __inline
558 pd_entry_t *
559 pmap_pt(pmap_t pmap, vm_offset_t va)
560 {
561 	pdp_entry_t *pd;
562 	pv_entry_t pv;
563 	vm_pindex_t pd_pindex;
564 
565 	if (pmap->pm_flags & PMAP_FLAG_SIMPLE) {
566 		pd_pindex = pmap_pd_pindex(va);
567 		spin_lock(&pmap->pm_spin);
568 		pv = pv_entry_rb_tree_RB_LOOKUP(&pmap->pm_pvroot, pd_pindex);
569 		spin_unlock(&pmap->pm_spin);
570 		if (pv == NULL || pv->pv_m == NULL)
571 			return NULL;
572 		return (pmap_pd_to_pt(VM_PAGE_TO_PHYS(pv->pv_m), va));
573 	} else {
574 		pd = pmap_pd(pmap, va);
575 		if (pd == NULL || (*pd & pmap->pmap_bits[PG_V_IDX]) == 0)
576 			 return NULL;
577 		return (pmap_pd_to_pt(*pd, va));
578 	}
579 }
580 
581 /*
582  * Return pointer to PTE slot in the PT given a pointer to the PT
583  */
584 static __inline
585 pt_entry_t *
586 pmap_pt_to_pte(pd_entry_t pt_pte, vm_offset_t va)
587 {
588 	pt_entry_t *pte;
589 
590 	pte = (pt_entry_t *)PHYS_TO_DMAP(pt_pte & PG_FRAME);
591 	return (&pte[pmap_pte_index(va)]);
592 }
593 
594 /*
595  * Return pointer to PTE slot in the PT
596  */
597 static __inline
598 pt_entry_t *
599 pmap_pte(pmap_t pmap, vm_offset_t va)
600 {
601 	pd_entry_t *pt;
602 
603 	pt = pmap_pt(pmap, va);
604 	if (pt == NULL || (*pt & pmap->pmap_bits[PG_V_IDX]) == 0)
605 		 return NULL;
606 	if ((*pt & pmap->pmap_bits[PG_PS_IDX]) != 0)
607 		return ((pt_entry_t *)pt);
608 	return (pmap_pt_to_pte(*pt, va));
609 }
610 
611 /*
612  * Of all the layers (PTE, PT, PD, PDP, PML4) the best one to cache is
613  * the PT layer.  This will speed up core pmap operations considerably.
614  *
615  * NOTE: The pmap spinlock does not need to be held but the passed-in pv
616  *	 must be in a known associated state (typically by being locked when
617  *	 the pmap spinlock isn't held).  We allow the race for that case.
618  */
619 static __inline
620 void
621 pv_cache(pv_entry_t pv, vm_pindex_t pindex)
622 {
623 	if (pindex >= pmap_pt_pindex(0) && pindex <= pmap_pd_pindex(0))
624 		pv->pv_pmap->pm_pvhint = pv;
625 }
626 
627 
628 /*
629  * Return address of PT slot in PD (KVM only)
630  *
631  * Cannot be used for user page tables because it might interfere with
632  * the shared page-table-page optimization (pmap_mmu_optimize).
633  */
634 static __inline
635 pd_entry_t *
636 vtopt(vm_offset_t va)
637 {
638 	uint64_t mask = ((1ul << (NPDEPGSHIFT + NPDPEPGSHIFT +
639 				  NPML4EPGSHIFT)) - 1);
640 
641 	return (PDmap + ((va >> PDRSHIFT) & mask));
642 }
643 
644 /*
645  * KVM - return address of PTE slot in PT
646  */
647 static __inline
648 pt_entry_t *
649 vtopte(vm_offset_t va)
650 {
651 	uint64_t mask = ((1ul << (NPTEPGSHIFT + NPDEPGSHIFT +
652 				  NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1);
653 
654 	return (PTmap + ((va >> PAGE_SHIFT) & mask));
655 }
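/*
 * Usage sketch (editor's note): vtopte() is only valid for kernel VAs
 * reachable through the recursive PML4 slot set up in
 * create_pagetables().  A 4KB-mapping lookup resembles the tail of
 * pmap_kextract() below:
 */
#if 0 /* editor's example */
	pt_entry_t *pte = vtopte(va);
	if (*pte & kernel_pmap.pmap_bits[PG_V_IDX])
		pa = (*pte & PG_FRAME) | (va & PAGE_MASK);
#endif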
656 
657 static uint64_t
658 allocpages(vm_paddr_t *firstaddr, long n)
659 {
660 	uint64_t ret;
661 
662 	ret = *firstaddr;
663 	bzero((void *)ret, n * PAGE_SIZE);
664 	*firstaddr += n * PAGE_SIZE;
665 	return (ret);
666 }
667 
668 static
669 void
670 create_pagetables(vm_paddr_t *firstaddr)
671 {
672 	long i;		/* must be 64 bits */
673 	long nkpt_base;
674 	long nkpt_phys;
675 	int j;
676 
677 	/*
678 	 * We are running (mostly) V=P at this point
679 	 *
680 	 * Calculate NKPT - number of kernel page tables.  We have to
681 	 * accommodate preallocation of the vm_page_array, dump bitmap,
682 	 * MSGBUF_SIZE, and other stuff.  Be generous.
683 	 *
684 	 * Maxmem is in pages.
685 	 *
686 	 * ndmpdp is the number of 1GB pages we wish to map.
687 	 */
688 	ndmpdp = (ptoa(Maxmem) + NBPDP - 1) >> PDPSHIFT;
689 	if (ndmpdp < 4)		/* Minimum 4GB of dirmap */
690 		ndmpdp = 4;
691 	KKASSERT(ndmpdp <= NKPDPE * NPDEPG);
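	/*
	 * Worked example (editor's note): with 8GB of physical memory,
	 * ptoa(Maxmem) is 8GB and NBPDP is 1GB, so
	 * ndmpdp = (8GB + 1GB - 1) >> 30 = 8 one-gigabyte mappings.
	 */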
692 
693 	/*
694 	 * Starting at the beginning of kvm (not KERNBASE).
695 	 */
696 	nkpt_phys = (Maxmem * sizeof(struct vm_page) + NBPDR - 1) / NBPDR;
697 	nkpt_phys += (Maxmem * sizeof(struct pv_entry) + NBPDR - 1) / NBPDR;
698 	nkpt_phys += ((nkpt + nkpt + 1 + NKPML4E + NKPDPE + NDMPML4E +
699 		       ndmpdp) + 511) / 512;
700 	nkpt_phys += 128;
701 
702 	/*
703 	 * Starting at KERNBASE - map 2G worth of page table pages.
704 	 * KERNBASE is offset -2G from the end of kvm.
705 	 */
706 	nkpt_base = (NPDPEPG - KPDPI) * NPTEPG;	/* typically 2 x 512 */
707 
708 	/*
709 	 * Allocate pages
710 	 */
711 	KPTbase = allocpages(firstaddr, nkpt_base);
712 	KPTphys = allocpages(firstaddr, nkpt_phys);
713 	KPML4phys = allocpages(firstaddr, 1);
714 	KPDPphys = allocpages(firstaddr, NKPML4E);
715 	KPDphys = allocpages(firstaddr, NKPDPE);
716 
717 	/*
718 	 * Calculate the page directory base for KERNBASE,
719 	 * that is where we start populating the page table pages.
720 	 * Basically this is the end - 2.
721 	 */
722 	KPDbase = KPDphys + ((NKPDPE - (NPDPEPG - KPDPI)) << PAGE_SHIFT);
723 
724 	DMPDPphys = allocpages(firstaddr, NDMPML4E);
725 	if ((amd_feature & AMDID_PAGE1GB) == 0)
726 		DMPDphys = allocpages(firstaddr, ndmpdp);
727 	dmaplimit = (vm_paddr_t)ndmpdp << PDPSHIFT;
728 
729 	/*
730 	 * Fill in the underlying page table pages for the area around
731 	 * KERNBASE.  This remaps low physical memory to KERNBASE.
732 	 *
733 	 * Read-only from zero to physfree
734 	 * XXX not fully used, underneath 2M pages
735 	 */
736 	for (i = 0; (i << PAGE_SHIFT) < *firstaddr; i++) {
737 		((pt_entry_t *)KPTbase)[i] = i << PAGE_SHIFT;
738 		((pt_entry_t *)KPTbase)[i] |=
739 		    pmap_bits_default[PG_RW_IDX] |
740 		    pmap_bits_default[PG_V_IDX] |
741 		    pmap_bits_default[PG_G_IDX];
742 	}
743 
744 	/*
745 	 * Now map the initial kernel page tables.  One block of page
746 	 * tables is placed at the beginning of kernel virtual memory,
747 	 * and another block is placed at KERNBASE to map the kernel binary,
748 	 * data, bss, and initial pre-allocations.
749 	 */
750 	for (i = 0; i < nkpt_base; i++) {
751 		((pd_entry_t *)KPDbase)[i] = KPTbase + (i << PAGE_SHIFT);
752 		((pd_entry_t *)KPDbase)[i] |=
753 		    pmap_bits_default[PG_RW_IDX] |
754 		    pmap_bits_default[PG_V_IDX];
755 	}
756 	for (i = 0; i < nkpt_phys; i++) {
757 		((pd_entry_t *)KPDphys)[i] = KPTphys + (i << PAGE_SHIFT);
758 		((pd_entry_t *)KPDphys)[i] |=
759 		    pmap_bits_default[PG_RW_IDX] |
760 		    pmap_bits_default[PG_V_IDX];
761 	}
762 
763 	/*
764 	 * Map from zero to end of allocations using 2M pages as an
765 	 * optimization.  This will bypass some of the KPTbase pages
766 	 * above in the KERNBASE area.
767 	 */
768 	for (i = 0; (i << PDRSHIFT) < *firstaddr; i++) {
769 		((pd_entry_t *)KPDbase)[i] = i << PDRSHIFT;
770 		((pd_entry_t *)KPDbase)[i] |=
771 		    pmap_bits_default[PG_RW_IDX] |
772 		    pmap_bits_default[PG_V_IDX] |
773 		    pmap_bits_default[PG_PS_IDX] |
774 		    pmap_bits_default[PG_G_IDX];
775 	}
776 
777 	/*
778 	 * And connect up the PD to the PDP.  The kernel pmap is expected
779 	 * to pre-populate all of its PDs.  See NKPDPE in vmparam.h.
780 	 */
781 	for (i = 0; i < NKPDPE; i++) {
782 		((pdp_entry_t *)KPDPphys)[NPDPEPG - NKPDPE + i] =
783 				KPDphys + (i << PAGE_SHIFT);
784 		((pdp_entry_t *)KPDPphys)[NPDPEPG - NKPDPE + i] |=
785 		    pmap_bits_default[PG_RW_IDX] |
786 		    pmap_bits_default[PG_V_IDX] |
787 		    pmap_bits_default[PG_U_IDX];
788 	}
789 
790 	/*
791 	 * Now set up the direct map space using either 2MB or 1GB pages.
792 	 * Preset PG_M and PG_A because demotion expects them.
793 	 *
794 	 * When filling in entries in the PD pages, make sure any excess
795 	 * entries are set to zero, as we allocated enough PD pages.
796 	 */
797 	if ((amd_feature & AMDID_PAGE1GB) == 0) {
798 		for (i = 0; i < NPDEPG * ndmpdp; i++) {
799 			((pd_entry_t *)DMPDphys)[i] = i << PDRSHIFT;
800 			((pd_entry_t *)DMPDphys)[i] |=
801 			    pmap_bits_default[PG_RW_IDX] |
802 			    pmap_bits_default[PG_V_IDX] |
803 			    pmap_bits_default[PG_PS_IDX] |
804 			    pmap_bits_default[PG_G_IDX] |
805 			    pmap_bits_default[PG_M_IDX] |
806 			    pmap_bits_default[PG_A_IDX];
807 		}
808 
809 		/*
810 		 * And the direct map space's PDP
811 		 */
812 		for (i = 0; i < ndmpdp; i++) {
813 			((pdp_entry_t *)DMPDPphys)[i] = DMPDphys +
814 							(i << PAGE_SHIFT);
815 			((pdp_entry_t *)DMPDPphys)[i] |=
816 			    pmap_bits_default[PG_RW_IDX] |
817 			    pmap_bits_default[PG_V_IDX] |
818 			    pmap_bits_default[PG_U_IDX];
819 		}
820 	} else {
821 		for (i = 0; i < ndmpdp; i++) {
822 			((pdp_entry_t *)DMPDPphys)[i] =
823 						(vm_paddr_t)i << PDPSHIFT;
824 			((pdp_entry_t *)DMPDPphys)[i] |=
825 			    pmap_bits_default[PG_RW_IDX] |
826 			    pmap_bits_default[PG_V_IDX] |
827 			    pmap_bits_default[PG_PS_IDX] |
828 			    pmap_bits_default[PG_G_IDX] |
829 			    pmap_bits_default[PG_M_IDX] |
830 			    pmap_bits_default[PG_A_IDX];
831 		}
832 	}
833 
834 	/* And recursively map PML4 to itself in order to get PTmap */
835 	((pdp_entry_t *)KPML4phys)[PML4PML4I] = KPML4phys;
836 	((pdp_entry_t *)KPML4phys)[PML4PML4I] |=
837 	    pmap_bits_default[PG_RW_IDX] |
838 	    pmap_bits_default[PG_V_IDX] |
839 	    pmap_bits_default[PG_U_IDX];
840 
841 	/*
842 	 * Connect the Direct Map slots up to the PML4
843 	 */
844 	for (j = 0; j < NDMPML4E; ++j) {
845 		((pdp_entry_t *)KPML4phys)[DMPML4I + j] =
846 		    (DMPDPphys + ((vm_paddr_t)j << PML4SHIFT)) |
847 		    pmap_bits_default[PG_RW_IDX] |
848 		    pmap_bits_default[PG_V_IDX] |
849 		    pmap_bits_default[PG_U_IDX];
850 	}
851 
852 	/*
853 	 * Connect the KVA slot up to the PML4
854 	 */
855 	((pdp_entry_t *)KPML4phys)[KPML4I] = KPDPphys;
856 	((pdp_entry_t *)KPML4phys)[KPML4I] |=
857 	    pmap_bits_default[PG_RW_IDX] |
858 	    pmap_bits_default[PG_V_IDX] |
859 	    pmap_bits_default[PG_U_IDX];
860 }
861 
862 /*
863  *	Bootstrap the system enough to run with virtual memory.
864  *
865  *	On x86-64 this is called after mapping has already been enabled
866  *	and just syncs the pmap module with what has already been done.
867  *	[We can't call it easily with mapping off since the kernel is not
868  *	mapped with PA == VA, hence we would have to relocate every address
869  *	from the linked base (virtual) address "KERNBASE" to the actual
870  *	(physical) address starting relative to 0]
871  */
872 void
873 pmap_bootstrap(vm_paddr_t *firstaddr)
874 {
875 	vm_offset_t va;
876 	pt_entry_t *pte;
877 
878 	KvaStart = VM_MIN_KERNEL_ADDRESS;
879 	KvaEnd = VM_MAX_KERNEL_ADDRESS;
880 	KvaSize = KvaEnd - KvaStart;
881 
882 	avail_start = *firstaddr;
883 
884 	/*
885 	 * Create an initial set of page tables to run the kernel in.
886 	 */
887 	create_pagetables(firstaddr);
888 
889 	virtual2_start = KvaStart;
890 	virtual2_end = PTOV_OFFSET;
891 
892 	virtual_start = (vm_offset_t) PTOV_OFFSET + *firstaddr;
893 	virtual_start = pmap_kmem_choose(virtual_start);
894 
895 	virtual_end = VM_MAX_KERNEL_ADDRESS;
896 
897 	/* XXX do %cr0 as well */
898 	load_cr4(rcr4() | CR4_PGE | CR4_PSE);
899 	load_cr3(KPML4phys);
900 
901 	/*
902 	 * Initialize protection array.
903 	 */
904 	i386_protection_init();
905 
906 	/*
907 	 * The kernel's pmap is statically allocated so we don't have to use
908 	 * pmap_create, which is unlikely to work correctly at this part of
909 	 * the boot sequence (XXX and which no longer exists).
910 	 */
911 	kernel_pmap.pm_pml4 = (pdp_entry_t *) (PTOV_OFFSET + KPML4phys);
912 	kernel_pmap.pm_count = 1;
913 	CPUMASK_ASSALLONES(kernel_pmap.pm_active);
914 	RB_INIT(&kernel_pmap.pm_pvroot);
915 	spin_init(&kernel_pmap.pm_spin, "pmapbootstrap");
916 	lwkt_token_init(&kernel_pmap.pm_token, "kpmap_tok");
917 
918 	/*
919 	 * Reserve some special page table entries/VA space for temporary
920 	 * mapping of pages.
921 	 */
922 #define	SYSMAP(c, p, v, n)	\
923 	v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);
924 
925 	va = virtual_start;
926 	pte = vtopte(va);
927 
928 	/*
929 	 * CMAP1/CMAP2 are used for zeroing and copying pages.
930 	 */
931 	SYSMAP(caddr_t, CMAP1, CADDR1, 1)
932 
933 	/*
934 	 * Crashdump maps.
935 	 */
936 	SYSMAP(caddr_t, pt_crashdumpmap, crashdumpmap, MAXDUMPPGS);
937 
938 	/*
939 	 * ptvmmap is used for reading arbitrary physical pages via
940 	 * /dev/mem.
941 	 */
942 	SYSMAP(caddr_t, ptmmap, ptvmmap, 1)
943 
944 	/*
945 	 * msgbufp is used to map the system message buffer.
946 	 * XXX msgbufmap is not used.
947 	 */
948 	SYSMAP(struct msgbuf *, msgbufmap, msgbufp,
949 	       atop(round_page(MSGBUF_SIZE)))
950 
951 	virtual_start = va;
952 	virtual_start = pmap_kmem_choose(virtual_start);
953 
954 	*CMAP1 = 0;
955 
956 	/*
957 	 * PG_G is terribly broken on SMP because we IPI invltlb's in some
958 	 * cases rather than invlpg.  Actually, I don't even know why it
959 	 * works under UP, given the self-referential page table mappings.
960 	 */
961 //	pgeflag = 0;
962 
963 /*
964  * Initialize the 4MB page size flag
965  */
966 //	pseflag = 0;
967 /*
968  * The 4MB page version of the initial
969  * kernel page mapping.
970  */
971 	pdir4mb = 0;
972 
973 #if !defined(DISABLE_PSE)
974 	if (cpu_feature & CPUID_PSE) {
975 		pt_entry_t ptditmp;
976 		/*
977 		 * Note that we have enabled PSE mode
978 		 */
979 //		pseflag = kernel_pmap.pmap_bits[PG_PS_IDX];
980 		ptditmp = *(PTmap + x86_64_btop(KERNBASE));
981 		ptditmp &= ~(NBPDR - 1);
982 		ptditmp |= pmap_bits_default[PG_V_IDX] |
983 		    pmap_bits_default[PG_RW_IDX] |
984 		    pmap_bits_default[PG_PS_IDX] |
985 		    pmap_bits_default[PG_U_IDX];
986 //		    pgeflag;
987 		pdir4mb = ptditmp;
988 	}
989 #endif
990 	cpu_invltlb();
991 
992 	/* Initialize the PAT MSR */
993 	pmap_init_pat();
994 	pmap_pinit_defaults(&kernel_pmap);
995 
996 	TUNABLE_INT_FETCH("machdep.pmap_fast_kernel_cpusync",
997 			  &pmap_fast_kernel_cpusync);
998 
999 }
1000 
1001 /*
1002  * Setup the PAT MSR.
1003  */
1004 void
1005 pmap_init_pat(void)
1006 {
1007 	uint64_t pat_msr;
1008 	u_long cr0, cr4;
1009 
1010 	/*
1011 	 * Default values mapping PATi,PCD,PWT bits at system reset.
1012 	 * The default values effectively ignore the PATi bit by
1013 	 * repeating the encodings for 0-3 in 4-7, and map the PCD
1014 	 * and PWT bit combinations to the expected PAT types.
1015 	 */
1016 	pat_msr = PAT_VALUE(0, PAT_WRITE_BACK) |	/* 000 */
1017 		  PAT_VALUE(1, PAT_WRITE_THROUGH) |	/* 001 */
1018 		  PAT_VALUE(2, PAT_UNCACHED) |		/* 010 */
1019 		  PAT_VALUE(3, PAT_UNCACHEABLE) |	/* 011 */
1020 		  PAT_VALUE(4, PAT_WRITE_BACK) |	/* 100 */
1021 		  PAT_VALUE(5, PAT_WRITE_THROUGH) |	/* 101 */
1022 		  PAT_VALUE(6, PAT_UNCACHED) |		/* 110 */
1023 		  PAT_VALUE(7, PAT_UNCACHEABLE);	/* 111 */
1024 	pat_pte_index[PAT_WRITE_BACK]	= 0;
1025 	pat_pte_index[PAT_WRITE_THROUGH]= 0         | X86_PG_NC_PWT;
1026 	pat_pte_index[PAT_UNCACHED]	= X86_PG_NC_PCD;
1027 	pat_pte_index[PAT_UNCACHEABLE]	= X86_PG_NC_PCD | X86_PG_NC_PWT;
1028 	pat_pte_index[PAT_WRITE_PROTECTED] = pat_pte_index[PAT_UNCACHEABLE];
1029 	pat_pte_index[PAT_WRITE_COMBINING] = pat_pte_index[PAT_UNCACHEABLE];
1030 
1031 	if (cpu_feature & CPUID_PAT) {
1032 		/*
1033 		 * If we support the PAT then set-up entries for
1034 		 * WRITE_PROTECTED and WRITE_COMBINING using bit patterns
1035 		 * 4 and 5.
1036 		 */
1037 		pat_msr = (pat_msr & ~PAT_MASK(4)) |
1038 			  PAT_VALUE(4, PAT_WRITE_PROTECTED);
1039 		pat_msr = (pat_msr & ~PAT_MASK(5)) |
1040 			  PAT_VALUE(5, PAT_WRITE_COMBINING);
1041 		pat_pte_index[PAT_WRITE_PROTECTED] = X86_PG_PTE_PAT | 0;
1042 		pat_pte_index[PAT_WRITE_COMBINING] = X86_PG_PTE_PAT | X86_PG_NC_PWT;
1043 
1044 		/*
1045 		 * Then enable the PAT
1046 		 */
1047 
1048 		/* Disable PGE. */
1049 		cr4 = rcr4();
1050 		load_cr4(cr4 & ~CR4_PGE);
1051 
1052 		/* Disable caches (CD = 1, NW = 0). */
1053 		cr0 = rcr0();
1054 		load_cr0((cr0 & ~CR0_NW) | CR0_CD);
1055 
1056 		/* Flushes caches and TLBs. */
1057 		wbinvd();
1058 		cpu_invltlb();
1059 
1060 		/* Update PAT and index table. */
1061 		wrmsr(MSR_PAT, pat_msr);
1062 
1063 		/* Flush caches and TLBs again. */
1064 		wbinvd();
1065 		cpu_invltlb();
1066 
1067 		/* Restore caches and PGE. */
1068 		load_cr0(cr0);
1069 		load_cr4(cr4);
1070 		PatMsr = pat_msr;
1071 	}
1072 }
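/*
 * Example (editor's note): once the PAT is programmed, a page whose
 * pat_mode is PAT_WRITE_COMBINING resolves through pat_pte_index[] to
 * X86_PG_PTE_PAT | X86_PG_NC_PWT (PAT entry 5).  Mapping code merges
 * these bits into the PTE, as pmap_qenter() does later via
 * kernel_pmap.pmap_cache_bits[(*m)->pat_mode].
 */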
1073 
1074 /*
1075  * Set 4mb pdir for mp startup
1076  */
1077 void
1078 pmap_set_opt(void)
1079 {
1080 	if (cpu_feature & CPUID_PSE) {
1081 		load_cr4(rcr4() | CR4_PSE);
1082 		if (pdir4mb && mycpu->gd_cpuid == 0) {	/* only on BSP */
1083 			cpu_invltlb();
1084 		}
1085 	}
1086 }
1087 
1088 /*
1089  *	Initialize the pmap module.
1090  *	Called by vm_init, to initialize any structures that the pmap
1091  *	system needs to map virtual memory.
1092  *	pmap_init has been enhanced to support, in a fairly consistent
1093  *	way, discontiguous physical memory.
1094  */
1095 void
1096 pmap_init(void)
1097 {
1098 	int i;
1099 	int initial_pvs;
1100 
1101 	/*
1102 	 * Allocate memory for random pmap data structures.  Includes the
1103 	 * pv_head_table.
1104 	 */
1105 
1106 	for (i = 0; i < vm_page_array_size; i++) {
1107 		vm_page_t m;
1108 
1109 		m = &vm_page_array[i];
1110 		TAILQ_INIT(&m->md.pv_list);
1111 	}
1112 
1113 	/*
1114 	 * init the pv free list
1115 	 */
1116 	initial_pvs = vm_page_array_size;
1117 	if (initial_pvs < MINPV)
1118 		initial_pvs = MINPV;
1119 	pvzone = &pvzone_store;
1120 	pvinit = (void *)kmem_alloc(&kernel_map,
1121 				    initial_pvs * sizeof (struct pv_entry));
1122 	zbootinit(pvzone, "PV ENTRY", sizeof (struct pv_entry),
1123 		  pvinit, initial_pvs);
1124 
1125 	/*
1126 	 * Now it is safe to enable pv_table recording.
1127 	 */
1128 	pmap_initialized = TRUE;
1129 }
1130 
1131 /*
1132  * Initialize the address space (zone) for the pv_entries.  Set a
1133  * high water mark so that the system can recover from excessive
1134  * numbers of pv entries.
1135  */
1136 void
1137 pmap_init2(void)
1138 {
1139 	int shpgperproc = PMAP_SHPGPERPROC;
1140 	int entry_max;
1141 
1142 	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
1143 	pv_entry_max = shpgperproc * maxproc + vm_page_array_size;
1144 	TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
1145 	pv_entry_high_water = 9 * (pv_entry_max / 10);
1146 
1147 	/*
1148 	 * Subtract out pages already installed in the zone (hack)
1149 	 */
1150 	entry_max = pv_entry_max - vm_page_array_size;
1151 	if (entry_max <= 0)
1152 		entry_max = 1;
1153 
1154 	zinitna(pvzone, &pvzone_obj, NULL, 0, entry_max, ZONE_INTERRUPT, 1);
1155 }
1156 
1157 /*
1158  * Typically used to initialize a fictitious page by vm/device_pager.c
1159  */
1160 void
1161 pmap_page_init(struct vm_page *m)
1162 {
1163 	vm_page_init(m);
1164 	TAILQ_INIT(&m->md.pv_list);
1165 }
1166 
1167 /***************************************************
1168  * Low level helper routines.....
1169  ***************************************************/
1170 
1171 /*
1172  * This routine defines the region(s) of memory that should
1173  * not be tested for the modified bit.
1174  */
1175 static __inline
1176 int
1177 pmap_track_modified(vm_pindex_t pindex)
1178 {
1179 	vm_offset_t va = (vm_offset_t)pindex << PAGE_SHIFT;
1180 	if ((va < clean_sva) || (va >= clean_eva))
1181 		return 1;
1182 	else
1183 		return 0;
1184 }
1185 
1186 /*
1187  * Extract the physical page address associated with the map/VA pair.
1188  * The page must be wired for this to work reliably.
1189  *
1190  * XXX for the moment we're using pv_find() instead of pv_get(), as
1191  *     callers might be expecting non-blocking operation.
1192  */
1193 vm_paddr_t
1194 pmap_extract(pmap_t pmap, vm_offset_t va)
1195 {
1196 	vm_paddr_t rtval;
1197 	pv_entry_t pt_pv;
1198 	pt_entry_t *ptep;
1199 
1200 	rtval = 0;
1201 	if (va >= VM_MAX_USER_ADDRESS) {
1202 		/*
1203 		 * Kernel page directories might be direct-mapped and
1204 		 * there is typically no PV tracking of pte's
1205 		 */
1206 		pd_entry_t *pt;
1207 
1208 		pt = pmap_pt(pmap, va);
1209 		if (pt && (*pt & pmap->pmap_bits[PG_V_IDX])) {
1210 			if (*pt & pmap->pmap_bits[PG_PS_IDX]) {
1211 				rtval = *pt & PG_PS_FRAME;
1212 				rtval |= va & PDRMASK;
1213 			} else {
1214 				ptep = pmap_pt_to_pte(*pt, va);
1215 				if (*ptep & pmap->pmap_bits[PG_V_IDX]) {
1216 					rtval = *ptep & PG_FRAME;
1217 					rtval |= va & PAGE_MASK;
1218 				}
1219 			}
1220 		}
1221 	} else {
1222 		/*
1223 		 * User pages currently do not direct-map the page directory
1224 		 * and some pages might not use managed PVs.  But all PTs
1225 		 * will have a PV.
1226 		 */
1227 		pt_pv = pv_find(pmap, pmap_pt_pindex(va));
1228 		if (pt_pv) {
1229 			ptep = pv_pte_lookup(pt_pv, pmap_pte_index(va));
1230 			if (*ptep & pmap->pmap_bits[PG_V_IDX]) {
1231 				rtval = *ptep & PG_FRAME;
1232 				rtval |= va & PAGE_MASK;
1233 			}
1234 			pv_drop(pt_pv);
1235 		}
1236 	}
1237 	return rtval;
1238 }
1239 
1240 /*
1241  * Similar to extract but checks protections, SMP-friendly short-cut for
1242  * vm_fault_page[_quick]().  Can return NULL to cause the caller to
1243  * fall-through to the real fault code.
1244  *
1245  * The returned page, if not NULL, is held (and not busied).
1246  */
1247 vm_page_t
1248 pmap_fault_page_quick(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
1249 {
1250 	if (pmap && va < VM_MAX_USER_ADDRESS) {
1251 		pv_entry_t pt_pv;
1252 		pv_entry_t pte_pv;
1253 		pt_entry_t *ptep;
1254 		pt_entry_t req;
1255 		vm_page_t m;
1256 		int error;
1257 
1258 		req = pmap->pmap_bits[PG_V_IDX] |
1259 		      pmap->pmap_bits[PG_U_IDX];
1260 		if (prot & VM_PROT_WRITE)
1261 			req |= pmap->pmap_bits[PG_RW_IDX];
1262 
1263 		pt_pv = pv_find(pmap, pmap_pt_pindex(va));
1264 		if (pt_pv == NULL)
1265 			return (NULL);
1266 		ptep = pv_pte_lookup(pt_pv, pmap_pte_index(va));
1267 		if ((*ptep & req) != req) {
1268 			pv_drop(pt_pv);
1269 			return (NULL);
1270 		}
1271 		pte_pv = pv_get_try(pmap, pmap_pte_pindex(va), &error);
1272 		if (pte_pv && error == 0) {
1273 			m = pte_pv->pv_m;
1274 			vm_page_hold(m);
1275 			if (prot & VM_PROT_WRITE)
1276 				vm_page_dirty(m);
1277 			pv_put(pte_pv);
1278 		} else if (pte_pv) {
1279 			pv_drop(pte_pv);
1280 			m = NULL;
1281 		} else {
1282 			m = NULL;
1283 		}
1284 		pv_drop(pt_pv);
1285 		return(m);
1286 	} else {
1287 		return(NULL);
1288 	}
1289 }
1290 
1291 /*
1292  * Extract the physical page address associated with a kernel virtual address.
1293  */
1294 vm_paddr_t
1295 pmap_kextract(vm_offset_t va)
1296 {
1297 	pd_entry_t pt;		/* pt entry in pd */
1298 	vm_paddr_t pa;
1299 
1300 	if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) {
1301 		pa = DMAP_TO_PHYS(va);
1302 	} else {
1303 		pt = *vtopt(va);
1304 		if (pt & kernel_pmap.pmap_bits[PG_PS_IDX]) {
1305 			pa = (pt & PG_PS_FRAME) | (va & PDRMASK);
1306 		} else {
1307 			/*
1308 			 * Beware of a concurrent promotion that changes the
1309 			 * PDE at this point!  For example, vtopte() must not
1310 			 * be used to access the PTE because it would use the
1311 			 * new PDE.  It is, however, safe to use the old PDE
1312 			 * because the page table page is preserved by the
1313 			 * promotion.
1314 			 */
1315 			pa = *pmap_pt_to_pte(pt, va);
1316 			pa = (pa & PG_FRAME) | (va & PAGE_MASK);
1317 		}
1318 	}
1319 	return pa;
1320 }
1321 
1322 /***************************************************
1323  * Low level mapping routines.....
1324  ***************************************************/
1325 
1326 /*
1327  * Routine: pmap_kenter
1328  * Function:
1329  *  	Add a wired page to the KVA
1330  *  	NOTE! note that in order for the mapping to take effect -- you
1331  *  	NOTE! In order for the mapping to take effect you
1332  *  	should do an invltlb after calling pmap_kenter().
1333 void
1334 pmap_kenter(vm_offset_t va, vm_paddr_t pa)
1335 {
1336 	pt_entry_t *ptep;
1337 	pt_entry_t npte;
1338 
1339 	npte = pa |
1340 	    kernel_pmap.pmap_bits[PG_RW_IDX] |
1341 	    kernel_pmap.pmap_bits[PG_V_IDX];
1342 //	    pgeflag;
1343 	ptep = vtopte(va);
1344 #if 1
1345 	pmap_inval_smp(&kernel_pmap, va, 1, ptep, npte);
1346 #else
1347 	/* FUTURE */
1348 	if (*ptep)
1349 		pmap_inval_smp(&kernel_pmap, va, ptep, npte);
1350 	else
1351 		*ptep = npte;
1352 #endif
1353 }
1354 
1355 /*
1356  * Similar to pmap_kenter(), except we only invalidate the mapping on the
1357  * current CPU.  Returns 0 if the previous pte was 0, 1 if it wasn't
1358  * (caller can conditionalize calling smp_invltlb()).
1359  */
1360 int
1361 pmap_kenter_quick(vm_offset_t va, vm_paddr_t pa)
1362 {
1363 	pt_entry_t *ptep;
1364 	pt_entry_t npte;
1365 	int res;
1366 
1367 	npte = pa |
1368 	    kernel_pmap.pmap_bits[PG_RW_IDX] |
1369 	    kernel_pmap.pmap_bits[PG_V_IDX];
1370 //	    pgeflag;
1371 	ptep = vtopte(va);
1372 #if 1
1373 	res = 1;
1374 #else
1375 	/* FUTURE */
1376 	res = (*ptep != 0);
1377 #endif
1378 	*ptep = npte;
1379 	cpu_invlpg((void *)va);
1380 
1381 	return res;
1382 }
1383 
1384 /*
1385  * Enter addresses into the kernel pmap but don't bother
1386  * doing any tlb invalidations.  Caller will do a rollup
1387  * invalidation via pmap_rollup_inval().
1388  */
1389 int
1390 pmap_kenter_noinval(vm_offset_t va, vm_paddr_t pa)
1391 {
1392 	pt_entry_t *ptep;
1393 	pt_entry_t npte;
1394 	int res;
1395 
1396 	npte = pa |
1397 	    kernel_pmap.pmap_bits[PG_RW_IDX] |
1398 	    kernel_pmap.pmap_bits[PG_V_IDX];
1399 //	    pgeflag;
1400 	ptep = vtopte(va);
1401 #if 1
1402 	res = 1;
1403 #else
1404 	/* FUTURE */
1405 	res = (*ptep != 0);
1406 #endif
1407 	*ptep = npte;
1408 	cpu_invlpg((void *)va);
1409 
1410 	return res;
1411 }
1412 
1413 /*
1414  * remove a page from the kernel pagetables
1415  */
1416 void
1417 pmap_kremove(vm_offset_t va)
1418 {
1419 	pt_entry_t *ptep;
1420 
1421 	ptep = vtopte(va);
1422 	pmap_inval_smp(&kernel_pmap, va, 1, ptep, 0);
1423 }
1424 
1425 void
1426 pmap_kremove_quick(vm_offset_t va)
1427 {
1428 	pt_entry_t *ptep;
1429 
1430 	ptep = vtopte(va);
1431 	(void)pte_load_clear(ptep);
1432 	cpu_invlpg((void *)va);
1433 }
1434 
1435 /*
1436  * Remove addresses from the kernel pmap but don't bother
1437  * doing any tlb invalidations.  Caller will do a rollup
1438  * invalidation via pmap_rollup_inval().
1439  */
1440 void
1441 pmap_kremove_noinval(vm_offset_t va)
1442 {
1443 	pt_entry_t *ptep;
1444 
1445 	ptep = vtopte(va);
1446 	(void)pte_load_clear(ptep);
1447 }
1448 
1449 /*
1450  * XXX these need to be recoded.  They are not used in any critical path.
1451  */
1452 void
1453 pmap_kmodify_rw(vm_offset_t va)
1454 {
1455 	atomic_set_long(vtopte(va), kernel_pmap.pmap_bits[PG_RW_IDX]);
1456 	cpu_invlpg((void *)va);
1457 }
1458 
1459 /* NOT USED
1460 void
1461 pmap_kmodify_nc(vm_offset_t va)
1462 {
1463 	atomic_set_long(vtopte(va), PG_N);
1464 	cpu_invlpg((void *)va);
1465 }
1466 */
1467 
1468 /*
1469  * Used to map a range of physical addresses into kernel virtual
1470  * address space during the low level boot, typically to map the
1471  * dump bitmap, message buffer, and vm_page_array.
1472  *
1473  * These mappings are typically made at an address just past the end of the
1474  * kernel text+data.
1475  *
1476  * We could return PHYS_TO_DMAP(start) here and not allocate any
1477  * KVA via (*virtp), but then kmem from userland and kernel dumps won't
1478  * have access to the related pointers.
1479  */
1480 vm_offset_t
1481 pmap_map(vm_offset_t *virtp, vm_paddr_t start, vm_paddr_t end, int prot)
1482 {
1483 	vm_offset_t va;
1484 	vm_offset_t va_start;
1485 
1486 	/*return PHYS_TO_DMAP(start);*/
1487 
1488 	va_start = *virtp;
1489 	va = va_start;
1490 
1491 	while (start < end) {
1492 		pmap_kenter_quick(va, start);
1493 		va += PAGE_SIZE;
1494 		start += PAGE_SIZE;
1495 	}
1496 	*virtp = va;
1497 	return va_start;
1498 }
1499 
1500 #define PMAP_CLFLUSH_THRESHOLD  (2 * 1024 * 1024)
1501 
1502 /*
1503  * Remove the specified set of pages from the data and instruction caches.
1504  *
1505  * In contrast to pmap_invalidate_cache_range(), this function does not
1506  * rely on the CPU's self-snoop feature, because it is intended for use
1507  * when moving pages into a different cache domain.
1508  */
1509 void
1510 pmap_invalidate_cache_pages(vm_page_t *pages, int count)
1511 {
1512 	vm_offset_t daddr, eva;
1513 	int i;
1514 
1515 	if (count >= PMAP_CLFLUSH_THRESHOLD / PAGE_SIZE ||
1516 	    (cpu_feature & CPUID_CLFSH) == 0)
1517 		wbinvd();
1518 	else {
1519 		cpu_mfence();
1520 		for (i = 0; i < count; i++) {
1521 			daddr = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pages[i]));
1522 			eva = daddr + PAGE_SIZE;
1523 			for (; daddr < eva; daddr += cpu_clflush_line_size)
1524 				clflush(daddr);
1525 		}
1526 		cpu_mfence();
1527 	}
1528 }
1529 
1530 void
1531 pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
1532 {
1533 	KASSERT((sva & PAGE_MASK) == 0,
1534 	    ("pmap_invalidate_cache_range: sva not page-aligned"));
1535 	KASSERT((eva & PAGE_MASK) == 0,
1536 	    ("pmap_invalidate_cache_range: eva not page-aligned"));
1537 
1538 	if (cpu_feature & CPUID_SS) {
1539 		; /* If "Self Snoop" is supported, do nothing. */
1540 	} else {
1541 		/* Globally invalidate caches */
1542 		cpu_wbinvd_on_all_cpus();
1543 	}
1544 }
1545 
1546 /*
1547  * Invalidate the specified range of virtual memory on all cpus associated
1548  * with the pmap.
1549  */
1550 void
1551 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
1552 {
1553 	pmap_inval_smp(pmap, sva, (eva - sva) >> PAGE_SHIFT, NULL, 0);
1554 }
1555 
1556 /*
1557  * Add a list of wired pages to the kva.  This routine is used for temporary
1558  * kernel mappings such as those found in buffer cache buffers.  Page
1559  * modifications and accesses are not tracked or recorded.
1560  *
1561  * NOTE! Old mappings are simply overwritten, and we cannot assume relaxed
1562  *	 semantics as previous mappings may have been zeroed without any
1563  *	 invalidation.
1564  *
1565  * The page *must* be wired.
1566  */
1567 void
1568 pmap_qenter(vm_offset_t beg_va, vm_page_t *m, int count)
1569 {
1570 	vm_offset_t end_va;
1571 	vm_offset_t va;
1572 
1573 	end_va = beg_va + count * PAGE_SIZE;
1574 
1575 	for (va = beg_va; va < end_va; va += PAGE_SIZE) {
1576 		pt_entry_t *pte;
1577 
1578 		pte = vtopte(va);
1579 		*pte = VM_PAGE_TO_PHYS(*m) |
1580 		    kernel_pmap.pmap_bits[PG_RW_IDX] |
1581 		    kernel_pmap.pmap_bits[PG_V_IDX] |
1582 		    kernel_pmap.pmap_cache_bits[(*m)->pat_mode];
1583 //		pgeflag;
1584 		m++;
1585 	}
1586 	pmap_invalidate_range(&kernel_pmap, beg_va, end_va);
1587 }
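/*
 * Typical pairing (editor's sketch; kva, pages, and npages are
 * illustrative names):
 */
#if 0 /* editor's example */
	pmap_qenter(kva, pages, npages);	/* map, full invalidation */
	/* ... access the pages through kva ... */
	pmap_qremove(kva, npages);		/* unmap and invalidate */
#endif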
1588 
1589 /*
1590  * This routine jerks page mappings from the kernel -- it is meant only
1591  * for temporary mappings such as those found in buffer cache buffers.
1592  * No modified or access status is recorded.
1593  *
1594  * MPSAFE, INTERRUPT SAFE (cluster callback)
1595  */
1596 void
1597 pmap_qremove(vm_offset_t beg_va, int count)
1598 {
1599 	vm_offset_t end_va;
1600 	vm_offset_t va;
1601 
1602 	end_va = beg_va + count * PAGE_SIZE;
1603 
1604 	for (va = beg_va; va < end_va; va += PAGE_SIZE) {
1605 		pt_entry_t *pte;
1606 
1607 		pte = vtopte(va);
1608 		(void)pte_load_clear(pte);
1609 		cpu_invlpg((void *)va);
1610 	}
1611 	pmap_invalidate_range(&kernel_pmap, beg_va, end_va);
1612 }
1613 
1614 /*
1615  * This routine removes temporary kernel mappings, only invalidating them
1616  * on the current cpu.  It should only be used under carefully controlled
1617  * conditions.
1618  */
1619 void
1620 pmap_qremove_quick(vm_offset_t beg_va, int count)
1621 {
1622 	vm_offset_t end_va;
1623 	vm_offset_t va;
1624 
1625 	end_va = beg_va + count * PAGE_SIZE;
1626 
1627 	for (va = beg_va; va < end_va; va += PAGE_SIZE) {
1628 		pt_entry_t *pte;
1629 
1630 		pte = vtopte(va);
1631 		(void)pte_load_clear(pte);
1632 		cpu_invlpg((void *)va);
1633 	}
1634 }
1635 
1636 /*
1637  * This routine removes temporary kernel mappings *without* invalidating
1638  * the TLB.  It can only be used on permanent kva reservations such as those
1639  * found in buffer cache buffers, under carefully controlled circumstances.
1640  *
1641  * NOTE: Repopulating these KVAs requires unconditional invalidation.
1642  *	 (pmap_qenter() does unconditional invalidation).
1643  */
1644 void
1645 pmap_qremove_noinval(vm_offset_t beg_va, int count)
1646 {
1647 	vm_offset_t end_va;
1648 	vm_offset_t va;
1649 
1650 	end_va = beg_va + count * PAGE_SIZE;
1651 
1652 	for (va = beg_va; va < end_va; va += PAGE_SIZE) {
1653 		pt_entry_t *pte;
1654 
1655 		pte = vtopte(va);
1656 		(void)pte_load_clear(pte);
1657 	}
1658 }
1659 
1660 /*
1661  * Create a new thread and optionally associate it with a (new) process.
1662  * NOTE! the new thread's cpu may not equal the current cpu.
1663  */
1664 void
1665 pmap_init_thread(thread_t td)
1666 {
1667 	/* enforce pcb placement & alignment */
1668 	td->td_pcb = (struct pcb *)(td->td_kstack + td->td_kstack_size) - 1;
1669 	td->td_pcb = (struct pcb *)((intptr_t)td->td_pcb & ~(intptr_t)0xF);
1670 	td->td_savefpu = &td->td_pcb->pcb_save;
1671 	td->td_sp = (char *)td->td_pcb;	/* no -16 */
1672 }
1673 
1674 /*
1675  * This routine directly affects the fork perf for a process.
1676  */
1677 void
1678 pmap_init_proc(struct proc *p)
1679 {
1680 }
1681 
1682 static void
1683 pmap_pinit_defaults(struct pmap *pmap)
1684 {
1685 	bcopy(pmap_bits_default, pmap->pmap_bits,
1686 	      sizeof(pmap_bits_default));
1687 	bcopy(protection_codes, pmap->protection_codes,
1688 	      sizeof(protection_codes));
1689 	bcopy(pat_pte_index, pmap->pmap_cache_bits,
1690 	      sizeof(pat_pte_index));
1691 	pmap->pmap_cache_mask = X86_PG_NC_PWT | X86_PG_NC_PCD | X86_PG_PTE_PAT;
1692 	pmap->copyinstr = std_copyinstr;
1693 	pmap->copyin = std_copyin;
1694 	pmap->copyout = std_copyout;
1695 	pmap->fubyte = std_fubyte;
1696 	pmap->subyte = std_subyte;
1697 	pmap->fuword = std_fuword;
1698 	pmap->suword = std_suword;
1699 	pmap->suword32 = std_suword32;
1700 }
1701 /*
1702  * Initialize pmap0/vmspace0.  This pmap is not added to pmap_list because
1703  * it, and IdlePTD, represent the template used to update all other pmaps.
1704  *
1705  * On architectures where the kernel pmap is not integrated into the user
1706  * process pmap, this pmap represents the process pmap, not the kernel pmap.
1707  * The kernel_pmap global should be used to access the kernel pmap directly.
1708  */
1709 void
1710 pmap_pinit0(struct pmap *pmap)
1711 {
1712 	pmap->pm_pml4 = (pml4_entry_t *)(PTOV_OFFSET + KPML4phys);
1713 	pmap->pm_count = 1;
1714 	CPUMASK_ASSZERO(pmap->pm_active);
1715 	pmap->pm_pvhint = NULL;
1716 	RB_INIT(&pmap->pm_pvroot);
1717 	spin_init(&pmap->pm_spin, "pmapinit0");
1718 	lwkt_token_init(&pmap->pm_token, "pmap_tok");
1719 	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
1720 	pmap_pinit_defaults(pmap);
1721 }
1722 
1723 /*
1724  * Initialize a preallocated and zeroed pmap structure,
1725  * such as one in a vmspace structure.
1726  */
1727 static void
1728 pmap_pinit_simple(struct pmap *pmap)
1729 {
1730 	/*
1731 	 * Misc initialization
1732 	 */
1733 	pmap->pm_count = 1;
1734 	CPUMASK_ASSZERO(pmap->pm_active);
1735 	pmap->pm_pvhint = NULL;
1736 	pmap->pm_flags = PMAP_FLAG_SIMPLE;
1737 
1738 	pmap_pinit_defaults(pmap);
1739 
1740 	/*
1741 	 * Don't blow up locks/tokens on re-use (XXX fix/use drop code
1742 	 * for this).
1743 	 */
1744 	if (pmap->pm_pmlpv == NULL) {
1745 		RB_INIT(&pmap->pm_pvroot);
1746 		bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
1747 		spin_init(&pmap->pm_spin, "pmapinitsimple");
1748 		lwkt_token_init(&pmap->pm_token, "pmap_tok");
1749 	}
1750 }
1751 
1752 void
1753 pmap_pinit(struct pmap *pmap)
1754 {
1755 	pv_entry_t pv;
1756 	int j;
1757 
1758 	if (pmap->pm_pmlpv) {
1759 		if (pmap->pmap_bits[TYPE_IDX] != REGULAR_PMAP) {
1760 			pmap_puninit(pmap);
1761 		}
1762 	}
1763 
1764 	pmap_pinit_simple(pmap);
1765 	pmap->pm_flags &= ~PMAP_FLAG_SIMPLE;
1766 
1767 	/*
1768 	 * No need to allocate page table space yet but we do need a valid
1769 	 * page directory table.
1770 	 */
1771 	if (pmap->pm_pml4 == NULL) {
1772 		pmap->pm_pml4 =
1773 		    (pml4_entry_t *)kmem_alloc_pageable(&kernel_map, PAGE_SIZE);
1774 	}
1775 
1776 	/*
1777 	 * Allocate the page directory page, which wires it even though
1778 	 * it isn't being entered into some higher level page table (it
1779 	 * being the highest level).  If one is already cached we don't
1780 	 * have to do anything.
1781 	 */
1782 	if ((pv = pmap->pm_pmlpv) == NULL) {
1783 		pv = pmap_allocpte(pmap, pmap_pml4_pindex(), NULL);
1784 		pmap->pm_pmlpv = pv;
1785 		pmap_kenter((vm_offset_t)pmap->pm_pml4,
1786 			    VM_PAGE_TO_PHYS(pv->pv_m));
1787 		pv_put(pv);
1788 
1789 		/*
1790 		 * Install DMAP and KMAP.
1791 		 */
1792 		for (j = 0; j < NDMPML4E; ++j) {
1793 			pmap->pm_pml4[DMPML4I + j] =
1794 			    (DMPDPphys + ((vm_paddr_t)j << PML4SHIFT)) |
1795 			    pmap->pmap_bits[PG_RW_IDX] |
1796 			    pmap->pmap_bits[PG_V_IDX] |
1797 			    pmap->pmap_bits[PG_U_IDX];
1798 		}
1799 		pmap->pm_pml4[KPML4I] = KPDPphys |
1800 		    pmap->pmap_bits[PG_RW_IDX] |
1801 		    pmap->pmap_bits[PG_V_IDX] |
1802 		    pmap->pmap_bits[PG_U_IDX];
1803 
1804 		/*
1805 		 * install self-referential address mapping entry
1806 		 */
1807 		pmap->pm_pml4[PML4PML4I] = VM_PAGE_TO_PHYS(pv->pv_m) |
1808 		    pmap->pmap_bits[PG_V_IDX] |
1809 		    pmap->pmap_bits[PG_RW_IDX] |
1810 		    pmap->pmap_bits[PG_A_IDX] |
1811 		    pmap->pmap_bits[PG_M_IDX];
1812 	} else {
1813 		KKASSERT(pv->pv_m->flags & PG_MAPPED);
1814 		KKASSERT(pv->pv_m->flags & PG_WRITEABLE);
1815 	}
1816 	KKASSERT(pmap->pm_pml4[255] == 0);
1817 	KKASSERT(RB_ROOT(&pmap->pm_pvroot) == pv);
1818 	KKASSERT(pv->pv_entry.rbe_left == NULL);
1819 	KKASSERT(pv->pv_entry.rbe_right == NULL);
1820 }
1821 
1822 /*
1823  * Clean up a pmap structure so it can be physically freed.  This routine
1824  * is called by the vmspace dtor function.  A great deal of pmap data is
1825  * left passively mapped to improve vmspace management so we have a bit
1826  * of cleanup work to do here.
1827  */
1828 void
1829 pmap_puninit(pmap_t pmap)
1830 {
1831 	pv_entry_t pv;
1832 	vm_page_t p;
1833 
1834 	KKASSERT(CPUMASK_TESTZERO(pmap->pm_active));
1835 	if ((pv = pmap->pm_pmlpv) != NULL) {
1836 		if (pv_hold_try(pv) == 0)
1837 			pv_lock(pv);
1838 		KKASSERT(pv == pmap->pm_pmlpv);
1839 		p = pmap_remove_pv_page(pv);
1840 		pv_free(pv);
1841 		pmap_kremove((vm_offset_t)pmap->pm_pml4);
1842 		vm_page_busy_wait(p, FALSE, "pgpun");
1843 		KKASSERT(p->flags & (PG_FICTITIOUS|PG_UNMANAGED));
1844 		vm_page_unwire(p, 0);
1845 		vm_page_flag_clear(p, PG_MAPPED | PG_WRITEABLE);
1846 
1847 		/*
1848 		 * XXX eventually clean out PML4 static entries and
1849 		 * use vm_page_free_zero()
1850 		 */
1851 		vm_page_free(p);
1852 		pmap->pm_pmlpv = NULL;
1853 	}
1854 	if (pmap->pm_pml4) {
1855 		KKASSERT(pmap->pm_pml4 != (void *)(PTOV_OFFSET + KPML4phys));
1856 		kmem_free(&kernel_map, (vm_offset_t)pmap->pm_pml4, PAGE_SIZE);
1857 		pmap->pm_pml4 = NULL;
1858 	}
1859 	KKASSERT(pmap->pm_stats.resident_count == 0);
1860 	KKASSERT(pmap->pm_stats.wired_count == 0);
1861 }
1862 
1863 /*
1864  * Wire in kernel global address entries.  To avoid a race condition
1865  * between pmap initialization and pmap_growkernel, this procedure
1866  * adds the pmap to the master list (which growkernel scans to update),
1867  * then copies the template.
1868  */
1869 void
1870 pmap_pinit2(struct pmap *pmap)
1871 {
1872 	spin_lock(&pmap_spin);
1873 	TAILQ_INSERT_TAIL(&pmap_list, pmap, pm_pmnode);
1874 	spin_unlock(&pmap_spin);
1875 }
1876 
1877 /*
1878  * This routine is called when various levels in the page table need to
1879  * be populated.  This routine cannot fail.
1880  *
1881  * This function returns two locked pv_entry's, one representing the
1882  * requested pv and one representing the requested pv's parent pv.  If
1883  * the pv did not previously exist it will be mapped into its parent
1884  * and wired, otherwise no additional wire count will be added.
1885  */
1886 static
1887 pv_entry_t
1888 pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, pv_entry_t *pvpp)
1889 {
1890 	pt_entry_t *ptep;
1891 	pv_entry_t pv;
1892 	pv_entry_t pvp;
1893 	vm_pindex_t pt_pindex;
1894 	vm_page_t m;
1895 	int isnew;
1896 	int ispt;
1897 
1898 	/*
1899 	 * If the pv already exists and we aren't being asked for the
1900 	 * parent page table page we can just return it.  A locked+held pv
1901 	 * is returned.  The pv will also have a second hold related to the
1902 	 * pmap association that we don't have to worry about.
1903 	 */
1904 	ispt = 0;
1905 	pv = pv_alloc(pmap, ptepindex, &isnew);
1906 	if (isnew == 0 && pvpp == NULL)
1907 		return(pv);
1908 
1909 	/*
1910 	 * Special case terminal PVs.  These are not page table pages so
1911 	 * no vm_page is allocated (the caller supplied the vm_page).  If
1912 	 * pvpp is non-NULL we are also being asked to return the pt_pv
1913 	 * for this pv.
1914 	 *
1915 	 * Note that pt_pv's are only returned for user VAs. We assert that
1916 	 * a pt_pv is not being requested for kernel VAs.
1917 	 */
1918 	if (ptepindex < pmap_pt_pindex(0)) {
1919 		if (ptepindex >= NUPTE_USER)
1920 			KKASSERT(pvpp == NULL);
1921 		else
1922 			KKASSERT(pvpp != NULL);
1923 		if (pvpp) {
1924 			pt_pindex = NUPTE_TOTAL + (ptepindex >> NPTEPGSHIFT);
1925 			pvp = pmap_allocpte(pmap, pt_pindex, NULL);
1926 			if (isnew)
1927 				vm_page_wire_quick(pvp->pv_m);
1928 			*pvpp = pvp;
1929 		} else {
1930 			pvp = NULL;
1931 		}
1932 		return(pv);
1933 	}
1934 
1935 	/*
1936 	 * Non-terminal PVs allocate a VM page to represent the page table,
1937 	 * so we have to resolve pvp and calculate ptepindex for the pvp
1938 	 * and then for the page table entry index in the pvp for
1939 	 * fall-through.
1940 	 */
1941 	if (ptepindex < pmap_pd_pindex(0)) {
1942 		/*
1943 		 * pv is PT, pvp is PD
1944 		 */
1945 		ptepindex = (ptepindex - pmap_pt_pindex(0)) >> NPDEPGSHIFT;
1946 		ptepindex += NUPTE_TOTAL + NUPT_TOTAL;
1947 		pvp = pmap_allocpte(pmap, ptepindex, NULL);
1948 		if (!isnew)
1949 			goto notnew;
1950 
1951 		/*
1952 		 * PT index in PD
1953 		 */
1954 		ptepindex = pv->pv_pindex - pmap_pt_pindex(0);
1955 		ptepindex &= ((1ul << NPDEPGSHIFT) - 1);
1956 		ispt = 1;
1957 	} else if (ptepindex < pmap_pdp_pindex(0)) {
1958 		/*
1959 		 * pv is PD, pvp is PDP
1960 		 *
1961 		 * SIMPLE PMAP NOTE: Simple pmaps do not allocate above
1962 		 *		     the PD.
1963 		 */
1964 		ptepindex = (ptepindex - pmap_pd_pindex(0)) >> NPDPEPGSHIFT;
1965 		ptepindex += NUPTE_TOTAL + NUPT_TOTAL + NUPD_TOTAL;
1966 
1967 		if (pmap->pm_flags & PMAP_FLAG_SIMPLE) {
1968 			KKASSERT(pvpp == NULL);
1969 			pvp = NULL;
1970 		} else {
1971 			pvp = pmap_allocpte(pmap, ptepindex, NULL);
1972 		}
1973 		if (!isnew)
1974 			goto notnew;
1975 
1976 		/*
1977 		 * PD index in PDP
1978 		 */
1979 		ptepindex = pv->pv_pindex - pmap_pd_pindex(0);
1980 		ptepindex &= ((1ul << NPDPEPGSHIFT) - 1);
1981 	} else if (ptepindex < pmap_pml4_pindex()) {
1982 		/*
1983 		 * pv is PDP, pvp is the root pml4 table
1984 		 */
1985 		pvp = pmap_allocpte(pmap, pmap_pml4_pindex(), NULL);
1986 		if (!isnew)
1987 			goto notnew;
1988 
1989 		/*
1990 		 * PDP index in PML4
1991 		 */
1992 		ptepindex = pv->pv_pindex - pmap_pdp_pindex(0);
1993 		ptepindex &= ((1ul << NPML4EPGSHIFT) - 1);
1994 	} else {
1995 		/*
1996 		 * pv represents the top-level PML4, there is no parent.
1997 		 */
1998 		pvp = NULL;
1999 		if (!isnew)
2000 			goto notnew;
2001 	}
2002 
2003 	/*
2004 	 * This code is only reached if isnew is TRUE and this is not a
2005 	 * terminal PV.  We need to allocate a vm_page for the page table
2006 	 * at this level and enter it into the parent page table.
2007 	 *
2008 	 * Page table pages are marked PG_WRITEABLE and PG_MAPPED.
2009 	 */
2010 	for (;;) {
2011 		m = vm_page_alloc(NULL, pv->pv_pindex,
2012 				  VM_ALLOC_NORMAL | VM_ALLOC_SYSTEM |
2013 				  VM_ALLOC_INTERRUPT);
2014 		if (m)
2015 			break;
2016 		vm_wait(0);
2017 	}
2018 	vm_page_spin_lock(m);
2019 	pmap_page_stats_adding(m);
2020 	TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
2021 	pv->pv_m = m;
2022 	vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
2023 	vm_page_spin_unlock(m);
2024 	vm_page_unmanage(m);	/* m must be spinunlocked */
2025 
2026 	pmap_zero_page(VM_PAGE_TO_PHYS(m));
2027 	m->valid = VM_PAGE_BITS_ALL;
2028 	vm_page_wire(m);	/* wire for mapping in parent */
2029 
2030 	/*
2031 	 * Wire the page into pvp, bump the wire-count for pvp's page table
2032 	 * page.  Bump the resident_count for the pmap.  There is no pvp
2033 	 * for the top level, address the pm_pml4[] array directly.
2034 	 *
2035 	 * If the caller wants the parent we return it, otherwise
2036 	 * we just put it away.
2037 	 *
2038 	 * No interlock is needed for pte 0 -> non-zero.
2039 	 *
2040 	 * In the situation where *ptep is valid we might have an unmanaged
2041 	 * page table page shared from another page table which we need to
2042 	 * unshare before installing our private page table page.
2043 	 */
2044 	if (pvp) {
2045 		ptep = pv_pte_lookup(pvp, ptepindex);
2046 		if (*ptep & pmap->pmap_bits[PG_V_IDX]) {
2047 			pt_entry_t pte;
2048 
2049 			if (ispt == 0) {
2050 				panic("pmap_allocpte: unexpected pte %p/%d",
2051 				      pvp, (int)ptepindex);
2052 			}
2053 			pte = pmap_inval_smp(pmap, (vm_offset_t)-1, 1, ptep, 0);
2054 			if (vm_page_unwire_quick(
2055 					PHYS_TO_VM_PAGE(pte & PG_FRAME))) {
2056 				panic("pmap_allocpte: shared pgtable "
2057 				      "pg bad wirecount");
2058 			}
2059 			atomic_add_long(&pmap->pm_stats.resident_count, -1);
2060 		} else {
2061 			vm_page_wire_quick(pvp->pv_m);
2062 		}
2063 		*ptep = VM_PAGE_TO_PHYS(m) |
2064 		    (pmap->pmap_bits[PG_U_IDX] |
2065 		    pmap->pmap_bits[PG_RW_IDX] |
2066 		    pmap->pmap_bits[PG_V_IDX] |
2067 		    pmap->pmap_bits[PG_A_IDX] |
2068 		    pmap->pmap_bits[PG_M_IDX]);
2069 	}
2070 	vm_page_wakeup(m);
2071 notnew:
2072 	if (pvpp)
2073 		*pvpp = pvp;
2074 	else if (pvp)
2075 		pv_put(pvp);
2076 	return (pv);
2077 }
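
/*
 * Editor's usage sketch (hypothetical caller, not original source):
 * pmap_allocpte() returns the requested pv locked+held and, when pvpp
 * is supplied, the locked parent page table pv as well.  Both must
 * eventually be disposed of with pv_put().
 */
#if 0
	pv_entry_t pte_pv;
	pv_entry_t pt_pv;

	pte_pv = pmap_allocpte(pmap, pmap_pte_pindex(va), &pt_pv);
	/* ... install or inspect the terminal pte via pt_pv's page ... */
	pv_put(pt_pv);
	pv_put(pte_pv);
#endif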
2078 
2079 /*
2080  * This version of pmap_allocpte() checks for possible segment optimizations
2081  * that would allow page-table sharing.  It can be called for terminal
2082  * page or page table page ptepindex's.
2083  *
2084  * The function is called with page table page ptepindex's for fictitious
2085  * and unmanaged terminal pages.  That is, we don't want to allocate a
2086  * terminal pv, we just want the pt_pv.  pvpp is usually passed as NULL
2087  * for this case.
2088  *
2089  * This function can return a pv and *pvpp associated with the passed-in pmap
2090  * OR a pv and *pvpp associated with the shared pmap.  In the latter case
2091  * an unmanaged page table page will be entered into the passed-in pmap.
2092  */
2093 static
2094 pv_entry_t
2095 pmap_allocpte_seg(pmap_t pmap, vm_pindex_t ptepindex, pv_entry_t *pvpp,
2096 		  vm_map_entry_t entry, vm_offset_t va)
2097 {
2098 	vm_object_t object;
2099 	pmap_t obpmap;
2100 	pmap_t *obpmapp;
2101 	vm_offset_t b;
2102 	pv_entry_t pte_pv;	/* in original or shared pmap */
2103 	pv_entry_t pt_pv;	/* in original or shared pmap */
2104 	pv_entry_t proc_pd_pv;	/* in original pmap */
2105 	pv_entry_t proc_pt_pv;	/* in original pmap */
2106 	pv_entry_t xpv;		/* PT in shared pmap */
2107 	pd_entry_t *pt;		/* PT entry in PD of original pmap */
2108 	pd_entry_t opte;	/* old contents of *pt */
2109 	pd_entry_t npte;	/* new contents of *pt */
2110 	vm_page_t m;
2111 
2112 retry:
2113 	/*
2114 	 * Basic tests, require a non-NULL vm_map_entry, require proper
2115 	 * alignment and type for the vm_map_entry, require that the
2116 	 * underlying object already be allocated.
2117 	 *
2118 	 * We allow almost any type of object to use this optimization.
2119 	 * The object itself does NOT have to be sized to a multiple of the
2120 	 * segment size, but the memory mapping does.
2121 	 *
2122 	 * XXX don't handle devices currently, because VM_PAGE_TO_PHYS()
2123 	 *     won't work as expected.
2124 	 */
2125 	if (entry == NULL ||
2126 	    pmap_mmu_optimize == 0 ||			/* not enabled */
2127 	    (pmap->pm_flags & PMAP_HVM) ||		/* special pmap */
2128 	    ptepindex >= pmap_pd_pindex(0) ||		/* not terminal or pt */
2129 	    entry->inheritance != VM_INHERIT_SHARE ||	/* not shared */
2130 	    entry->maptype != VM_MAPTYPE_NORMAL ||	/* weird map type */
2131 	    entry->object.vm_object == NULL ||		/* needs VM object */
2132 	    entry->object.vm_object->type == OBJT_DEVICE ||	/* ick */
2133 	    entry->object.vm_object->type == OBJT_MGTDEVICE ||	/* ick */
2134 	    (entry->offset & SEG_MASK) ||		/* must be aligned */
2135 	    (entry->start & SEG_MASK)) {
2136 		return(pmap_allocpte(pmap, ptepindex, pvpp));
2137 	}
2138 
2139 	/*
2140 	 * Make sure the full segment can be represented.
2141 	 */
2142 	b = va & ~(vm_offset_t)SEG_MASK;
2143 	if (b < entry->start || b + SEG_SIZE > entry->end)
2144 		return(pmap_allocpte(pmap, ptepindex, pvpp));
2145 
2146 	/*
2147 	 * If the full segment can be represented, dive into the VM object's
2148 	 * shared pmap, allocating as required.
2149 	 */
2150 	object = entry->object.vm_object;
2151 
2152 	if (entry->protection & VM_PROT_WRITE)
2153 		obpmapp = &object->md.pmap_rw;
2154 	else
2155 		obpmapp = &object->md.pmap_ro;
2156 
2157 #ifdef PMAP_DEBUG2
2158 	if (pmap_enter_debug > 0) {
2159 		--pmap_enter_debug;
2160 		kprintf("pmap_allocpte_seg: va=%jx prot %08x o=%p "
2161 			"obpmapp %p %p\n",
2162 			va, entry->protection, object,
2163 			obpmapp, *obpmapp);
2164 		kprintf("pmap_allocpte_seg: entry %p %jx-%jx\n",
2165 			entry, entry->start, entry->end);
2166 	}
2167 #endif
2168 
2169 	/*
2170 	 * We allocate what appears to be a normal pmap but because portions
2171 	 * of this pmap are shared with other unrelated pmaps we have to
2172 	 * set pm_active to point to all cpus.
2173 	 *
2174 	 * XXX Currently using pmap_spin to interlock the update, can't use
2175 	 *     vm_object_hold/drop because the token might already be held
2176 	 *     shared OR exclusive and we don't know.
2177 	 */
2178 	while ((obpmap = *obpmapp) == NULL) {
2179 		obpmap = kmalloc(sizeof(*obpmap), M_OBJPMAP, M_WAITOK|M_ZERO);
2180 		pmap_pinit_simple(obpmap);
2181 		pmap_pinit2(obpmap);
2182 		spin_lock(&pmap_spin);
2183 		if (*obpmapp != NULL) {
2184 			/*
2185 			 * Handle race
2186 			 */
2187 			spin_unlock(&pmap_spin);
2188 			pmap_release(obpmap);
2189 			pmap_puninit(obpmap);
2190 			kfree(obpmap, M_OBJPMAP);
2191 			obpmap = *obpmapp; /* safety */
2192 		} else {
2193 			obpmap->pm_active = smp_active_mask;
2194 			obpmap->pm_flags |= PMAP_SEGSHARED;
2195 			*obpmapp = obpmap;
2196 			spin_unlock(&pmap_spin);
2197 		}
2198 	}
2199 
2200 	/*
2201 	 * Layering is: PTE, PT, PD, PDP, PML4.  We have to return the
2202 	 * pte/pt using the shared pmap from the object but also adjust
2203 	 * the process pmap's page table page as a side effect.
2204 	 */
2205 
2206 	/*
2207 	 * Resolve the terminal PTE and PT in the shared pmap.  This is what
2208 	 * we will return.  This is true if ptepindex represents a terminal
2209 	 * page, otherwise pte_pv is actually the PT and pt_pv is actually
2210 	 * the PD.
2211 	 */
2212 	pt_pv = NULL;
2213 	pte_pv = pmap_allocpte(obpmap, ptepindex, &pt_pv);
2214 	if (ptepindex >= pmap_pt_pindex(0))
2215 		xpv = pte_pv;
2216 	else
2217 		xpv = pt_pv;
2218 
2219 	/*
2220 	 * Resolve the PD in the process pmap so we can properly share the
2221 	 * page table page.  Lock order is bottom-up (leaf first)!
2222 	 *
2223 	 * NOTE: proc_pt_pv can be NULL.
2224 	 */
2225 	proc_pt_pv = pv_get(pmap, pmap_pt_pindex(b));
2226 	proc_pd_pv = pmap_allocpte(pmap, pmap_pd_pindex(b), NULL);
2227 #ifdef PMAP_DEBUG2
2228 	if (pmap_enter_debug > 0) {
2229 		--pmap_enter_debug;
2230 		kprintf("proc_pt_pv %p (wc %d) pd_pv %p va=%jx\n",
2231 			proc_pt_pv,
2232 			(proc_pt_pv ? proc_pt_pv->pv_m->wire_count : -1),
2233 			proc_pd_pv,
2234 			va);
2235 	}
2236 #endif
2237 
2238 	/*
2239 	 * xpv is the page table page pv from the shared object
2240 	 * (for convenience), from above.
2241 	 *
2242 	 * Calculate the pte value for the PT to load into the process PD.
2243 	 * If we have to change it we must properly dispose of the previous
2244 	 * entry.
2245 	 */
2246 	pt = pv_pte_lookup(proc_pd_pv, pmap_pt_index(b));
2247 	npte = VM_PAGE_TO_PHYS(xpv->pv_m) |
2248 	    (pmap->pmap_bits[PG_U_IDX] |
2249 	    pmap->pmap_bits[PG_RW_IDX] |
2250 	    pmap->pmap_bits[PG_V_IDX] |
2251 	    pmap->pmap_bits[PG_A_IDX] |
2252 	    pmap->pmap_bits[PG_M_IDX]);
2253 
2254 	/*
2255 	 * Dispose of previous page table page if it was local to the
2256 	 * process pmap.  If the old pt is not empty we cannot dispose of it
2257 	 * until we clean it out.  This case should not arise very often so
2258 	 * it is not optimized.
2259 	 */
2260 	if (proc_pt_pv) {
2261 		pmap_inval_bulk_t bulk;
2262 
2263 		if (proc_pt_pv->pv_m->wire_count != 1) {
2264 			pv_put(proc_pd_pv);
2265 			pv_put(proc_pt_pv);
2266 			pv_put(pt_pv);
2267 			pv_put(pte_pv);
2268 			pmap_remove(pmap,
2269 				    va & ~(vm_offset_t)SEG_MASK,
2270 				    (va + SEG_SIZE) & ~(vm_offset_t)SEG_MASK);
2271 			goto retry;
2272 		}
2273 
2274 		/*
2275 		 * The release call will indirectly clean out *pt
2276 		 */
2277 		pmap_inval_bulk_init(&bulk, proc_pt_pv->pv_pmap);
2278 		pmap_release_pv(proc_pt_pv, proc_pd_pv, &bulk);
2279 		pmap_inval_bulk_flush(&bulk);
2280 		proc_pt_pv = NULL;
2281 		/* relookup */
2282 		pt = pv_pte_lookup(proc_pd_pv, pmap_pt_index(b));
2283 	}
2284 
2285 	/*
2286 	 * Handle remaining cases.
2287 	 */
2288 	if (*pt == 0) {
2289 		*pt = npte;
2290 		vm_page_wire_quick(xpv->pv_m);
2291 		vm_page_wire_quick(proc_pd_pv->pv_m);
2292 		atomic_add_long(&pmap->pm_stats.resident_count, 1);
2293 	} else if (*pt != npte) {
2294 		opte = pmap_inval_smp(pmap, (vm_offset_t)-1, 1, pt, npte);
2295 
2296 #if 0
2297 		opte = pte_load_clear(pt);
2298 		KKASSERT(opte && opte != npte);
2299 
2300 		*pt = npte;
2301 #endif
2302 		vm_page_wire_quick(xpv->pv_m);	/* pgtable pg that is npte */
2303 
2304 		/*
2305 		 * Clean up opte, bump the wire_count for the process
2306 		 * PD page representing the new entry if it was
2307 		 * previously empty.
2308 		 *
2309 		 * If the entry was not previously empty and we have
2310 		 * a PT in the proc pmap then opte must match that
2311 		 * pt.  The proc pt must be retired (this is done
2312 		 * later on in this procedure).
2313 		 *
2314 		 * NOTE: replacing valid pte, wire_count on proc_pd_pv
2315 		 * stays the same.
2316 		 */
2317 		KKASSERT(opte & pmap->pmap_bits[PG_V_IDX]);
2318 		m = PHYS_TO_VM_PAGE(opte & PG_FRAME);
2319 		if (vm_page_unwire_quick(m)) {
2320 			panic("pmap_allocpte_seg: "
2321 			      "bad wire count %p",
2322 			      m);
2323 		}
2324 	}
2325 
2326 	/*
2327 	 * Release our PD/PT locks; any replaced process page table page
2328 	 * was disposed of above.
2329 	 */
2330 	if (proc_pd_pv)
2331 		pv_put(proc_pd_pv);
2332 	if (pvpp)
2333 		*pvpp = pt_pv;
2334 	else
2335 		pv_put(pt_pv);
2336 
2337 	return (pte_pv);
2338 }
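
/*
 * Editor's worked example (a sketch assuming SEG_SIZE is the 2MB
 * page-table sharing granularity): the optimization above applies only
 * when the segment containing va lies wholly inside the map entry,
 * which is the same test the function performs before diving into the
 * object's shared pmap.
 */
#if 0
	vm_offset_t b = va & ~(vm_offset_t)SEG_MASK;	/* segment base */

	if (b >= entry->start && b + SEG_SIZE <= entry->end) {
		/* the PT page covering [b, b + SEG_SIZE) is shareable */
	}
#endif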
2339 
2340 /*
2341  * Release any resources held by the given physical map.
2342  *
2343  * Called when a pmap initialized by pmap_pinit is being released.  Should
2344  * only be called if the map contains no valid mappings.
2345  *
2346  * Caller must hold pmap->pm_token
2347  */
2348 struct pmap_release_info {
2349 	pmap_t	pmap;
2350 	int	retry;
2351 };
2352 
2353 static int pmap_release_callback(pv_entry_t pv, void *data);
2354 
2355 void
2356 pmap_release(struct pmap *pmap)
2357 {
2358 	struct pmap_release_info info;
2359 
2360 	KASSERT(CPUMASK_TESTZERO(pmap->pm_active),
2361 		("pmap still active! %016jx",
2362 		(uintmax_t)CPUMASK_LOWMASK(pmap->pm_active)));
2363 
2364 	spin_lock(&pmap_spin);
2365 	TAILQ_REMOVE(&pmap_list, pmap, pm_pmnode);
2366 	spin_unlock(&pmap_spin);
2367 
2368 	/*
2369 	 * Pull pv's off the RB tree in order from low to high and release
2370 	 * each page.
2371 	 */
2372 	info.pmap = pmap;
2373 	do {
2374 		info.retry = 0;
2375 		spin_lock(&pmap->pm_spin);
2376 		RB_SCAN(pv_entry_rb_tree, &pmap->pm_pvroot, NULL,
2377 			pmap_release_callback, &info);
2378 		spin_unlock(&pmap->pm_spin);
2379 	} while (info.retry);
2380 
2381 
2382 	/*
2383 	 * One resident page (the pml4 page) should remain.
2384 	 * No wired pages should remain.
2385 	 */
2386 	KKASSERT(pmap->pm_stats.resident_count ==
2387 		 ((pmap->pm_flags & PMAP_FLAG_SIMPLE) ? 0 : 1));
2388 
2389 	KKASSERT(pmap->pm_stats.wired_count == 0);
2390 }
2391 
2392 static int
2393 pmap_release_callback(pv_entry_t pv, void *data)
2394 {
2395 	struct pmap_release_info *info = data;
2396 	pmap_t pmap = info->pmap;
2397 	int r;
2398 
2399 	if (pv_hold_try(pv)) {
2400 		spin_unlock(&pmap->pm_spin);
2401 	} else {
2402 		spin_unlock(&pmap->pm_spin);
2403 		pv_lock(pv);
2404 	}
2405 	if (pv->pv_pmap != pmap) {
2406 		pv_put(pv);
2407 		spin_lock(&pmap->pm_spin);
2408 		info->retry = 1;
2409 		return(-1);
2410 	}
2411 	r = pmap_release_pv(pv, NULL, NULL);
2412 	spin_lock(&pmap->pm_spin);
2413 	return(r);
2414 }
2415 
2416 /*
2417  * Called with a held and locked pv.  This function will dispose of
2418  * the lock along with the pv.
2419  *
2420  * If the caller already holds the locked parent page table for pv it
2421  * must pass it as pvp, allowing us to avoid a deadlock, else it can
2422  * pass NULL for pvp.
2423  */
2424 static int
2425 pmap_release_pv(pv_entry_t pv, pv_entry_t pvp, pmap_inval_bulk_t *bulk)
2426 {
2427 	vm_page_t p;
2428 
2429 	/*
2430 	 * The pmap is currently not spinlocked, pv is held+locked.
2431 	 * Remove the pv's page from its parent's page table.  The
2432 	 * parent's page table page's wire_count will be decremented.
2433 	 *
2434 	 * This will clean out the pte at any level of the page table.
2435 	 * If smp != 0 all cpus are affected.
2436 	 */
2437 	pmap_remove_pv_pte(pv, pvp, bulk);
2438 
2439 	/*
2440 	 * Terminal pvs are unhooked from their vm_pages.  Because
2441 	 * terminal pages aren't page table pages they aren't wired
2442 	 * by us, so we have to be sure not to unwire them either.
2443 	 */
2444 	if (pv->pv_pindex < pmap_pt_pindex(0)) {
2445 		pmap_remove_pv_page(pv);
2446 		goto skip;
2447 	}
2448 
2449 	/*
2450 	 * We leave the top-level page table page cached, wired, and
2451 	 * mapped in the pmap until the dtor function (pmap_puninit())
2452 	 * gets called.
2453 	 *
2454 	 * Since we are leaving the top-level pv intact we need
2455 	 * to break out of what would otherwise be an infinite loop.
2456 	 */
2457 	if (pv->pv_pindex == pmap_pml4_pindex()) {
2458 		pv_put(pv);
2459 		return(-1);
2460 	}
2461 
2462 	/*
2463 	 * For page table pages (other than the top-level page),
2464 	 * remove and free the vm_page.  The representative mapping
2465 	 * removed above by pmap_remove_pv_pte() did not undo the
2466 	 * last wire_count so we have to do that as well.
2467 	 */
2468 	p = pmap_remove_pv_page(pv);
2469 	vm_page_busy_wait(p, FALSE, "pmaprl");
2470 	if (p->wire_count != 1) {
2471 		kprintf("pv_pindex %016lx: p->wire_count was %d\n",
2472 			pv->pv_pindex, p->wire_count);
2473 	}
2474 	KKASSERT(p->wire_count == 1);
2475 	KKASSERT(p->flags & PG_UNMANAGED);
2476 
2477 	vm_page_unwire(p, 0);
2478 	KKASSERT(p->wire_count == 0);
2479 
2480 	vm_page_free(p);
2481 skip:
2482 	pv_free(pv);
2483 	return 0;
2484 }
2485 
2486 /*
2487  * This function will remove the pte associated with a pv from its parent.
2488  * Terminal pv's are supported.  All cpus are affected if smp != 0.
2489  *
2490  * The wire count will be dropped on the parent page table.  The wire
2491  * count on the page being removed (pv->pv_m) from the parent page table
2492  * is NOT touched.  Note that terminal pages will not have any additional
2493  * wire counts while page table pages will have at least one representing
2494  * the mapping, plus others representing sub-mappings.
2495  *
2496  * NOTE: Cannot be called on kernel page table pages, only KVM terminal
2497  *	 pages and user page table and terminal pages.
2498  *
2499  * The pv must be locked.
2500  *
2501  * XXX must lock parent pv's if they exist to remove pte XXX
2502  */
2503 static
2504 void
2505 pmap_remove_pv_pte(pv_entry_t pv, pv_entry_t pvp, pmap_inval_bulk_t *bulk)
2506 {
2507 	vm_pindex_t ptepindex = pv->pv_pindex;
2508 	pmap_t pmap = pv->pv_pmap;
2509 	vm_page_t p;
2510 	int gotpvp = 0;
2511 
2512 	KKASSERT(pmap);
2513 
2514 	if (ptepindex == pmap_pml4_pindex()) {
2515 		/*
2516 		 * We are the top level pml4 table, there is no parent.
2517 		 */
2518 		p = pmap->pm_pmlpv->pv_m;
2519 	} else if (ptepindex >= pmap_pdp_pindex(0)) {
2520 		/*
2521 		 * Remove a PDP page from the pml4e.  This can only occur
2522 		 * with user page tables.  We do not have to lock the
2523 		 * pml4 PV so just ignore pvp.
2524 		 */
2525 		vm_pindex_t pml4_pindex;
2526 		vm_pindex_t pdp_index;
2527 		pml4_entry_t *pdp;
2528 
2529 		pdp_index = ptepindex - pmap_pdp_pindex(0);
2530 		if (pvp == NULL) {
2531 			pml4_pindex = pmap_pml4_pindex();
2532 			pvp = pv_get(pv->pv_pmap, pml4_pindex);
2533 			KKASSERT(pvp);
2534 			gotpvp = 1;
2535 		}
2536 		pdp = &pmap->pm_pml4[pdp_index & ((1ul << NPML4EPGSHIFT) - 1)];
2537 		KKASSERT((*pdp & pmap->pmap_bits[PG_V_IDX]) != 0);
2538 		p = PHYS_TO_VM_PAGE(*pdp & PG_FRAME);
2539 		pmap_inval_bulk(bulk, (vm_offset_t)-1, pdp, 0);
2540 	} else if (ptepindex >= pmap_pd_pindex(0)) {
2541 		/*
2542 		 * Remove a PD page from the pdp
2543 		 *
2544 		 * SIMPLE PMAP NOTE: Non-existent pvp's are ok in the case
2545 		 *		     of a simple pmap because it stops at
2546 		 *		     the PD page.
2547 		 */
2548 		vm_pindex_t pdp_pindex;
2549 		vm_pindex_t pd_index;
2550 		pdp_entry_t *pd;
2551 
2552 		pd_index = ptepindex - pmap_pd_pindex(0);
2553 
2554 		if (pvp == NULL) {
2555 			pdp_pindex = NUPTE_TOTAL + NUPT_TOTAL + NUPD_TOTAL +
2556 				     (pd_index >> NPML4EPGSHIFT);
2557 			pvp = pv_get(pv->pv_pmap, pdp_pindex);
2558 			if (pvp)
2559 				gotpvp = 1;
2560 		}
2561 		if (pvp) {
2562 			pd = pv_pte_lookup(pvp, pd_index &
2563 						((1ul << NPDPEPGSHIFT) - 1));
2564 			KKASSERT((*pd & pmap->pmap_bits[PG_V_IDX]) != 0);
2565 			p = PHYS_TO_VM_PAGE(*pd & PG_FRAME);
2566 			pmap_inval_bulk(bulk, (vm_offset_t)-1, pd, 0);
2567 		} else {
2568 			KKASSERT(pmap->pm_flags & PMAP_FLAG_SIMPLE);
2569 			p = pv->pv_m;		/* degenerate test later */
2570 		}
2571 	} else if (ptepindex >= pmap_pt_pindex(0)) {
2572 		/*
2573 		 * Remove a PT page from the pd
2574 		 */
2575 		vm_pindex_t pd_pindex;
2576 		vm_pindex_t pt_index;
2577 		pd_entry_t *pt;
2578 
2579 		pt_index = ptepindex - pmap_pt_pindex(0);
2580 
2581 		if (pvp == NULL) {
2582 			pd_pindex = NUPTE_TOTAL + NUPT_TOTAL +
2583 				    (pt_index >> NPDPEPGSHIFT);
2584 			pvp = pv_get(pv->pv_pmap, pd_pindex);
2585 			KKASSERT(pvp);
2586 			gotpvp = 1;
2587 		}
2588 		pt = pv_pte_lookup(pvp, pt_index & ((1ul << NPDPEPGSHIFT) - 1));
2589 		KKASSERT((*pt & pmap->pmap_bits[PG_V_IDX]) != 0);
2590 		p = PHYS_TO_VM_PAGE(*pt & PG_FRAME);
2591 		pmap_inval_bulk(bulk, (vm_offset_t)-1, pt, 0);
2592 	} else {
2593 		/*
2594 		 * Remove a PTE from the PT page
2595 		 *
2596 		 * NOTE: pv's must be locked bottom-up to avoid deadlocking.
2597 		 *	 pv is a pte_pv so we can safely lock pt_pv.
2598 		 *
2599 		 * NOTE: FICTITIOUS pages may have multiple physical mappings
2600 		 *	 so PHYS_TO_VM_PAGE() will not necessarily work for
2601 		 *	 terminal ptes.
2602 		 */
2603 		vm_pindex_t pt_pindex;
2604 		pt_entry_t *ptep;
2605 		pt_entry_t pte;
2606 		vm_offset_t va;
2607 
2608 		pt_pindex = ptepindex >> NPTEPGSHIFT;
2609 		va = (vm_offset_t)ptepindex << PAGE_SHIFT;
2610 
2611 		if (ptepindex >= NUPTE_USER) {
2612 			ptep = vtopte(ptepindex << PAGE_SHIFT);
2613 			KKASSERT(pvp == NULL);
2614 		} else {
2615 			if (pvp == NULL) {
2616 				pt_pindex = NUPTE_TOTAL +
2617 					    (ptepindex >> NPDPEPGSHIFT);
2618 				pvp = pv_get(pv->pv_pmap, pt_pindex);
2619 				KKASSERT(pvp);
2620 				gotpvp = 1;
2621 			}
2622 			ptep = pv_pte_lookup(pvp, ptepindex &
2623 						  ((1ul << NPDPEPGSHIFT) - 1));
2624 		}
2625 		pte = pmap_inval_bulk(bulk, va, ptep, 0);
2626 		if (bulk == NULL)		/* XXX */
2627 			cpu_invlpg((void *)va);	/* XXX */
2628 
2629 		/*
2630 		 * Now update the vm_page_t
2631 		 */
2632 		if ((pte & (pmap->pmap_bits[PG_MANAGED_IDX] | pmap->pmap_bits[PG_V_IDX])) !=
2633 		    (pmap->pmap_bits[PG_MANAGED_IDX]|pmap->pmap_bits[PG_V_IDX])) {
2634 			kprintf("remove_pte badpte %016lx %016lx %d\n",
2635 				pte, pv->pv_pindex,
2636 				pv->pv_pindex < pmap_pt_pindex(0));
2637 		}
2638 		/* PHYS_TO_VM_PAGE() will not work for FICTITIOUS pages */
2639 		/*KKASSERT((pte & (PG_MANAGED|PG_V)) == (PG_MANAGED|PG_V));*/
2640 		if (pte & pmap->pmap_bits[PG_DEVICE_IDX])
2641 			p = pv->pv_m;
2642 		else
2643 			p = PHYS_TO_VM_PAGE(pte & PG_FRAME);
2644 		/* p = pv->pv_m; */
2645 
2646 		if (pte & pmap->pmap_bits[PG_M_IDX]) {
2647 			if (pmap_track_modified(ptepindex))
2648 				vm_page_dirty(p);
2649 		}
2650 		if (pte & pmap->pmap_bits[PG_A_IDX]) {
2651 			vm_page_flag_set(p, PG_REFERENCED);
2652 		}
2653 		if (pte & pmap->pmap_bits[PG_W_IDX])
2654 			atomic_add_long(&pmap->pm_stats.wired_count, -1);
2655 		if (pte & pmap->pmap_bits[PG_G_IDX])
2656 			cpu_invlpg((void *)va);
2657 	}
2658 
2659 	/*
2660 	 * Unwire the parent page table page.  The wire_count cannot go below
2661 	 * 1 here because the parent page table page is itself still mapped.
2662 	 *
2663 	 * XXX remove the assertions later.
2664 	 */
2665 	KKASSERT(pv->pv_m == p);
2666 	if (pvp && vm_page_unwire_quick(pvp->pv_m))
2667 		panic("pmap_remove_pv_pte: Insufficient wire_count");
2668 
2669 	if (gotpvp)
2670 		pv_put(pvp);
2671 }
2672 
2673 /*
2674  * Remove the vm_page association to a pv.  The pv must be locked.
2675  */
2676 static
2677 vm_page_t
2678 pmap_remove_pv_page(pv_entry_t pv)
2679 {
2680 	vm_page_t m;
2681 
2682 	m = pv->pv_m;
2683 	KKASSERT(m);
2684 	vm_page_spin_lock(m);
2685 	pv->pv_m = NULL;
2686 	TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
2687 	pmap_page_stats_deleting(m);
2688 	/*
2689 	if (m->object)
2690 		atomic_add_int(&m->object->agg_pv_list_count, -1);
2691 	*/
2692 	if (TAILQ_EMPTY(&m->md.pv_list))
2693 		vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
2694 	vm_page_spin_unlock(m);
2695 	return(m);
2696 }
2697 
2698 /*
2699  * Grow the number of kernel page table entries, if needed.
2700  *
2701  * This routine is always called to validate any address space
2702  * beyond KERNBASE (for kldloads).  kernel_vm_end only governs the address
2703  * space below KERNBASE.
2704  */
2705 void
2706 pmap_growkernel(vm_offset_t kstart, vm_offset_t kend)
2707 {
2708 	vm_paddr_t paddr;
2709 	vm_offset_t ptppaddr;
2710 	vm_page_t nkpg;
2711 	pd_entry_t *pt, newpt;
2712 	pdp_entry_t newpd;
2713 	int update_kernel_vm_end;
2714 
2715 	/*
2716 	 * bootstrap kernel_vm_end on first real VM use
2717 	 */
2718 	if (kernel_vm_end == 0) {
2719 		kernel_vm_end = VM_MIN_KERNEL_ADDRESS;
2720 		nkpt = 0;
2721 		while ((*pmap_pt(&kernel_pmap, kernel_vm_end) & kernel_pmap.pmap_bits[PG_V_IDX]) != 0) {
2722 			kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) &
2723 					~(PAGE_SIZE * NPTEPG - 1);
2724 			nkpt++;
2725 			if (kernel_vm_end - 1 >= kernel_map.max_offset) {
2726 				kernel_vm_end = kernel_map.max_offset;
2727 				break;
2728 			}
2729 		}
2730 	}
2731 
2732 	/*
2733 	 * Fill in the gaps.  kernel_vm_end is only adjusted for ranges
2734 	 * below KERNBASE.  Ranges above KERNBASE are kldloaded and we
2735 	 * do not want to force-fill 128G worth of page tables.
2736 	 */
2737 	if (kstart < KERNBASE) {
2738 		if (kstart > kernel_vm_end)
2739 			kstart = kernel_vm_end;
2740 		KKASSERT(kend <= KERNBASE);
2741 		update_kernel_vm_end = 1;
2742 	} else {
2743 		update_kernel_vm_end = 0;
2744 	}
2745 
2746 	kstart = rounddown2(kstart, PAGE_SIZE * NPTEPG);
2747 	kend = roundup2(kend, PAGE_SIZE * NPTEPG);
2748 
2749 	if (kend - 1 >= kernel_map.max_offset)
2750 		kend = kernel_map.max_offset;
2751 
2752 	while (kstart < kend) {
2753 		pt = pmap_pt(&kernel_pmap, kstart);
2754 		if (pt == NULL) {
2755 			/* We need a new PDP entry */
2756 			nkpg = vm_page_alloc(NULL, nkpt,
2757 			                     VM_ALLOC_NORMAL |
2758 					     VM_ALLOC_SYSTEM |
2759 					     VM_ALLOC_INTERRUPT);
2760 			if (nkpg == NULL) {
2761 				panic("pmap_growkernel: no memory to grow "
2762 				      "kernel");
2763 			}
2764 			paddr = VM_PAGE_TO_PHYS(nkpg);
2765 			pmap_zero_page(paddr);
2766 			newpd = (pdp_entry_t)
2767 			    (paddr |
2768 			    kernel_pmap.pmap_bits[PG_V_IDX] |
2769 			    kernel_pmap.pmap_bits[PG_RW_IDX] |
2770 			    kernel_pmap.pmap_bits[PG_A_IDX] |
2771 			    kernel_pmap.pmap_bits[PG_M_IDX]);
2772 			*pmap_pd(&kernel_pmap, kstart) = newpd;
2773 			nkpt++;
2774 			continue; /* try again */
2775 		}
2776 		if ((*pt & kernel_pmap.pmap_bits[PG_V_IDX]) != 0) {
2777 			kstart = (kstart + PAGE_SIZE * NPTEPG) &
2778 				 ~(PAGE_SIZE * NPTEPG - 1);
2779 			if (kstart - 1 >= kernel_map.max_offset) {
2780 				kstart = kernel_map.max_offset;
2781 				break;
2782 			}
2783 			continue;
2784 		}
2785 
2786 		/*
2787 		 * This index is bogus, but out of the way
2788 		 */
2789 		nkpg = vm_page_alloc(NULL, nkpt,
2790 				     VM_ALLOC_NORMAL |
2791 				     VM_ALLOC_SYSTEM |
2792 				     VM_ALLOC_INTERRUPT);
2793 		if (nkpg == NULL)
2794 			panic("pmap_growkernel: no memory to grow kernel");
2795 
2796 		vm_page_wire(nkpg);
2797 		ptppaddr = VM_PAGE_TO_PHYS(nkpg);
2798 		pmap_zero_page(ptppaddr);
2799 		newpt = (pd_entry_t) (ptppaddr |
2800 		    kernel_pmap.pmap_bits[PG_V_IDX] |
2801 		    kernel_pmap.pmap_bits[PG_RW_IDX] |
2802 		    kernel_pmap.pmap_bits[PG_A_IDX] |
2803 		    kernel_pmap.pmap_bits[PG_M_IDX]);
2804 		*pmap_pt(&kernel_pmap, kstart) = newpt;
2805 		nkpt++;
2806 
2807 		kstart = (kstart + PAGE_SIZE * NPTEPG) &
2808 			  ~(PAGE_SIZE * NPTEPG - 1);
2809 
2810 		if (kstart - 1 >= kernel_map.max_offset) {
2811 			kstart = kernel_map.max_offset;
2812 			break;
2813 		}
2814 	}
2815 
2816 	/*
2817 	 * Only update kernel_vm_end for areas below KERNBASE.
2818 	 */
2819 	if (update_kernel_vm_end && kernel_vm_end < kstart)
2820 		kernel_vm_end = kstart;
2821 }
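
/*
 * Editor's note with a worked example (assuming 4K pages and 512 ptes
 * per page table page): each PT page maps PAGE_SIZE * NPTEPG = 2MB of
 * KVA, so the requested range is widened to 2MB boundaries before the
 * fill loop above runs.
 */
#if 0
	/* with kstart = 0x12345000 and kend = 0x12346000 (sample values): */
	kstart = rounddown2(kstart, PAGE_SIZE * NPTEPG);  /* 0x12200000 */
	kend = roundup2(kend, PAGE_SIZE * NPTEPG);	  /* 0x12400000 */
#endif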
2822 
2823 /*
2824  *	Add a reference to the specified pmap.
2825  */
2826 void
2827 pmap_reference(pmap_t pmap)
2828 {
2829 	if (pmap != NULL) {
2830 		lwkt_gettoken(&pmap->pm_token);
2831 		++pmap->pm_count;
2832 		lwkt_reltoken(&pmap->pm_token);
2833 	}
2834 }
2835 
2836 /***************************************************
2837  * page management routines.
2838  ***************************************************/
2839 
2840 /*
2841  * Hold a pv without locking it
2842  */
2843 static void
2844 pv_hold(pv_entry_t pv)
2845 {
2846 	atomic_add_int(&pv->pv_hold, 1);
2847 }
2848 
2849 /*
2850  * Hold a pv_entry, preventing its destruction.  TRUE is returned if the pv
2851  * was successfully locked, FALSE if it wasn't.  The caller must dispose of
2852  * the pv properly.
2853  *
2854  * Either the pmap->pm_spin or the related vm_page_spin (if traversing a
2855  * pv list via its page) must be held by the caller.
2856  */
2857 static int
2858 _pv_hold_try(pv_entry_t pv PMAP_DEBUG_DECL)
2859 {
2860 	u_int count;
2861 
2862 	/*
2863 	 * Critical path shortcut expects pv to already have one ref
2864 	 * (for the pv->pv_pmap).
2865 	 */
2866 	if (atomic_cmpset_int(&pv->pv_hold, 1, PV_HOLD_LOCKED | 2)) {
2867 #ifdef PMAP_DEBUG
2868 		pv->pv_func = func;
2869 		pv->pv_line = lineno;
2870 #endif
2871 		return TRUE;
2872 	}
2873 
2874 	for (;;) {
2875 		count = pv->pv_hold;
2876 		cpu_ccfence();
2877 		if ((count & PV_HOLD_LOCKED) == 0) {
2878 			if (atomic_cmpset_int(&pv->pv_hold, count,
2879 					      (count + 1) | PV_HOLD_LOCKED)) {
2880 #ifdef PMAP_DEBUG
2881 				pv->pv_func = func;
2882 				pv->pv_line = lineno;
2883 #endif
2884 				return TRUE;
2885 			}
2886 		} else {
2887 			if (atomic_cmpset_int(&pv->pv_hold, count, count + 1))
2888 				return FALSE;
2889 		}
2890 		/* retry */
2891 	}
2892 }
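
/*
 * Editor's sketch of the pv_hold encoding these routines rely on: the
 * low PV_HOLD_MASK bits count holds while PV_HOLD_LOCKED and
 * PV_HOLD_WAITING are flags in the same word, allowing hold+lock
 * transitions with a single atomic cmpset.
 */
#if 0
	/* quiescent pv: unlocked, one hold (the pmap association) */
	KKASSERT(pv->pv_hold == 1);
	/* after a successful _pv_hold_try(): locked, pmap + caller holds */
	KKASSERT(pv->pv_hold == (PV_HOLD_LOCKED | 2));
#endif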
2893 
2894 /*
2895  * Drop a previously held pv_entry which could not be locked, allowing its
2896  * destruction.
2897  *
2898  * Must not be called with a spinlock held as we might zfree() the pv if it
2899  * is no longer associated with a pmap and this was the last hold count.
2900  */
2901 static void
2902 pv_drop(pv_entry_t pv)
2903 {
2904 	u_int count;
2905 
2906 	for (;;) {
2907 		count = pv->pv_hold;
2908 		cpu_ccfence();
2909 		KKASSERT((count & PV_HOLD_MASK) > 0);
2910 		KKASSERT((count & (PV_HOLD_LOCKED | PV_HOLD_MASK)) !=
2911 			 (PV_HOLD_LOCKED | 1));
2912 		if (atomic_cmpset_int(&pv->pv_hold, count, count - 1)) {
2913 			if ((count & PV_HOLD_MASK) == 1) {
2914 #ifdef PMAP_DEBUG2
2915 				if (pmap_enter_debug > 0) {
2916 					--pmap_enter_debug;
2917 					kprintf("pv_drop: free pv %p\n", pv);
2918 				}
2919 #endif
2920 				KKASSERT(count == 1);
2921 				KKASSERT(pv->pv_pmap == NULL);
2922 				zfree(pvzone, pv);
2923 			}
2924 			return;
2925 		}
2926 		/* retry */
2927 	}
2928 }
2929 
2930 /*
2931  * Find or allocate the requested PV entry, returning a locked, held pv.
2932  *
2933  * If (*isnew) is non-zero, the returned pv will have two hold counts, one
2934  * for the caller and one representing the pmap and vm_page association.
2935  *
2936  * If (*isnew) is zero, the returned pv will have only one hold count.
2937  *
2938  * Since both associations can only be adjusted while the pv is locked,
2939  * together they represent just one additional hold.
2940  */
2941 static
2942 pv_entry_t
2943 _pv_alloc(pmap_t pmap, vm_pindex_t pindex, int *isnew PMAP_DEBUG_DECL)
2944 {
2945 	pv_entry_t pv;
2946 	pv_entry_t pnew = NULL;
2947 
2948 	spin_lock(&pmap->pm_spin);
2949 	for (;;) {
2950 		if ((pv = pmap->pm_pvhint) == NULL || pv->pv_pindex != pindex) {
2951 			pv = pv_entry_rb_tree_RB_LOOKUP(&pmap->pm_pvroot,
2952 							pindex);
2953 		}
2954 		if (pv == NULL) {
2955 			if (pnew == NULL) {
2956 				spin_unlock(&pmap->pm_spin);
2957 				pnew = zalloc(pvzone);
2958 				spin_lock(&pmap->pm_spin);
2959 				continue;
2960 			}
2961 			pnew->pv_pmap = pmap;
2962 			pnew->pv_pindex = pindex;
2963 			pnew->pv_hold = PV_HOLD_LOCKED | 2;
2964 #ifdef PMAP_DEBUG
2965 			pnew->pv_func = func;
2966 			pnew->pv_line = lineno;
2967 #endif
2968 			pv_entry_rb_tree_RB_INSERT(&pmap->pm_pvroot, pnew);
2969 			++pmap->pm_generation;
2970 			atomic_add_long(&pmap->pm_stats.resident_count, 1);
2971 			spin_unlock(&pmap->pm_spin);
2972 			*isnew = 1;
2973 			return(pnew);
2974 		}
2975 		if (pnew) {
2976 			spin_unlock(&pmap->pm_spin);
2977 			zfree(pvzone, pnew);
2978 			pnew = NULL;
2979 			spin_lock(&pmap->pm_spin);
2980 			continue;
2981 		}
2982 		if (_pv_hold_try(pv PMAP_DEBUG_COPY)) {
2983 			spin_unlock(&pmap->pm_spin);
2984 		} else {
2985 			spin_unlock(&pmap->pm_spin);
2986 			_pv_lock(pv PMAP_DEBUG_COPY);
2987 		}
2988 		if (pv->pv_pmap == pmap && pv->pv_pindex == pindex) {
2989 			*isnew = 0;
2990 			return(pv);
2991 		}
2992 		pv_put(pv);
2993 		spin_lock(&pmap->pm_spin);
2994 	}
2995 }
2996 
2997 /*
2998  * Find the requested PV entry, returning a locked+held pv or NULL
2999  */
3000 static
3001 pv_entry_t
3002 _pv_get(pmap_t pmap, vm_pindex_t pindex PMAP_DEBUG_DECL)
3003 {
3004 	pv_entry_t pv;
3005 
3006 	spin_lock(&pmap->pm_spin);
3007 	for (;;) {
3008 		/*
3009 		 * Shortcut cache
3010 		 */
3011 		if ((pv = pmap->pm_pvhint) == NULL || pv->pv_pindex != pindex) {
3012 			pv = pv_entry_rb_tree_RB_LOOKUP(&pmap->pm_pvroot,
3013 							pindex);
3014 		}
3015 		if (pv == NULL) {
3016 			spin_unlock(&pmap->pm_spin);
3017 			return NULL;
3018 		}
3019 		if (_pv_hold_try(pv PMAP_DEBUG_COPY)) {
3020 			spin_unlock(&pmap->pm_spin);
3021 		} else {
3022 			spin_unlock(&pmap->pm_spin);
3023 			_pv_lock(pv PMAP_DEBUG_COPY);
3024 		}
3025 		if (pv->pv_pmap == pmap && pv->pv_pindex == pindex) {
3026 			pv_cache(pv, pindex);
3027 			return(pv);
3028 		}
3029 		pv_put(pv);
3030 		spin_lock(&pmap->pm_spin);
3031 	}
3032 }
3033 
3034 /*
3035  * Lookup, hold, and attempt to lock (pmap,pindex).
3036  *
3037  * If the entry does not exist NULL is returned and *errorp is set to 0
3038  *
3039  * If the entry exists and could be successfully locked it is returned and
3040  * *errorp is set to 0.
3041  *
3042  * If the entry exists but could NOT be successfully locked it is returned
3043  * held and *errorp is set to 1.
3044  */
3045 static
3046 pv_entry_t
3047 pv_get_try(pmap_t pmap, vm_pindex_t pindex, int *errorp)
3048 {
3049 	pv_entry_t pv;
3050 
3051 	spin_lock_shared(&pmap->pm_spin);
3052 	if ((pv = pmap->pm_pvhint) == NULL || pv->pv_pindex != pindex)
3053 		pv = pv_entry_rb_tree_RB_LOOKUP(&pmap->pm_pvroot, pindex);
3054 	if (pv == NULL) {
3055 		spin_unlock_shared(&pmap->pm_spin);
3056 		*errorp = 0;
3057 		return NULL;
3058 	}
3059 	if (pv_hold_try(pv)) {
3060 		pv_cache(pv, pindex);
3061 		spin_unlock_shared(&pmap->pm_spin);
3062 		*errorp = 0;
3063 		KKASSERT(pv->pv_pmap == pmap && pv->pv_pindex == pindex);
3064 		return(pv);	/* lock succeeded */
3065 	}
3066 	spin_unlock_shared(&pmap->pm_spin);
3067 	*errorp = 1;
3068 	return (pv);		/* lock failed */
3069 }
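
/*
 * Editor's usage sketch (hypothetical caller; mirrors the idiom used
 * in pmap_scan_callback() below): when *errorp is 1 the pv comes back
 * held but unlocked, so callers typically drop any conflicting locks,
 * block in pv_lock(), and then retry the whole lookup.
 */
#if 0
	int error;
	pv_entry_t pv;

	pv = pv_get_try(pmap, pindex, &error);
	if (pv && error) {
		pv_lock(pv);		/* safe to block now */
		pv_put(pv);
		/* drop other locks and retry the lookup from scratch */
	}
#endif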
3070 
3071 /*
3072  * Find the requested PV entry, returning a held pv or NULL
3073  */
3074 static
3075 pv_entry_t
3076 pv_find(pmap_t pmap, vm_pindex_t pindex)
3077 {
3078 	pv_entry_t pv;
3079 
3080 	spin_lock_shared(&pmap->pm_spin);
3081 
3082 	if ((pv = pmap->pm_pvhint) == NULL || pv->pv_pindex != pindex)
3083 		pv = pv_entry_rb_tree_RB_LOOKUP(&pmap->pm_pvroot, pindex);
3084 	if (pv == NULL) {
3085 		spin_unlock_shared(&pmap->pm_spin);
3086 		return NULL;
3087 	}
3088 	pv_hold(pv);
3089 	pv_cache(pv, pindex);
3090 	spin_unlock_shared(&pmap->pm_spin);
3091 	return(pv);
3092 }
3093 
3094 /*
3095  * Lock a held pv, keeping the hold count
3096  */
3097 static
3098 void
3099 _pv_lock(pv_entry_t pv PMAP_DEBUG_DECL)
3100 {
3101 	u_int count;
3102 
3103 	for (;;) {
3104 		count = pv->pv_hold;
3105 		cpu_ccfence();
3106 		if ((count & PV_HOLD_LOCKED) == 0) {
3107 			if (atomic_cmpset_int(&pv->pv_hold, count,
3108 					      count | PV_HOLD_LOCKED)) {
3109 #ifdef PMAP_DEBUG
3110 				pv->pv_func = func;
3111 				pv->pv_line = lineno;
3112 #endif
3113 				return;
3114 			}
3115 			continue;
3116 		}
3117 		tsleep_interlock(pv, 0);
3118 		if (atomic_cmpset_int(&pv->pv_hold, count,
3119 				      count | PV_HOLD_WAITING)) {
3120 #ifdef PMAP_DEBUG
3121 			kprintf("pv waiting on %s:%d\n",
3122 					pv->pv_func, pv->pv_line);
3123 #endif
3124 			tsleep(pv, PINTERLOCKED, "pvwait", hz);
3125 		}
3126 		/* retry */
3127 	}
3128 }
3129 
3130 /*
3131  * Unlock a held and locked pv, keeping the hold count.
3132  */
3133 static
3134 void
3135 pv_unlock(pv_entry_t pv)
3136 {
3137 	u_int count;
3138 
3139 	for (;;) {
3140 		count = pv->pv_hold;
3141 		cpu_ccfence();
3142 		KKASSERT((count & (PV_HOLD_LOCKED | PV_HOLD_MASK)) >=
3143 			 (PV_HOLD_LOCKED | 1));
3144 		if (atomic_cmpset_int(&pv->pv_hold, count,
3145 				      count &
3146 				      ~(PV_HOLD_LOCKED | PV_HOLD_WAITING))) {
3147 			if (count & PV_HOLD_WAITING)
3148 				wakeup(pv);
3149 			break;
3150 		}
3151 	}
3152 }
3153 
3154 /*
3155  * Unlock and drop a pv.  If the pv is no longer associated with a pmap
3156  * and the hold count drops to zero we will free it.
3157  *
3158  * Caller should not hold any spin locks.  We are protected from hold races
3159  * by virtue of holds occurring only with a pmap_spin or vm_page_spin
3160  * lock held.  A pv cannot be located otherwise.
3161  */
3162 static
3163 void
3164 pv_put(pv_entry_t pv)
3165 {
3166 #ifdef PMAP_DEBUG2
3167 	if (pmap_enter_debug > 0) {
3168 		--pmap_enter_debug;
3169 		kprintf("pv_put pv=%p hold=%08x\n", pv, pv->pv_hold);
3170 	}
3171 #endif
3172 
3173 	/*
3174 	 * Fast - shortcut most common condition
3175 	 */
3176 	if (atomic_cmpset_int(&pv->pv_hold, PV_HOLD_LOCKED | 2, 1))
3177 		return;
3178 
3179 	/*
3180 	 * Slow
3181 	 */
3182 	pv_unlock(pv);
3183 	pv_drop(pv);
3184 }
3185 
3186 /*
3187  * Remove the pmap association from a pv, require that pv_m already be removed,
3188  * then unlock and drop the pv.  Any pte operations must have already been
3189  * completed.  This call may result in a last-drop which will physically free
3190  * the pv.
3191  *
3192  * Removing the pmap association entails an additional drop.
3193  *
3194  * pv must be exclusively locked on call and will be disposed of on return.
3195  */
3196 static
3197 void
3198 pv_free(pv_entry_t pv)
3199 {
3200 	pmap_t pmap;
3201 
3202 	KKASSERT(pv->pv_m == NULL);
3203 	KKASSERT((pv->pv_hold & PV_HOLD_MASK) >= 2);
3204 	if ((pmap = pv->pv_pmap) != NULL) {
3205 		spin_lock(&pmap->pm_spin);
3206 		pv_entry_rb_tree_RB_REMOVE(&pmap->pm_pvroot, pv);
3207 		++pmap->pm_generation;
3208 		if (pmap->pm_pvhint == pv)
3209 			pmap->pm_pvhint = NULL;
3210 		atomic_add_long(&pmap->pm_stats.resident_count, -1);
3211 		pv->pv_pmap = NULL;
3212 		pv->pv_pindex = 0;
3213 		spin_unlock(&pmap->pm_spin);
3214 
3215 		/*
3216 		 * Try to shortcut three atomic ops, otherwise fall through
3217 		 * and do it normally.  Drop two refs and the lock all in
3218 		 * one go.
3219 		 */
3220 		if (atomic_cmpset_int(&pv->pv_hold, PV_HOLD_LOCKED | 2, 0)) {
3221 #ifdef PMAP_DEBUG2
3222 			if (pmap_enter_debug > 0) {
3223 				--pmap_enter_debug;
3224 				kprintf("pv_free: free pv %p\n", pv);
3225 			}
3226 #endif
3227 			zfree(pvzone, pv);
3228 			return;
3229 		}
3230 		pv_drop(pv);	/* ref for pv_pmap */
3231 	}
3232 	pv_put(pv);
3233 }
3234 
3235 /*
3236  * This routine is very drastic, but can save the system
3237  * in a pinch.
3238  */
3239 void
3240 pmap_collect(void)
3241 {
3242 	int i;
3243 	vm_page_t m;
3244 	static int warningdone=0;
3245 
3246 	if (pmap_pagedaemon_waken == 0)
3247 		return;
3248 	pmap_pagedaemon_waken = 0;
3249 	if (warningdone < 5) {
3250 		kprintf("pmap_collect: collecting pv entries -- "
3251 			"suggest increasing PMAP_SHPGPERPROC\n");
3252 		warningdone++;
3253 	}
3254 
3255 	for (i = 0; i < vm_page_array_size; i++) {
3256 		m = &vm_page_array[i];
3257 		if (m->wire_count || m->hold_count)
3258 			continue;
3259 		if (vm_page_busy_try(m, TRUE) == 0) {
3260 			if (m->wire_count == 0 && m->hold_count == 0) {
3261 				pmap_remove_all(m);
3262 			}
3263 			vm_page_wakeup(m);
3264 		}
3265 	}
3266 }
3267 
3268 /*
3269  * Scan the pmap for active page table entries and issue a callback.
3270  * The callback must dispose of pte_pv, whose PTE entry is at *ptep in
3271  * its parent page table.
3272  *
3273  * pte_pv will be NULL if the page or page table is unmanaged.
3274  * pt_pv will point to the page table page containing the pte for the page.
3275  *
3276  * NOTE! If we come across an unmanaged page TABLE (versus an unmanaged page),
3277  *	 we pass a NULL pte_pv and we pass a pt_pv pointing to the passed
3278  *	 process pmap's PD and page to the callback function.  This can be
3279  *	 confusing because the pt_pv is really a pd_pv, and the target page
3280  *	 table page is simply aliased by the pmap and not owned by it.
3281  *
3282  * It is assumed that the start and end are properly rounded to the page size.
3283  *
3284  * It is assumed that PD pages and above are managed and thus in the RB tree,
3285  * allowing us to use RB_SCAN from the PD pages down for ranged scans.
3286  */
3287 struct pmap_scan_info {
3288 	struct pmap *pmap;
3289 	vm_offset_t sva;
3290 	vm_offset_t eva;
3291 	vm_pindex_t sva_pd_pindex;
3292 	vm_pindex_t eva_pd_pindex;
3293 	void (*func)(pmap_t, struct pmap_scan_info *,
3294 		     pv_entry_t, pv_entry_t, int, vm_offset_t,
3295 		     pt_entry_t *, void *);
3296 	void *arg;
3297 	pmap_inval_bulk_t bulk_core;
3298 	pmap_inval_bulk_t *bulk;
3299 	int count;
3300 };
3301 
3302 static int pmap_scan_cmp(pv_entry_t pv, void *data);
3303 static int pmap_scan_callback(pv_entry_t pv, void *data);
3304 
3305 static void
3306 pmap_scan(struct pmap_scan_info *info, int smp_inval)
3307 {
3308 	struct pmap *pmap = info->pmap;
3309 	pv_entry_t pd_pv;	/* A page directory PV */
3310 	pv_entry_t pt_pv;	/* A page table PV */
3311 	pv_entry_t pte_pv;	/* A page table entry PV */
3312 	pt_entry_t *ptep;
3313 	pt_entry_t oldpte;
3314 	struct pv_entry dummy_pv;
3315 	int generation;
3316 
3317 	if (pmap == NULL)
3318 		return;
3319 	if (smp_inval) {
3320 		info->bulk = &info->bulk_core;
3321 		pmap_inval_bulk_init(&info->bulk_core, pmap);
3322 	} else {
3323 		info->bulk = NULL;
3324 	}
3325 
3326 	/*
3327 	 * Hold the token for stability; if the pmap is empty we have nothing
3328 	 * to do.
3329 	 */
3330 	lwkt_gettoken(&pmap->pm_token);
3331 #if 0
3332 	if (pmap->pm_stats.resident_count == 0) {
3333 		lwkt_reltoken(&pmap->pm_token);
3334 		return;
3335 	}
3336 #endif
3337 
3338 	info->count = 0;
3339 
3340 again:
3341 	/*
3342 	 * Special handling for scanning one page, which is a very common
3343 	 * operation (it is?).
3344 	 *
3345 	 * NOTE: Locks must be ordered bottom-up. pte,pt,pd,pdp,pml4
3346 	 */
3347 	if (info->sva + PAGE_SIZE == info->eva) {
3348 		generation = pmap->pm_generation;
3349 		if (info->sva >= VM_MAX_USER_ADDRESS) {
3350 			/*
3351 			 * Kernel mappings do not track wire counts on
3352 			 * page table pages and only maintain pd_pv and
3353 			 * pte_pv levels so pmap_scan() works.
3354 			 */
3355 			pt_pv = NULL;
3356 			pte_pv = pv_get(pmap, pmap_pte_pindex(info->sva));
3357 			ptep = vtopte(info->sva);
3358 		} else {
3359 			/*
3360 			 * User pages which are unmanaged will not have a
3361 			 * pte_pv.  User page table pages which are unmanaged
3362 			 * (shared from elsewhere) will also not have a pt_pv.
3363 			 * The func() callback will be handed NULL for both
3364 			 * pte_pv and pt_pv in that case.
3365 			 */
3366 			pte_pv = pv_get(pmap, pmap_pte_pindex(info->sva));
3367 			pt_pv = pv_get(pmap, pmap_pt_pindex(info->sva));
3368 			if (pt_pv == NULL) {
3369 				KKASSERT(pte_pv == NULL);
3370 				pd_pv = pv_get(pmap, pmap_pd_pindex(info->sva));
3371 				if (pd_pv) {
3372 					ptep = pv_pte_lookup(pd_pv,
3373 						    pmap_pt_index(info->sva));
3374 					if (*ptep) {
3375 						info->func(pmap, info,
3376 						     NULL, pd_pv, 1,
3377 						     info->sva, ptep,
3378 						     info->arg);
3379 					}
3380 					pv_put(pd_pv);
3381 				}
3382 				goto fast_skip;
3383 			}
3384 			ptep = pv_pte_lookup(pt_pv, pmap_pte_index(info->sva));
3385 		}
3386 
3387 		/*
3388 		 * NOTE: *ptep can't be ripped out from under us if we hold
3389 		 *	 pte_pv locked, but bits can change.  However, there is
3390 		 *	 a race where another thread may be inserting pte_pv
3391 		 *	 and setting *ptep just after our pte_pv lookup fails.
3392 		 *
3393 		 *	 In this situation we can end up with a NULL pte_pv
3394 		 *	 but find that we have a managed *ptep.  We explicitly
3395 		 *	 check for this race.
3396 		 */
3397 		oldpte = *ptep;
3398 		cpu_ccfence();
3399 		if (oldpte == 0) {
3400 			/*
3401 			 * Unlike the pv_find() case below we actually
3402 			 * acquired a locked pv in this case so any
3403 			 * race should have been resolved.  It is expected
3404 			 * to not exist.
3405 			 */
3406 			KKASSERT(pte_pv == NULL);
3407 		} else if (pte_pv) {
3408 			KASSERT((oldpte & (pmap->pmap_bits[PG_MANAGED_IDX] |
3409 					   pmap->pmap_bits[PG_V_IDX])) ==
3410 				(pmap->pmap_bits[PG_MANAGED_IDX] |
3411 				 pmap->pmap_bits[PG_V_IDX]),
3412 			    ("badA *ptep %016lx/%016lx sva %016lx pte_pv %p"
3413 			     "generation %d/%d",
3414 			    *ptep, oldpte, info->sva, pte_pv,
3415 			    generation, pmap->pm_generation));
3416 			info->func(pmap, info, pte_pv, pt_pv, 0,
3417 				   info->sva, ptep, info->arg);
3418 		} else {
3419 			/*
3420 			 * Check for insertion race
3421 			 */
3422 			if ((oldpte & pmap->pmap_bits[PG_MANAGED_IDX]) &&
3423 			    pt_pv) {
3424 				pte_pv = pv_find(pmap,
3425 						 pmap_pte_pindex(info->sva));
3426 				if (pte_pv) {
3427 					pv_drop(pte_pv);
3428 					pv_put(pt_pv);
3429 					kprintf("pmap_scan: RACE1 "
3430 						"%016jx, %016lx\n",
3431 						info->sva, oldpte);
3432 					goto again;
3433 				}
3434 			}
3435 
3436 			/*
3437 			 * Didn't race
3438 			 */
3439 			KASSERT((oldpte & (pmap->pmap_bits[PG_MANAGED_IDX] |
3440 					   pmap->pmap_bits[PG_V_IDX])) ==
3441 			    pmap->pmap_bits[PG_V_IDX],
3442 			    ("badB *ptep %016lx/%016lx sva %016lx pte_pv NULL"
3443 			     "generation %d/%d",
3444 			    *ptep, oldpte, info->sva,
3445 			    generation, pmap->pm_generation));
3446 			info->func(pmap, info, NULL, pt_pv, 0,
3447 			    info->sva, ptep, info->arg);
3448 		}
3449 		if (pt_pv)
3450 			pv_put(pt_pv);
3451 fast_skip:
3452 		pmap_inval_bulk_flush(info->bulk);
3453 		lwkt_reltoken(&pmap->pm_token);
3454 		return;
3455 	}
3456 
3457 	/*
3458 	 * Nominal scan case, RB_SCAN() for PD pages and iterate from
3459 	 * there.
3460 	 */
3461 	info->sva_pd_pindex = pmap_pd_pindex(info->sva);
3462 	info->eva_pd_pindex = pmap_pd_pindex(info->eva + NBPDP - 1);
3463 
3464 	if (info->sva >= VM_MAX_USER_ADDRESS) {
3465 		/*
3466 		 * The kernel does not currently maintain any pv_entry's for
3467 		 * higher-level page tables.
3468 		 */
3469 		bzero(&dummy_pv, sizeof(dummy_pv));
3470 		dummy_pv.pv_pindex = info->sva_pd_pindex;
3471 		spin_lock(&pmap->pm_spin);
3472 		while (dummy_pv.pv_pindex < info->eva_pd_pindex) {
3473 			pmap_scan_callback(&dummy_pv, info);
3474 			++dummy_pv.pv_pindex;
3475 		}
3476 		spin_unlock(&pmap->pm_spin);
3477 	} else {
3478 		/*
3479 		 * User page tables maintain local PML4, PDP, and PD
3480 		 * pv_entry's at the very least.  PT pv's might be
3481 		 * unmanaged and thus not exist.  PTE pv's might be
3482 		 * unmanaged and thus not exist.
3483 		 */
3484 		spin_lock(&pmap->pm_spin);
3485 		pv_entry_rb_tree_RB_SCAN(&pmap->pm_pvroot,
3486 			pmap_scan_cmp, pmap_scan_callback, info);
3487 		spin_unlock(&pmap->pm_spin);
3488 	}
3489 	pmap_inval_bulk_flush(info->bulk);
3490 	lwkt_reltoken(&pmap->pm_token);
3491 }
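
/*
 * Editor's usage sketch (my_remove_callback is a hypothetical name,
 * not original source): callers drive pmap_scan() by filling in a
 * pmap_scan_info and supplying a callback that consumes the locked
 * pte_pv.
 */
#if 0
	struct pmap_scan_info info;

	info.pmap = pmap;
	info.sva = sva;
	info.eva = eva;
	info.func = my_remove_callback;		/* hypothetical */
	info.arg = NULL;
	pmap_scan(&info, 1);			/* 1 = use SMP invalidation */
#endif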
3492 
3493 /*
3494  * WARNING! pmap->pm_spin held
3495  */
3496 static int
3497 pmap_scan_cmp(pv_entry_t pv, void *data)
3498 {
3499 	struct pmap_scan_info *info = data;
3500 	if (pv->pv_pindex < info->sva_pd_pindex)
3501 		return(-1);
3502 	if (pv->pv_pindex >= info->eva_pd_pindex)
3503 		return(1);
3504 	return(0);
3505 }
3506 
3507 /*
3508  * WARNING! pmap->pm_spin held
3509  */
3510 static int
3511 pmap_scan_callback(pv_entry_t pv, void *data)
3512 {
3513 	struct pmap_scan_info *info = data;
3514 	struct pmap *pmap = info->pmap;
3515 	pv_entry_t pd_pv;	/* A page directory PV */
3516 	pv_entry_t pt_pv;	/* A page table PV */
3517 	pv_entry_t pte_pv;	/* A page table entry PV */
3518 	pt_entry_t *ptep;
3519 	pt_entry_t oldpte;
3520 	vm_offset_t sva;
3521 	vm_offset_t eva;
3522 	vm_offset_t va_next;
3523 	vm_pindex_t pd_pindex;
3524 	int error;
3525 	int generation;
3526 
3527 	/*
3528 	 * Pull the PD pindex from the pv before releasing the spinlock.
3529 	 *
3530 	 * WARNING: pv is faked for kernel pmap scans.
3531 	 */
3532 	pd_pindex = pv->pv_pindex;
3533 	spin_unlock(&pmap->pm_spin);
3534 	pv = NULL;	/* invalid after spinlock unlocked */
3535 
3536 	/*
3537 	 * Calculate the page range within the PD.  SIMPLE pmaps are
3538 	 * direct-mapped for the entire 2^64 address space.  Normal pmaps
3539 	 * reflect the user and kernel address space which requires
3540 	 * canonicalization with regard to converting pd_pindex's back
3541 	 * into addresses.
3542 	 */
3543 	sva = (pd_pindex - NUPTE_TOTAL - NUPT_TOTAL) << PDPSHIFT;
3544 	if ((pmap->pm_flags & PMAP_FLAG_SIMPLE) == 0 &&
3545 	    (sva & PML4_SIGNMASK)) {
3546 		sva |= PML4_SIGNMASK;
3547 	}
3548 	eva = sva + NBPDP;	/* can overflow */
3549 	if (sva < info->sva)
3550 		sva = info->sva;
3551 	if (eva < info->sva || eva > info->eva)
3552 		eva = info->eva;
3553 
3554 	/*
3555 	 * NOTE: kernel mappings do not track page table pages, only
3556 	 * 	 terminal pages.
3557 	 *
3558 	 * NOTE: Locks must be ordered bottom-up. pte,pt,pd,pdp,pml4.
3559 	 *	 However, for the scan to be efficient we try to
3560 	 *	 cache items top-down.
3561 	 */
3562 	pd_pv = NULL;
3563 	pt_pv = NULL;
3564 
3565 	for (; sva < eva; sva = va_next) {
3566 		if (sva >= VM_MAX_USER_ADDRESS) {
3567 			if (pt_pv) {
3568 				pv_put(pt_pv);
3569 				pt_pv = NULL;
3570 			}
3571 			goto kernel_skip;
3572 		}
3573 
3574 		/*
3575 		 * PD cache (degenerate case if we skip).  It is possible
3576 		 * for the PD to not exist due to races.  This is ok.
3577 		 */
3578 		if (pd_pv == NULL) {
3579 			pd_pv = pv_get(pmap, pmap_pd_pindex(sva));
3580 		} else if (pd_pv->pv_pindex != pmap_pd_pindex(sva)) {
3581 			pv_put(pd_pv);
3582 			pd_pv = pv_get(pmap, pmap_pd_pindex(sva));
3583 		}
3584 		if (pd_pv == NULL) {
3585 			va_next = (sva + NBPDP) & ~PDPMASK;
3586 			if (va_next < sva)
3587 				va_next = eva;
3588 			continue;
3589 		}
3590 
3591 		/*
3592 		 * PT cache
3593 		 */
3594 		if (pt_pv == NULL) {
3595 			if (pd_pv) {
3596 				pv_put(pd_pv);
3597 				pd_pv = NULL;
3598 			}
3599 			pt_pv = pv_get(pmap, pmap_pt_pindex(sva));
3600 		} else if (pt_pv->pv_pindex != pmap_pt_pindex(sva)) {
3601 			if (pd_pv) {
3602 				pv_put(pd_pv);
3603 				pd_pv = NULL;
3604 			}
3605 			pv_put(pt_pv);
3606 			pt_pv = pv_get(pmap, pmap_pt_pindex(sva));
3607 		}
3608 
3609 		 * If pt_pv is NULL we either have a shared page table
3610 		 * If pt_pv is NULL we either have an shared page table
3611 		 * page and must issue a callback specific to that case,
3612 		 * or there is no page table page.
3613 		 *
3614 		 * Either way we can skip the page table page.
3615 		 */
3616 		if (pt_pv == NULL) {
3617 			/*
3618 			 * Possible unmanaged (shared from another pmap)
3619 			 * page table page.
3620 			 */
3621 			if (pd_pv == NULL)
3622 				pd_pv = pv_get(pmap, pmap_pd_pindex(sva));
3623 			KKASSERT(pd_pv != NULL);
3624 			ptep = pv_pte_lookup(pd_pv, pmap_pt_index(sva));
3625 			if (*ptep & pmap->pmap_bits[PG_V_IDX]) {
3626 				info->func(pmap, info, NULL, pd_pv, 1,
3627 					   sva, ptep, info->arg);
3628 			}
3629 
3630 			/*
3631 			 * Done, move to next page table page.
3632 			 */
3633 			va_next = (sva + NBPDR) & ~PDRMASK;
3634 			if (va_next < sva)
3635 				va_next = eva;
3636 			continue;
3637 		}
3638 
3639 		/*
3640 		 * From this point in the loop testing pt_pv for non-NULL
3641 		 * means we are in UVM, else if it is NULL we are in KVM.
3642 		 *
3643 		 * Limit our scan to either the end of the va represented
3644 		 * by the current page table page, or to the end of the
3645 		 * range being removed.
3646 		 */
3647 kernel_skip:
3648 		va_next = (sva + NBPDR) & ~PDRMASK;
3649 		if (va_next < sva)
3650 			va_next = eva;
3651 		if (va_next > eva)
3652 			va_next = eva;
3653 
3654 		/*
3655 		 * Scan the page table for pages.  Some pages may not be
3656 		 * managed (might not have a pv_entry).
3657 		 *
3658 		 * There is no page table management for kernel pages so
3659 		 * pt_pv will be NULL in that case, but otherwise pt_pv
3660 		 * is non-NULL, locked, and referenced.
3661 		 */
3662 
3663 		/*
3664 		 * At this point a non-NULL pt_pv means a UVA, and a NULL
3665 		 * pt_pv means a KVA.
3666 		 */
3667 		if (pt_pv)
3668 			ptep = pv_pte_lookup(pt_pv, pmap_pte_index(sva));
3669 		else
3670 			ptep = vtopte(sva);
3671 
3672 		while (sva < va_next) {
3673 			/*
3674 			 * Yield every 64 pages.
3675 			 */
3676 			if ((++info->count & 63) == 0)
3677 				lwkt_user_yield();
3678 
3679 			/*
3680 			 * Acquire the related pte_pv, if any.  If *ptep == 0
3681 			 * the related pte_pv should not exist, but if *ptep
3682 			 * is not zero the pte_pv may or may not exist (e.g.
3683 			 * will not exist for an unmanaged page).
3684 			 *
3685 			 * However a multitude of races are possible here.
3686 			 *
3687 			 * In addition, the (pt_pv, pte_pv) lock order is
3688 		 * backwards, so we have to be careful when acquiring
3689 			 * a properly locked pte_pv.
3690 			 */
3691 			generation = pmap->pm_generation;
3692 			if (pt_pv) {
3693 				pte_pv = pv_get_try(pmap, pmap_pte_pindex(sva),
3694 						    &error);
3695 				if (error) {
3696 					if (pd_pv) {
3697 						pv_put(pd_pv);
3698 						pd_pv = NULL;
3699 					}
3700 					pv_put(pt_pv);	 /* must be non-NULL */
3701 					pt_pv = NULL;
3702 					pv_lock(pte_pv); /* safe to block now */
3703 					pv_put(pte_pv);
3704 					pte_pv = NULL;
3705 					pt_pv = pv_get(pmap,
3706 						       pmap_pt_pindex(sva));
3707 					/*
3708 					 * pt_pv reloaded, need new ptep
3709 					 */
3710 					KKASSERT(pt_pv != NULL);
3711 					ptep = pv_pte_lookup(pt_pv,
3712 							pmap_pte_index(sva));
3713 					continue;
3714 				}
3715 			} else {
3716 				pte_pv = pv_get(pmap, pmap_pte_pindex(sva));
3717 			}
3718 
3719 			/*
3720 			 * Ok, if *ptep == 0 we had better NOT have a pte_pv.
3721 			 */
3722 			oldpte = *ptep;
3723 			if (oldpte == 0) {
3724 				if (pte_pv) {
3725 					kprintf("Unexpected non-NULL pte_pv "
3726 						"%p pt_pv %p "
3727 						"*ptep = %016lx/%016lx\n",
3728 						pte_pv, pt_pv, *ptep, oldpte);
3729 					panic("Unexpected non-NULL pte_pv");
3730 				}
3731 				sva += PAGE_SIZE;
3732 				++ptep;
3733 				continue;
3734 			}
3735 
3736 			/*
3737 			 * Ready for the callback.  The locked pte_pv (if any)
3738 			 * is consumed by the callback.  pte_pv will exist if
3739 			 * the page is managed, and will not exist if it
3740 			 * isn't.
3741 			 */
3742 			if (pte_pv) {
3743 				KASSERT((oldpte & (pmap->pmap_bits[PG_MANAGED_IDX] | pmap->pmap_bits[PG_V_IDX])) ==
3744 				    (pmap->pmap_bits[PG_MANAGED_IDX] | pmap->pmap_bits[PG_V_IDX]),
3745 				    ("badC *ptep %016lx/%016lx sva %016lx "
3746 				    "pte_pv %p pm_generation %d/%d",
3747 				    *ptep, oldpte, sva, pte_pv,
3748 				    generation, pmap->pm_generation));
3749 				info->func(pmap, info, pte_pv, pt_pv, 0,
3750 				    sva, ptep, info->arg);
3751 			} else {
3752 				/*
3753 				 * Check for insertion race.  Since there is no
3754 				 * pte_pv to guard us it is possible for us
3755 				 * to race another thread doing an insertion.
3756 				 * Our lookup misses the pte_pv but our *ptep
3757 				 * check sees the inserted pte.
3758 				 *
3759 				 * XXX panic case seems to occur within a
3760 				 * vm_fork() of /bin/sh, which frankly
3761 				 * shouldn't happen since no other threads
3762 				 * should be inserting to our pmap in that
3763 				 * situation.  Removing, possibly.  Inserting
3764 				 * shouldn't happen.
3765 				 */
3766 				if ((oldpte & pmap->pmap_bits[PG_MANAGED_IDX]) &&
3767 				    pt_pv) {
3768 					pte_pv = pv_find(pmap,
3769 							 pmap_pte_pindex(sva));
3770 					if (pte_pv) {
3771 						pv_drop(pte_pv);
3772 						kprintf("pmap_scan: RACE2 "
3773 							"%016jx, %016lx\n",
3774 							sva, oldpte);
3775 						continue;
3776 					}
3777 				}
3778 
3779 				/*
3780 				 * Didn't race
3781 				 */
3782 				KASSERT((oldpte & (pmap->pmap_bits[PG_MANAGED_IDX] | pmap->pmap_bits[PG_V_IDX])) ==
3783 				    pmap->pmap_bits[PG_V_IDX],
3784 				    ("badD *ptep %016lx/%016lx sva %016lx "
3785 				    "pte_pv NULL pm_generation %d/%d",
3786 				     *ptep, oldpte, sva,
3787 				     generation, pmap->pm_generation));
3788 				info->func(pmap, info, NULL, pt_pv, 0,
3789 				    sva, ptep, info->arg);
3790 			}
3791 			pte_pv = NULL;
3792 			sva += PAGE_SIZE;
3793 			++ptep;
3794 		}
3795 	}
3796 	if (pd_pv) {
3797 		pv_put(pd_pv);
3798 		pd_pv = NULL;
3799 	}
3800 	if (pt_pv) {
3801 		pv_put(pt_pv);
3802 		pt_pv = NULL;
3803 	}
3804 	if ((++info->count & 7) == 0)
3805 		lwkt_user_yield();
3806 
3807 	/*
3808 	 * Relock before returning.
3809 	 */
3810 	spin_lock(&pmap->pm_spin);
3811 	return (0);
3812 }
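
/*
 * Illustrative sketch (not compiled): the scan above advances one page
 * table page (2MB on x86-64) at a time.  va_next is aligned up to the
 * next 2MB boundary, checked for wrap at the top of the address space,
 * and clamped to the end of the requested range.  Distilled:
 */
#if 0
static vm_offset_t
scan_step_2mb(vm_offset_t sva, vm_offset_t eva)
{
	vm_offset_t va_next;

	va_next = (sva + NBPDR) & ~(vm_offset_t)PDRMASK;
	if (va_next < sva)		/* wrapped past top of VA space */
		va_next = eva;
	if (va_next > eva)		/* clamp to end of range */
		va_next = eva;
	return (va_next);
}
#endif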
3813 
3814 void
3815 pmap_remove(struct pmap *pmap, vm_offset_t sva, vm_offset_t eva)
3816 {
3817 	struct pmap_scan_info info;
3818 
3819 	info.pmap = pmap;
3820 	info.sva = sva;
3821 	info.eva = eva;
3822 	info.func = pmap_remove_callback;
3823 	info.arg = NULL;
3824 	pmap_scan(&info, 1);
3825 }
3826 
3827 static void
3828 pmap_remove_noinval(struct pmap *pmap, vm_offset_t sva, vm_offset_t eva)
3829 {
3830 	struct pmap_scan_info info;
3831 
3832 	info.pmap = pmap;
3833 	info.sva = sva;
3834 	info.eva = eva;
3835 	info.func = pmap_remove_callback;
3836 	info.arg = NULL;
3837 	pmap_scan(&info, 0);
3838 }
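
/*
 * Illustrative sketch (not compiled): pmap_scan() consumers fill out a
 * pmap_scan_info and supply a callback, exactly as pmap_remove() does
 * above.  A hypothetical callback that merely counts valid terminal
 * ptes might look like this; pmap_count_callback and pmap_count_range
 * are not part of pmap.c.
 */
#if 0
static void
pmap_count_callback(pmap_t pmap, struct pmap_scan_info *info,
		    pv_entry_t pte_pv, pv_entry_t pt_pv, int sharept,
		    vm_offset_t va, pt_entry_t *ptep, void *arg)
{
	long *countp = arg;

	if (*ptep & pmap->pmap_bits[PG_V_IDX])
		++*countp;
	if (pte_pv)			/* callback consumes locked pte_pv */
		pv_put(pte_pv);
}

static long
pmap_count_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
	struct pmap_scan_info info;
	long count = 0;

	info.pmap = pmap;
	info.sva = sva;
	info.eva = eva;
	info.func = pmap_count_callback;
	info.arg = &count;
	pmap_scan(&info, 0);		/* no TLB invalidation required */
	return (count);
}
#endif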
3839 
3840 static void
3841 pmap_remove_callback(pmap_t pmap, struct pmap_scan_info *info,
3842 		     pv_entry_t pte_pv, pv_entry_t pt_pv, int sharept,
3843 		     vm_offset_t va, pt_entry_t *ptep, void *arg __unused)
3844 {
3845 	pt_entry_t pte;
3846 
3847 	if (pte_pv) {
3848 		/*
3849 		 * This will also drop pt_pv's wire_count. Note that
3850 		 * terminal pages are not wired based on mmu presence.
3851 		 */
3852 		pmap_remove_pv_pte(pte_pv, pt_pv, info->bulk);
3853 		pmap_remove_pv_page(pte_pv);
3854 		pv_free(pte_pv);
3855 	} else if (sharept == 0) {
3856 		/*
3857 		 * Unmanaged page table (pt, pd, or pdp. Not pte).
3858 		 *
3859 		 * pt_pv's wire_count is still bumped by unmanaged pages
3860 		 * so we must decrement it manually.
3861 		 *
3862 		 * We have to unwire the target page table page.
3863 		 *
3864 		 * It is unclear how we can invalidate a segment, so we
3865 		 * invalidate -1, which invalidates the entire TLB.
3866 		 */
3867 		pte = pmap_inval_bulk(info->bulk, (vm_offset_t)-1, ptep, 0);
3868 		if (pte & pmap->pmap_bits[PG_W_IDX])
3869 			atomic_add_long(&pmap->pm_stats.wired_count, -1);
3870 		atomic_add_long(&pmap->pm_stats.resident_count, -1);
3871 		if (vm_page_unwire_quick(pt_pv->pv_m))
3872 			panic("pmap_remove: insufficient wirecount");
3873 	} else {
3874 		/*
3875 		 * Unmanaged page table (pt, pd, or pdp. Not pte) for
3876 		 * a shared page table.
3877 		 *
3878 		 * pt_pv is actually the pd_pv for our pmap (not the shared
3879 		 * object pmap).
3880 		 *
3881 		 * We have to unwire the target page table page and we
3882 		 * have to unwire our page directory page.
3883 		 *
3884 		 * It is unclear how we can invalidate a segment, so we
3885 		 * invalidate -1, which invalidates the entire TLB.
3886 		 */
3887 		pte = pmap_inval_bulk(info->bulk, (vm_offset_t)-1, ptep, 0);
3888 		atomic_add_long(&pmap->pm_stats.resident_count, -1);
3889 		KKASSERT((pte & pmap->pmap_bits[PG_DEVICE_IDX]) == 0);
3890 		if (vm_page_unwire_quick(PHYS_TO_VM_PAGE(pte & PG_FRAME)))
3891 			panic("pmap_remove: shared pgtable1 bad wirecount");
3892 		if (vm_page_unwire_quick(pt_pv->pv_m))
3893 			panic("pmap_remove: shared pgtable2 bad wirecount");
3894 	}
3895 }
3896 
3897 /*
3898  * Removes this physical page from all physical maps in which it resides.
3899  * Reflects back modify bits to the pager.
3900  *
3901  * This routine may not be called from an interrupt.
3902  */
3903 static
3904 void
3905 pmap_remove_all(vm_page_t m)
3906 {
3907 	pv_entry_t pv;
3908 	pmap_inval_bulk_t bulk;
3909 
3910 	if (!pmap_initialized /* || (m->flags & PG_FICTITIOUS)*/)
3911 		return;
3912 
3913 	vm_page_spin_lock(m);
3914 	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
3915 		KKASSERT(pv->pv_m == m);
3916 		if (pv_hold_try(pv)) {
3917 			vm_page_spin_unlock(m);
3918 		} else {
3919 			vm_page_spin_unlock(m);
3920 			pv_lock(pv);
3921 		}
3922 		if (pv->pv_m != m) {
3923 			pv_put(pv);
3924 			vm_page_spin_lock(m);
3925 			continue;
3926 		}
3927 
3928 		/*
3929 		 * Holding no spinlocks, pv is locked.
3930 		 */
3931 		pmap_inval_bulk_init(&bulk, pv->pv_pmap);
3932 		pmap_remove_pv_pte(pv, NULL, &bulk);
3933 		pmap_inval_bulk_flush(&bulk);
3934 		pmap_remove_pv_page(pv);
3935 		pv_free(pv);
3936 		vm_page_spin_lock(m);
3937 	}
3938 	KKASSERT((m->flags & (PG_MAPPED|PG_WRITEABLE)) == 0);
3939 	vm_page_spin_unlock(m);
3940 }
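
/*
 * Illustrative sketch (not compiled): the loop above is an instance of
 * the pv locking idiom used throughout this file.  The vm_page spinlock
 * cannot be held across a blocking pv lock, so we try-lock first; if
 * that fails we drop the spinlock, block, and then revalidate pv->pv_m
 * before trusting the pv.  Distilled:
 */
#if 0
	vm_page_spin_lock(m);
	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
		if (pv_hold_try(pv)) {
			vm_page_spin_unlock(m);	/* locked without blocking */
		} else {
			vm_page_spin_unlock(m);
			pv_lock(pv);		/* safe to block now */
		}
		if (pv->pv_m != m) {		/* lost a race, rescan */
			pv_put(pv);
			vm_page_spin_lock(m);
			continue;
		}
		/* ... operate on the locked pv, then relock the page ... */
	}
#endif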
3941 
3942 /*
3943  * Set the physical protection on the specified range of this map
3944  * as requested.  This function is typically only used for debug watchpoints
3945  * and COW pages.
3946  *
3947  * This function may not be called from an interrupt if the map is
3948  * not the kernel_pmap.
3949  *
3950  * NOTE!  For shared page table pages we just unmap the page.
3951  */
3952 void
3953 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
3954 {
3955 	struct pmap_scan_info info;
3956 	/* JG review for NX */
3957 
3958 	if (pmap == NULL)
3959 		return;
3960 	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
3961 		pmap_remove(pmap, sva, eva);
3962 		return;
3963 	}
3964 	if (prot & VM_PROT_WRITE)
3965 		return;
3966 	info.pmap = pmap;
3967 	info.sva = sva;
3968 	info.eva = eva;
3969 	info.func = pmap_protect_callback;
3970 	info.arg = &prot;
3971 	pmap_scan(&info, 1);
3972 }
3973 
3974 static
3975 void
3976 pmap_protect_callback(pmap_t pmap, struct pmap_scan_info *info,
3977 		      pv_entry_t pte_pv, pv_entry_t pt_pv, int sharept,
3978 		      vm_offset_t va, pt_entry_t *ptep, void *arg __unused)
3979 {
3980 	pt_entry_t pbits;
3981 	pt_entry_t cbits;
3982 	pt_entry_t pte;
3983 	vm_page_t m;
3984 
3985 again:
3986 	pbits = *ptep;
3987 	cbits = pbits;
3988 	if (pte_pv) {
3989 		m = NULL;
3990 		if (pbits & pmap->pmap_bits[PG_A_IDX]) {
3991 			if ((pbits & pmap->pmap_bits[PG_DEVICE_IDX]) == 0) {
3992 				m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
3993 				KKASSERT(m == pte_pv->pv_m);
3994 				vm_page_flag_set(m, PG_REFERENCED);
3995 			}
3996 			cbits &= ~pmap->pmap_bits[PG_A_IDX];
3997 		}
3998 		if (pbits & pmap->pmap_bits[PG_M_IDX]) {
3999 			if (pmap_track_modified(pte_pv->pv_pindex)) {
4000 				if ((pbits & pmap->pmap_bits[PG_DEVICE_IDX]) == 0) {
4001 					if (m == NULL) {
4002 						m = PHYS_TO_VM_PAGE(pbits &
4003 								    PG_FRAME);
4004 					}
4005 					vm_page_dirty(m);
4006 				}
4007 				cbits &= ~pmap->pmap_bits[PG_M_IDX];
4008 			}
4009 		}
4010 	} else if (sharept) {
4011 		/*
4012 		 * Unmanaged page table, pt_pv is actually the pd_pv
4013 		 * for our pmap (not the object's shared pmap).
4014 		 *
4015 		 * When asked to protect something in a shared page table
4016 		 * page we just unmap the page table page.  We have to
4017 		 * invalidate the tlb in this situation.
4018 		 *
4019 		 * XXX Warning, shared page tables will not be used for
4020 		 * OBJT_DEVICE or OBJT_MGTDEVICE (PG_FICTITIOUS) mappings
4021 		 * so PHYS_TO_VM_PAGE() should be safe here.
4022 		 */
4023 		pte = pmap_inval_smp(pmap, (vm_offset_t)-1, 1, ptep, 0);
4024 		if (vm_page_unwire_quick(PHYS_TO_VM_PAGE(pte & PG_FRAME)))
4025 			panic("pmap_protect: pgtable1 pg bad wirecount");
4026 		if (vm_page_unwire_quick(pt_pv->pv_m))
4027 			panic("pmap_protect: pgtable2 pg bad wirecount");
4028 		ptep = NULL;
4029 	}
4030 	/* else unmanaged page, adjust bits, no wire changes */
4031 
4032 	if (ptep) {
4033 		cbits &= ~pmap->pmap_bits[PG_RW_IDX];
4034 #ifdef PMAP_DEBUG2
4035 		if (pmap_enter_debug > 0) {
4036 			--pmap_enter_debug;
4037 			kprintf("pmap_protect va=%lx ptep=%p pte_pv=%p "
4038 				"pt_pv=%p cbits=%08lx\n",
4039 				va, ptep, pte_pv,
4040 				pt_pv, cbits
4041 			);
4042 		}
4043 #endif
4044 		if (pbits != cbits) {
4045 			if (!pmap_inval_smp_cmpset(pmap, (vm_offset_t)-1,
4046 						   ptep, pbits, cbits)) {
4047 				goto again;
4048 			}
4049 		}
4050 	}
4051 	if (pte_pv)
4052 		pv_put(pte_pv);
4053 }
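
/*
 * Illustrative sketch (not compiled): pte updates in this file follow a
 * read / compute / compare-and-set pattern so that PG_M/PG_A updates
 * performed by the MMU while we work are never lost; on cmpset failure
 * the pte is simply reloaded and the computation retried.  Distilled:
 */
#if 0
	for (;;) {
		pbits = *ptep;			/* snapshot the pte */
		cpu_ccfence();
		cbits = pbits & ~pmap->pmap_bits[PG_RW_IDX];
		if (pbits == cbits)
			break;			/* nothing to change */
		if (pmap_inval_smp_cmpset(pmap, (vm_offset_t)-1,
					  ptep, pbits, cbits)) {
			break;			/* swapped in atomically */
		}
		/* raced the MMU or another cpu, retry */
	}
#endif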
4054 
4055 /*
4056  * Insert the vm_page (m) at the virtual address (va), replacing any prior
4057  * mapping at that address.  Set protection and wiring as requested.
4058  *
4059  * If entry is non-NULL we check to see if the SEG_SIZE optimization is
4060  * possible.  If it is we enter the page into the appropriate shared pmap
4061  * hanging off the related VM object instead of the passed pmap, then we
4062  * share the page table page from the VM object's pmap into the current pmap.
4063  *
4064  * NOTE: This routine MUST insert the page into the pmap now; it cannot
4065  *	 lazy-evaluate.
4066  */
4067 void
4068 pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
4069 	   boolean_t wired, vm_map_entry_t entry)
4070 {
4071 	pv_entry_t pt_pv;	/* page table */
4072 	pv_entry_t pte_pv;	/* page table entry */
4073 	pt_entry_t *ptep;
4074 	vm_paddr_t opa;
4075 	pt_entry_t origpte, newpte;
4076 	vm_paddr_t pa;
4077 
4078 	if (pmap == NULL)
4079 		return;
4080 	va = trunc_page(va);
4081 #ifdef PMAP_DIAGNOSTIC
4082 	if (va >= KvaEnd)
4083 		panic("pmap_enter: toobig");
4084 	if ((va >= UPT_MIN_ADDRESS) && (va < UPT_MAX_ADDRESS))
4085 		panic("pmap_enter: invalid to pmap_enter page table "
4086 		      "pages (va: 0x%lx)", va);
4087 #endif
4088 	if (va < UPT_MAX_ADDRESS && pmap == &kernel_pmap) {
4089 		kprintf("Warning: pmap_enter called on UVA with "
4090 			"kernel_pmap\n");
4091 #ifdef DDB
4092 		db_print_backtrace();
4093 #endif
4094 	}
4095 	if (va >= UPT_MAX_ADDRESS && pmap != &kernel_pmap) {
4096 		kprintf("Warning: pmap_enter called on KVA without "
4097 			"kernel_pmap\n");
4098 #ifdef DDB
4099 		db_print_backtrace();
4100 #endif
4101 	}
4102 
4103 	/*
4104 	 * Get locked PV entries for our new page table entry (pte_pv)
4105 	 * and for its parent page table (pt_pv).  We need the parent
4106 	 * so we can resolve the location of the ptep.
4107 	 *
4108 	 * Only hardware MMU actions can modify the ptep out from
4109 	 * under us.
4110 	 *
4111 	 * if (m) is fictitious or unmanaged we do not create a managing
4112 	 * pte_pv for it.  Any pre-existing page's management state must
4113 	 * match (avoiding code complexity).
4114 	 *
4115 	 * If the pmap is still being initialized we assume existing
4116 	 * page tables.
4117 	 *
4118 	 * Kernel mappings do not track page table pages (i.e. pt_pv).
4119 	 */
4120 	if (pmap_initialized == FALSE) {
4121 		pte_pv = NULL;
4122 		pt_pv = NULL;
4123 		ptep = vtopte(va);
4124 		origpte = *ptep;
4125 	} else if (m->flags & (/*PG_FICTITIOUS |*/ PG_UNMANAGED)) { /* XXX */
4126 		pte_pv = NULL;
4127 		if (va >= VM_MAX_USER_ADDRESS) {
4128 			pt_pv = NULL;
4129 			ptep = vtopte(va);
4130 		} else {
4131 			pt_pv = pmap_allocpte_seg(pmap, pmap_pt_pindex(va),
4132 						  NULL, entry, va);
4133 			ptep = pv_pte_lookup(pt_pv, pmap_pte_index(va));
4134 		}
4135 		origpte = *ptep;
4136 		cpu_ccfence();
4137 		KASSERT(origpte == 0 ||
4138 			 (origpte & pmap->pmap_bits[PG_MANAGED_IDX]) == 0,
4139 			 ("Invalid PTE 0x%016jx @ 0x%016jx\n", origpte, va));
4140 	} else {
4141 		if (va >= VM_MAX_USER_ADDRESS) {
4142 			/*
4143 			 * Kernel map, pv_entry-tracked.
4144 			 */
4145 			pt_pv = NULL;
4146 			pte_pv = pmap_allocpte(pmap, pmap_pte_pindex(va), NULL);
4147 			ptep = vtopte(va);
4148 		} else {
4149 			/*
4150 			 * User map
4151 			 */
4152 			pte_pv = pmap_allocpte_seg(pmap, pmap_pte_pindex(va),
4153 						   &pt_pv, entry, va);
4154 			ptep = pv_pte_lookup(pt_pv, pmap_pte_index(va));
4155 		}
4156 		origpte = *ptep;
4157 		cpu_ccfence();
4158 		KASSERT(origpte == 0 ||
4159 			 (origpte & pmap->pmap_bits[PG_MANAGED_IDX]),
4160 			 ("Invalid PTE 0x%016jx @ 0x%016jx\n", origpte, va));
4161 	}
4162 
4163 	pa = VM_PAGE_TO_PHYS(m);
4164 	opa = origpte & PG_FRAME;
4165 
4166 	newpte = (pt_entry_t)(pa | pte_prot(pmap, prot) |
4167 		 pmap->pmap_bits[PG_V_IDX] | pmap->pmap_bits[PG_A_IDX]);
4168 	if (wired)
4169 		newpte |= pmap->pmap_bits[PG_W_IDX];
4170 	if (va < VM_MAX_USER_ADDRESS)
4171 		newpte |= pmap->pmap_bits[PG_U_IDX];
4172 	if (pte_pv)
4173 		newpte |= pmap->pmap_bits[PG_MANAGED_IDX];
4174 //	if (pmap == &kernel_pmap)
4175 //		newpte |= pgeflag;
4176 	newpte |= pmap->pmap_cache_bits[m->pat_mode];
4177 	if (m->flags & PG_FICTITIOUS)
4178 		newpte |= pmap->pmap_bits[PG_DEVICE_IDX];
4179 
4180 	/*
4181 	 * It is possible for multiple faults to occur in threaded
4182 	 * environments; the existing pte might already be correct.
4183 	 */
4184 	if (((origpte ^ newpte) & ~(pt_entry_t)(pmap->pmap_bits[PG_M_IDX] |
4185 	    pmap->pmap_bits[PG_A_IDX])) == 0)
4186 		goto done;
4187 
4188 	/*
4189 	 * Ok, either the address changed or the protection or wiring
4190 	 * changed.
4191 	 *
4192 	 * Clear the current entry, interlocking the removal.  For managed
4193 	 * pte's this will also flush the modified state to the vm_page.
4194 	 * Atomic ops are mandatory in order to ensure that PG_M events are
4195 	 * not lost during any transition.
4196 	 *
4197 	 * WARNING: The caller has busied the new page but not the original
4198 	 *	    vm_page which we are trying to replace.  Because we hold
4199 	 *	    the pte_pv lock, but have not busied the page, PG bits
4200 	 *	    can be cleared out from under us.
4201 	 */
4202 	if (opa) {
4203 		if (pte_pv) {
4204 			/*
4205 			 * pmap_remove_pv_pte() unwires pt_pv and assumes
4206 			 * we will free pte_pv, but since we are reusing
4207 			 * pte_pv we want to retain the wire count.
4208 			 *
4209 			 * pt_pv won't exist for a kernel page (managed or
4210 			 * otherwise).
4211 			 */
4212 			if (pt_pv)
4213 				vm_page_wire_quick(pt_pv->pv_m);
4214 			if (prot & VM_PROT_NOSYNC) {
4215 				pmap_remove_pv_pte(pte_pv, pt_pv, NULL);
4216 			} else {
4217 				pmap_inval_bulk_t bulk;
4218 
4219 				pmap_inval_bulk_init(&bulk, pmap);
4220 				pmap_remove_pv_pte(pte_pv, pt_pv, &bulk);
4221 				pmap_inval_bulk_flush(&bulk);
4222 			}
4223 			if (pte_pv->pv_m)
4224 				pmap_remove_pv_page(pte_pv);
4225 		} else if (prot & VM_PROT_NOSYNC) {
4226 			/*
4227 			 * Unmanaged page, NOSYNC (no mmu sync) requested.
4228 			 *
4229 			 * Leave wire count on PT page intact.
4230 			 */
4231 			(void)pte_load_clear(ptep);
4232 			cpu_invlpg((void *)va);
4233 			atomic_add_long(&pmap->pm_stats.resident_count, -1);
4234 		} else {
4235 			/*
4236 			 * Unmanaged page, normal enter.
4237 			 *
4238 			 * Leave wire count on PT page intact.
4239 			 */
4240 			pmap_inval_smp(pmap, va, 1, ptep, 0);
4241 			atomic_add_long(&pmap->pm_stats.resident_count, -1);
4242 		}
4243 		KKASSERT(*ptep == 0);
4244 	}
4245 
4246 #ifdef PMAP_DEBUG2
4247 	if (pmap_enter_debug > 0) {
4248 		--pmap_enter_debug;
4249 		kprintf("pmap_enter: va=%lx m=%p origpte=%lx newpte=%lx ptep=%p"
4250 			" pte_pv=%p pt_pv=%p opa=%lx prot=%02x\n",
4251 			va, m,
4252 			origpte, newpte, ptep,
4253 			pte_pv, pt_pv, opa, prot);
4254 	}
4255 #endif
4256 
4257 	if (pte_pv) {
4258 		/*
4259 		 * Enter on the PV list if part of our managed memory.
4260 		 * Wiring of the PT page is already handled.
4261 		 */
4262 		KKASSERT(pte_pv->pv_m == NULL);
4263 		vm_page_spin_lock(m);
4264 		pte_pv->pv_m = m;
4265 		pmap_page_stats_adding(m);
4266 		TAILQ_INSERT_TAIL(&m->md.pv_list, pte_pv, pv_list);
4267 		vm_page_flag_set(m, PG_MAPPED);
4268 		vm_page_spin_unlock(m);
4269 	} else if (pt_pv && opa == 0) {
4270 		/*
4271 		 * We have to adjust the wire count on the PT page ourselves
4272 		 * for unmanaged entries.  If opa was non-zero we retained
4273 		 * the existing wire count from the removal.
4274 		 */
4275 		vm_page_wire_quick(pt_pv->pv_m);
4276 	}
4277 
4278 	/*
4279 	 * Kernel VMAs (pt_pv == NULL) require pmap invalidation interlocks.
4280 	 *
4281 	 * User VMAs do not because those will be zero->non-zero, so no
4282 	 * stale entries to worry about at this point.
4283 	 *
4284 	 * For KVM there appear to still be issues.  Theoretically we
4285 	 * should be able to scrap the interlocks entirely but we
4286 	 * get crashes.
4287 	 */
4288 	if ((prot & VM_PROT_NOSYNC) == 0 && pt_pv == NULL) {
4289 		pmap_inval_smp(pmap, va, 1, ptep, newpte);
4290 	} else {
4291 		*(volatile pt_entry_t *)ptep = newpte;
4292 		if (pt_pv == NULL)
4293 			cpu_invlpg((void *)va);
4294 	}
4295 
4296 	if (wired) {
4297 		if (pte_pv) {
4298 			atomic_add_long(&pte_pv->pv_pmap->pm_stats.wired_count,
4299 					1);
4300 		} else {
4301 			atomic_add_long(&pmap->pm_stats.wired_count, 1);
4302 		}
4303 	}
4304 	if (newpte & pmap->pmap_bits[PG_RW_IDX])
4305 		vm_page_flag_set(m, PG_WRITEABLE);
4306 
4307 	/*
4308 	 * Unmanaged pages need manual resident_count tracking.
4309 	 */
4310 	if (pte_pv == NULL && pt_pv)
4311 		atomic_add_long(&pt_pv->pv_pmap->pm_stats.resident_count, 1);
4312 
4313 	/*
4314 	 * Cleanup
4315 	 */
4316 done:
4317 	KKASSERT((newpte & pmap->pmap_bits[PG_MANAGED_IDX]) == 0 ||
4318 		 (m->flags & PG_MAPPED));
4319 
4320 	/*
4321 	 * Cleanup the pv entry, allowing other accessors.
4322 	 */
4323 	if (pte_pv)
4324 		pv_put(pte_pv);
4325 	if (pt_pv)
4326 		pv_put(pt_pv);
4327 }
4328 
4329 /*
4330  * This code works like pmap_enter() but assumes VM_PROT_READ and not-wired.
4331  * This code also assumes that the pmap has no pre-existing entry for this
4332  * VA.
4333  *
4334  * This code currently may only be used on user pmaps, not kernel_pmap.
4335  */
4336 void
4337 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m)
4338 {
4339 	pmap_enter(pmap, va, m, VM_PROT_READ, FALSE, NULL);
4340 }
4341 
4342 /*
4343  * Make a temporary mapping for a physical address.  This is only intended
4344  * to be used for panic dumps.
4345  *
4346  * The caller is responsible for calling smp_invltlb().
4347  */
4348 void *
4349 pmap_kenter_temporary(vm_paddr_t pa, long i)
4350 {
4351 	pmap_kenter_quick((vm_offset_t)crashdumpmap + (i * PAGE_SIZE), pa);
4352 	return ((void *)crashdumpmap);
4353 }
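
/*
 * Illustrative sketch (not compiled): a panic-dump style consumer of
 * pmap_kenter_temporary().  Per the NOTE above, the TLB invalidation is
 * the caller's responsibility.  dump_pa and dump_one_page() are
 * hypothetical.
 */
#if 0
	void *va;

	va = pmap_kenter_temporary(dump_pa, 0);	/* slot 0 of crashdumpmap */
	smp_invltlb();				/* caller invalidates */
	dump_one_page(va);
#endif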
4354 
4355 #define MAX_INIT_PT (96)
4356 
4357 /*
4358  * This routine preloads the ptes for a given object into the specified pmap.
4359  * This eliminates the blast of soft faults on process startup and
4360  * immediately after an mmap.
4361  */
4362 static int pmap_object_init_pt_callback(vm_page_t p, void *data);
4363 
4364 void
4365 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_prot_t prot,
4366 		    vm_object_t object, vm_pindex_t pindex,
4367 		    vm_size_t size, int limit)
4368 {
4369 	struct rb_vm_page_scan_info info;
4370 	struct lwp *lp;
4371 	vm_size_t psize;
4372 
4373 	/*
4374 	 * We can't preinit if read access isn't set or there is no pmap
4375 	 * or object.
4376 	 */
4377 	if ((prot & VM_PROT_READ) == 0 || pmap == NULL || object == NULL)
4378 		return;
4379 
4380 	/*
4381 	 * We can't preinit if the pmap is not the current pmap
4382 	 */
4383 	lp = curthread->td_lwp;
4384 	if (lp == NULL || pmap != vmspace_pmap(lp->lwp_vmspace))
4385 		return;
4386 
4387 	/*
4388 	 * Misc additional checks
4389 	 */
4390 	psize = x86_64_btop(size);
4391 
4392 	if ((object->type != OBJT_VNODE) ||
4393 		((limit & MAP_PREFAULT_PARTIAL) && (psize > MAX_INIT_PT) &&
4394 			(object->resident_page_count > MAX_INIT_PT))) {
4395 		return;
4396 	}
4397 
4398 	if (pindex + psize > object->size) {
4399 		if (object->size < pindex)
4400 			return;
4401 		psize = object->size - pindex;
4402 	}
4403 
4404 	if (psize == 0)
4405 		return;
4406 
4407 	/*
4408 	 * If everything is segment-aligned do not pre-init here.  Instead
4409 	 * allow the normal vm_fault path to pass a segment hint to
4410 	 * pmap_enter() which will then use an object-referenced shared
4411 	 * page table page.
4412 	 */
4413 	if ((addr & SEG_MASK) == 0 &&
4414 	    (ctob(psize) & SEG_MASK) == 0 &&
4415 	    (ctob(pindex) & SEG_MASK) == 0) {
4416 		return;
4417 	}
4418 
4419 	/*
4420 	 * Use a red-black scan to traverse the requested range and load
4421 	 * any valid pages found into the pmap.
4422 	 *
4423 	 * We cannot safely scan the object's memq without holding the
4424 	 * object token.
4425 	 */
4426 	info.start_pindex = pindex;
4427 	info.end_pindex = pindex + psize - 1;
4428 	info.limit = limit;
4429 	info.mpte = NULL;
4430 	info.addr = addr;
4431 	info.pmap = pmap;
4432 
4433 	vm_object_hold_shared(object);
4434 	vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
4435 				pmap_object_init_pt_callback, &info);
4436 	vm_object_drop(object);
4437 }
4438 
4439 static
4440 int
4441 pmap_object_init_pt_callback(vm_page_t p, void *data)
4442 {
4443 	struct rb_vm_page_scan_info *info = data;
4444 	vm_pindex_t rel_index;
4445 
4446 	/*
4447 	 * Don't allow an madvise to blow away our really
4448 	 * free pages by allocating pv entries.
4449 	 */
4450 	if ((info->limit & MAP_PREFAULT_MADVISE) &&
4451 		vmstats.v_free_count < vmstats.v_free_reserved) {
4452 		    return(-1);
4453 	}
4454 
4455 	/*
4456 	 * Ignore list markers and ignore pages we cannot instantly
4457 	 * busy (while holding the object token).
4458 	 */
4459 	if (p->flags & PG_MARKER)
4460 		return 0;
4461 	if (vm_page_busy_try(p, TRUE))
4462 		return 0;
4463 	if (((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
4464 	    (p->flags & PG_FICTITIOUS) == 0) {
4465 		if ((p->queue - p->pc) == PQ_CACHE)
4466 			vm_page_deactivate(p);
4467 		rel_index = p->pindex - info->start_pindex;
4468 		pmap_enter_quick(info->pmap,
4469 				 info->addr + x86_64_ptob(rel_index), p);
4470 	}
4471 	vm_page_wakeup(p);
4472 	lwkt_yield();
4473 	return(0);
4474 }
4475 
4476 /*
4477  * Return TRUE if the pmap is in shape to trivially pre-fault the specified
4478  * address.
4479  *
4480  * Returns FALSE if it would be non-trivial or if a pte is already loaded
4481  * into the slot.
4482  *
4483  * XXX This is safe only because page table pages are not freed.
4484  */
4485 int
4486 pmap_prefault_ok(pmap_t pmap, vm_offset_t addr)
4487 {
4488 	pt_entry_t *pte;
4489 
4490 	/*spin_lock(&pmap->pm_spin);*/
4491 	if ((pte = pmap_pte(pmap, addr)) != NULL) {
4492 		if (*pte & pmap->pmap_bits[PG_V_IDX]) {
4493 			/*spin_unlock(&pmap->pm_spin);*/
4494 			return FALSE;
4495 		}
4496 	}
4497 	/*spin_unlock(&pmap->pm_spin);*/
4498 	return TRUE;
4499 }
4500 
4501 /*
4502  * Change the wiring attribute for a pmap/va pair.  The mapping must already
4503  * exist in the pmap.  The mapping may or may not be managed.
4504  */
4505 void
4506 pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired,
4507 		   vm_map_entry_t entry)
4508 {
4509 	pt_entry_t *ptep;
4510 	pv_entry_t pv;
4511 
4512 	if (pmap == NULL)
4513 		return;
4514 	lwkt_gettoken(&pmap->pm_token);
4515 	pv = pmap_allocpte_seg(pmap, pmap_pt_pindex(va), NULL, entry, va);
4516 	ptep = pv_pte_lookup(pv, pmap_pte_index(va));
4517 
4518 	if (wired && !pmap_pte_w(pmap, ptep))
4519 		atomic_add_long(&pv->pv_pmap->pm_stats.wired_count, 1);
4520 	else if (!wired && pmap_pte_w(pmap, ptep))
4521 		atomic_add_long(&pv->pv_pmap->pm_stats.wired_count, -1);
4522 
4523 	/*
4524 	 * Wiring is not a hardware characteristic so there is no need to
4525 	 * invalidate TLB.  However, in an SMP environment we must use
4526 	 * a locked bus cycle to update the pte (if we are not using
4527 	 * the pmap_inval_*() API that is)... it's ok to do this for simple
4528 	 * wiring changes.
4529 	 */
4530 	if (wired)
4531 		atomic_set_long(ptep, pmap->pmap_bits[PG_W_IDX]);
4532 	else
4533 		atomic_clear_long(ptep, pmap->pmap_bits[PG_W_IDX]);
4534 	pv_put(pv);
4535 	lwkt_reltoken(&pmap->pm_token);
4536 }
4537 
4538 
4539 
4540 /*
4541  * Copy the range specified by src_addr/len from the source map to
4542  * the range dst_addr/len in the destination map.
4543  *
4544  * This routine is only advisory and need not do anything.
4545  */
4546 void
4547 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
4548 	  vm_size_t len, vm_offset_t src_addr)
4549 {
4550 }
4551 
4552 /*
4553  * pmap_zero_page:
4554  *
4555  *	Zero the specified physical page.
4556  *
4557  *	This function may be called from an interrupt and no locking is
4558  *	required.
4559  */
4560 void
4561 pmap_zero_page(vm_paddr_t phys)
4562 {
4563 	vm_offset_t va = PHYS_TO_DMAP(phys);
4564 
4565 	pagezero((void *)va);
4566 }
4567 
4568 /*
4569  * pmap_zero_page_area:
4570  *
4571  *	Zero part of a physical page by mapping it into memory and clearing
4572  *	its contents with bzero.
4573  *
4574  *	off and size may not cover an area beyond a single hardware page.
4575  */
4576 void
4577 pmap_zero_page_area(vm_paddr_t phys, int off, int size)
4578 {
4579 	vm_offset_t virt = PHYS_TO_DMAP(phys);
4580 
4581 	bzero((char *)virt + off, size);
4582 }
4583 
4584 /*
4585  * pmap_copy_page:
4586  *
4587  *	Copy the physical page from the source PA to the target PA.
4588  *	This function may be called from an interrupt.  No locking
4589  *	is required.
4590  */
4591 void
4592 pmap_copy_page(vm_paddr_t src, vm_paddr_t dst)
4593 {
4594 	vm_offset_t src_virt, dst_virt;
4595 
4596 	src_virt = PHYS_TO_DMAP(src);
4597 	dst_virt = PHYS_TO_DMAP(dst);
4598 	bcopy((void *)src_virt, (void *)dst_virt, PAGE_SIZE);
4599 }
4600 
4601 /*
4602  * pmap_copy_page_frag:
4603  *
4604  *	Copy the physical page from the source PA to the target PA.
4605  *	This function may be called from an interrupt.  No locking
4606  *	is required.
4607  */
4608 void
4609 pmap_copy_page_frag(vm_paddr_t src, vm_paddr_t dst, size_t bytes)
4610 {
4611 	vm_offset_t src_virt, dst_virt;
4612 
4613 	src_virt = PHYS_TO_DMAP(src);
4614 	dst_virt = PHYS_TO_DMAP(dst);
4615 
4616 	bcopy((char *)src_virt + (src & PAGE_MASK),
4617 	      (char *)dst_virt + (dst & PAGE_MASK),
4618 	      bytes);
4619 }
4620 
4621 /*
4622  * Returns true if the pmap's pv is one of the first 16 pvs linked to from
4623  * this page.  This count may be changed upwards or downwards in the future;
4624  * it is only necessary that true be returned for a small subset of pmaps
4625  * for proper page aging.
4626  */
4627 boolean_t
4628 pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
4629 {
4630 	pv_entry_t pv;
4631 	int loops = 0;
4632 
4633 	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
4634 		return FALSE;
4635 
4636 	vm_page_spin_lock(m);
4637 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
4638 		if (pv->pv_pmap == pmap) {
4639 			vm_page_spin_unlock(m);
4640 			return TRUE;
4641 		}
4642 		loops++;
4643 		if (loops >= 16)
4644 			break;
4645 	}
4646 	vm_page_spin_unlock(m);
4647 	return (FALSE);
4648 }
4649 
4650 /*
4651  * Remove all pages from the specified address space; this aids process exit
4652  * speeds.  Also, this code may be special cased for the current process
4653  * only.
4654  */
4655 void
4656 pmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
4657 {
4658 	pmap_remove_noinval(pmap, sva, eva);
4659 	cpu_invltlb();
4660 }
4661 
4662 /*
4663  * pmap_testbit tests bits in ptes.  Note that the testbit/clearbit
4664  * routines are inline, and a lot of things compile-time evaluate.
4665  */
4666 static
4667 boolean_t
4668 pmap_testbit(vm_page_t m, int bit)
4669 {
4670 	pv_entry_t pv;
4671 	pt_entry_t *pte;
4672 	pmap_t pmap;
4673 
4674 	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
4675 		return FALSE;
4676 
4677 	if (TAILQ_FIRST(&m->md.pv_list) == NULL)
4678 		return FALSE;
4679 	vm_page_spin_lock(m);
4680 	if (TAILQ_FIRST(&m->md.pv_list) == NULL) {
4681 		vm_page_spin_unlock(m);
4682 		return FALSE;
4683 	}
4684 
4685 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
4686 
4687 #if defined(PMAP_DIAGNOSTIC)
4688 		if (pv->pv_pmap == NULL) {
4689 			kprintf("Null pmap (tb) at pindex: %"PRIu64"\n",
4690 			    pv->pv_pindex);
4691 			continue;
4692 		}
4693 #endif
4694 		pmap = pv->pv_pmap;
4695 
4696 		/*
4697 		 * If the bit being tested is the modified bit, then
4698 		 * mark clean_map and ptes as never modified.
4700 		 *
4701 		 * WARNING!  Because we do not lock the pv, *pte can be in a
4702 		 *	     state of flux.  Despite this the value of *pte
4703 		 *	     will still be related to the vm_page in some way
4704 		 *	     because the pv cannot be destroyed as long as we
4705 		 *	     hold the vm_page spin lock.
4706 		 */
4707 		if (bit == PG_A_IDX || bit == PG_M_IDX) {
4709 			if (!pmap_track_modified(pv->pv_pindex))
4710 				continue;
4711 		}
4712 
4713 		pte = pmap_pte_quick(pv->pv_pmap, pv->pv_pindex << PAGE_SHIFT);
4714 		if (*pte & pmap->pmap_bits[bit]) {
4715 			vm_page_spin_unlock(m);
4716 			return TRUE;
4717 		}
4718 	}
4719 	vm_page_spin_unlock(m);
4720 	return (FALSE);
4721 }
4722 
4723 /*
4724  * This routine is used to modify bits in ptes.  Only one bit should be
4725  * specified.  PG_RW requires special handling.
4726  *
4727  * Caller must NOT hold any spin locks
4728  */
4729 static __inline
4730 void
4731 pmap_clearbit(vm_page_t m, int bit_index)
4732 {
4733 	pv_entry_t pv;
4734 	pt_entry_t *pte;
4735 	pt_entry_t pbits;
4736 	pmap_t pmap;
4737 
4738 	if (bit_index == PG_RW_IDX)
4739 		vm_page_flag_clear(m, PG_WRITEABLE);
4740 	if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) {
4741 		return;
4742 	}
4743 
4744 	/*
4745 	 * PG_M or PG_A case
4746 	 *
4747 	 * Loop over all current mappings, setting/clearing as appropriate.
4748 	 * If setting RO do we need to clear the VAC?
4749 	 *
4750 	 * NOTE: When clearing PG_M we could also (not implemented) drop
4751 	 *	 through to the PG_RW code and clear PG_RW too, forcing
4752 	 *	 a fault on write to redetect PG_M for virtual kernels, but
4753 	 *	 it isn't necessary since virtual kernels invalidate the
4754 	 *	 pte when they clear the VPTE_M bit in their virtual page
4755 	 *	 tables.
4756 	 *
4757 	 * NOTE: Does not re-dirty the page when clearing only PG_M.
4758 	 *
4759 	 * NOTE: Because we do not lock the pv, *pte can be in a state of
4760 	 *	 flux.  Despite this the value of *pte is still somewhat
4761 	 *	 related while we hold the vm_page spin lock.
4762 	 *
4763 	 *	 *pte can be zero due to this race.  Since we are clearing
4764 	 *	 bits we basically do no harm when this race occurs.
4765 	 */
4766 	if (bit_index != PG_RW_IDX) {
4767 		vm_page_spin_lock(m);
4768 		TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
4769 #if defined(PMAP_DIAGNOSTIC)
4770 			if (pv->pv_pmap == NULL) {
4771 				kprintf("Null pmap (cb) at pindex: %"PRIu64"\n",
4772 				    pv->pv_pindex);
4773 				continue;
4774 			}
4775 #endif
4776 			pmap = pv->pv_pmap;
4777 			pte = pmap_pte_quick(pv->pv_pmap,
4778 					     pv->pv_pindex << PAGE_SHIFT);
4779 			pbits = *pte;
4780 			if (pbits & pmap->pmap_bits[bit_index])
4781 				atomic_clear_long(pte, pmap->pmap_bits[bit_index]);
4782 		}
4783 		vm_page_spin_unlock(m);
4784 		return;
4785 	}
4786 
4787 	/*
4788 	 * Clear PG_RW.  Also clears PG_M and marks the page dirty if PG_M
4789 	 * was set.
4790 	 */
4791 restart:
4792 	vm_page_spin_lock(m);
4793 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
4794 		/*
4795 		 * don't write protect pager mappings
4796 		 */
4797 		if (!pmap_track_modified(pv->pv_pindex))
4798 			continue;
4799 
4800 #if defined(PMAP_DIAGNOSTIC)
4801 		if (pv->pv_pmap == NULL) {
4802 			kprintf("Null pmap (cb) at pindex: %"PRIu64"\n",
4803 			    pv->pv_pindex);
4804 			continue;
4805 		}
4806 #endif
4807 		pmap = pv->pv_pmap;
4808 		/*
4809 		 * Skip pages which do not have PG_RW set.
4810 		 */
4811 		pte = pmap_pte_quick(pv->pv_pmap, pv->pv_pindex << PAGE_SHIFT);
4812 		if ((*pte & pmap->pmap_bits[PG_RW_IDX]) == 0)
4813 			continue;
4814 
4815 		/*
4816 		 * Lock the PV
4817 		 */
4818 		if (pv_hold_try(pv)) {
4819 			vm_page_spin_unlock(m);
4820 		} else {
4821 			vm_page_spin_unlock(m);
4822 			pv_lock(pv);	/* held, now do a blocking lock */
4823 		}
4824 		if (pv->pv_pmap != pmap || pv->pv_m != m) {
4825 			pv_put(pv);	/* and release */
4826 			goto restart;	/* anything could have happened */
4827 		}
4828 		KKASSERT(pv->pv_pmap == pmap);
4829 		for (;;) {
4830 			pt_entry_t nbits;
4831 
4832 			pbits = *pte;
4833 			cpu_ccfence();
4834 			nbits = pbits & ~(pmap->pmap_bits[PG_RW_IDX] |
4835 					  pmap->pmap_bits[PG_M_IDX]);
4836 			if (pmap_inval_smp_cmpset(pmap,
4837 				     ((vm_offset_t)pv->pv_pindex << PAGE_SHIFT),
4838 				     pte, pbits, nbits)) {
4839 				break;
4840 			}
4841 			cpu_pause();
4842 		}
4843 		vm_page_spin_lock(m);
4844 
4845 		/*
4846 		 * If PG_M was found to be set while we were clearing PG_RW
4847 		 * we also clear PG_M (done above) and mark the page dirty.
4848 		 * Callers expect this behavior.
4849 		 */
4850 		if (pbits & pmap->pmap_bits[PG_M_IDX])
4851 			vm_page_dirty(m);
4852 		pv_put(pv);
4853 	}
4854 	vm_page_spin_unlock(m);
4855 }
4856 
4857 /*
4858  * Lower the permission for all mappings to a given page.
4859  *
4860  * Page must be busied by caller.  Because page is busied by caller this
4861  * should not be able to race a pmap_enter().
4862  */
4863 void
4864 pmap_page_protect(vm_page_t m, vm_prot_t prot)
4865 {
4866 	/* JG NX support? */
4867 	if ((prot & VM_PROT_WRITE) == 0) {
4868 		if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) {
4869 			/*
4870 			 * NOTE: pmap_clearbit(.. PG_RW) also clears
4871 			 *	 the PG_WRITEABLE flag in (m).
4872 			 */
4873 			pmap_clearbit(m, PG_RW_IDX);
4874 		} else {
4875 			pmap_remove_all(m);
4876 		}
4877 	}
4878 }
4879 
4880 vm_paddr_t
4881 pmap_phys_address(vm_pindex_t ppn)
4882 {
4883 	return (x86_64_ptob(ppn));
4884 }
4885 
4886 /*
4887  * Return a count of reference bits for a page, clearing those bits.
4888  * It is not necessary for every reference bit to be cleared, but it
4889  * is necessary that 0 only be returned when there are truly no
4890  * reference bits set.
4891  *
4892  * XXX: The exact number of bits to check and clear is a matter that
4893  * should be tested and standardized at some point in the future for
4894  * optimal aging of shared pages.
4895  *
4896  * This routine may not block.
4897  */
4898 int
4899 pmap_ts_referenced(vm_page_t m)
4900 {
4901 	pv_entry_t pv;
4902 	pt_entry_t *pte;
4903 	pmap_t pmap;
4904 	int rtval = 0;
4905 
4906 	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
4907 		return (rtval);
4908 
4909 	vm_page_spin_lock(m);
4910 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
4911 		if (!pmap_track_modified(pv->pv_pindex))
4912 			continue;
4913 		pmap = pv->pv_pmap;
4914 		pte = pmap_pte_quick(pv->pv_pmap, pv->pv_pindex << PAGE_SHIFT);
4915 		if (pte && (*pte & pmap->pmap_bits[PG_A_IDX])) {
4916 			atomic_clear_long(pte, pmap->pmap_bits[PG_A_IDX]);
4917 			rtval++;
4918 			if (rtval > 4)
4919 				break;
4920 		}
4921 	}
4922 	vm_page_spin_unlock(m);
4923 	return (rtval);
4924 }
4925 
4926 /*
4927  *	pmap_is_modified:
4928  *
4929  *	Return whether or not the specified physical page was modified
4930  *	in any physical maps.
4931  */
4932 boolean_t
4933 pmap_is_modified(vm_page_t m)
4934 {
4935 	boolean_t res;
4936 
4937 	res = pmap_testbit(m, PG_M_IDX);
4938 	return (res);
4939 }
4940 
4941 /*
4942  *	Clear the modify bits on the specified physical page.
4943  */
4944 void
4945 pmap_clear_modify(vm_page_t m)
4946 {
4947 	pmap_clearbit(m, PG_M_IDX);
4948 }
4949 
4950 /*
4951  *	pmap_clear_reference:
4952  *
4953  *	Clear the reference bit on the specified physical page.
4954  */
4955 void
4956 pmap_clear_reference(vm_page_t m)
4957 {
4958 	pmap_clearbit(m, PG_A_IDX);
4959 }
4960 
4961 /*
4962  * Miscellaneous support routines follow
4963  */
4964 
4965 static
4966 void
4967 i386_protection_init(void)
4968 {
4969 	int *kp, prot;
4970 
4971 	/* JG NX support may go here; No VM_PROT_EXECUTE ==> set NX bit  */
4972 	kp = protection_codes;
4973 	for (prot = 0; prot < PROTECTION_CODES_SIZE; prot++) {
4974 		switch (prot) {
4975 		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
4976 			/*
4977 			 * Read access is also 0. There isn't any execute bit,
4978 			 * so just make it readable.
4979 			 */
4980 		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
4981 		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
4982 		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
4983 			*kp++ = 0;
4984 			break;
4985 		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
4986 		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
4987 		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
4988 		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
4989 			*kp++ = pmap_bits_default[PG_RW_IDX];
4990 			break;
4991 		}
4992 	}
4993 }
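
/*
 * Illustrative sketch (not compiled): the table built above is indexed
 * by the 3-bit VM_PROT_{READ,WRITE,EXECUTE} combination.  Assuming that
 * layout, a pte_prot()-style lookup reduces to:
 */
#if 0
	pt_entry_t prot_bits;

	prot_bits = protection_codes[prot & (VM_PROT_READ |
					     VM_PROT_WRITE |
					     VM_PROT_EXECUTE)];
#endif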
4994 
4995 /*
4996  * Map a set of physical memory pages into the kernel virtual
4997  * address space. Return a pointer to where it is mapped. This
4998  * routine is intended to be used for mapping device memory,
4999  * NOT real memory.
5000  *
5001  * NOTE: We can't use pgeflag unless we invalidate the pages one at
5002  *	 a time.
5003  *
5004  * NOTE: The PAT attributes {WRITE_BACK, WRITE_THROUGH, UNCACHED, UNCACHEABLE}
5005  *	 work whether the cpu supports PAT or not.  The remaining PAT
5006  *	 attributes {WRITE_PROTECTED, WRITE_COMBINING} only work if the cpu
5007  *	 supports PAT.
5008  */
5009 void *
5010 pmap_mapdev(vm_paddr_t pa, vm_size_t size)
5011 {
5012 	return(pmap_mapdev_attr(pa, size, PAT_WRITE_BACK));
5013 }
5014 
5015 void *
5016 pmap_mapdev_uncacheable(vm_paddr_t pa, vm_size_t size)
5017 {
5018 	return(pmap_mapdev_attr(pa, size, PAT_UNCACHEABLE));
5019 }
5020 
5021 void *
5022 pmap_mapbios(vm_paddr_t pa, vm_size_t size)
5023 {
5024 	return (pmap_mapdev_attr(pa, size, PAT_WRITE_BACK));
5025 }
5026 
5027 /*
5028  * Map a set of physical memory pages into the kernel virtual
5029  * address space. Return a pointer to where it is mapped. This
5030  * routine is intended to be used for mapping device memory,
5031  * NOT real memory.
5032  */
5033 void *
5034 pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
5035 {
5036 	vm_offset_t va, tmpva, offset;
5037 	pt_entry_t *pte;
5038 	vm_size_t tmpsize;
5039 
5040 	offset = pa & PAGE_MASK;
5041 	size = roundup(offset + size, PAGE_SIZE);
5042 
5043 	va = kmem_alloc_nofault(&kernel_map, size, PAGE_SIZE);
5044 	if (va == 0)
5045 		panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
5046 
5047 	pa = pa & ~PAGE_MASK;
5048 	for (tmpva = va, tmpsize = size; tmpsize > 0;) {
5049 		pte = vtopte(tmpva);
5050 		*pte = pa |
5051 		    kernel_pmap.pmap_bits[PG_RW_IDX] |
5052 		    kernel_pmap.pmap_bits[PG_V_IDX] | /* pgeflag | */
5053 		    kernel_pmap.pmap_cache_bits[mode];
5054 		tmpsize -= PAGE_SIZE;
5055 		tmpva += PAGE_SIZE;
5056 		pa += PAGE_SIZE;
5057 	}
5058 	pmap_invalidate_range(&kernel_pmap, va, va + size);
5059 	pmap_invalidate_cache_range(va, va + size);
5060 
5061 	return ((void *)(va + offset));
5062 }
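
/*
 * Illustrative sketch (not compiled): mapping a device register window.
 * bar_pa and bar_size are hypothetical values taken from a PCI BAR;
 * uncacheable is the usual attribute for device registers.
 */
#if 0
	volatile uint32_t *regs;

	regs = pmap_mapdev_uncacheable(bar_pa, bar_size);
	/* ... program the device through *regs ... */
	pmap_unmapdev((vm_offset_t)regs, bar_size);
#endif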
5063 
5064 void
5065 pmap_unmapdev(vm_offset_t va, vm_size_t size)
5066 {
5067 	vm_offset_t base, offset;
5068 
5069 	base = va & ~PAGE_MASK;
5070 	offset = va & PAGE_MASK;
5071 	size = roundup(offset + size, PAGE_SIZE);
5072 	pmap_qremove(va, size >> PAGE_SHIFT);
5073 	kmem_free(&kernel_map, base, size);
5074 }
5075 
5076 /*
5077  * Sets the memory attribute for the specified page.
5078  */
5079 void
5080 pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
5081 {
5082 
5083     m->pat_mode = ma;
5084 
5085     /*
5086      * If "m" is a normal page, update its direct mapping.  This update
5087      * can be relied upon to perform any cache operations that are
5088      * required for data coherence.
5089      */
5090     if ((m->flags & PG_FICTITIOUS) == 0)
5091         pmap_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)), 1, m->pat_mode);
5092 }
5093 
5094 /*
5095  * Change the PAT attribute on an existing kernel memory map.  Caller
5096  * must ensure that the virtual memory in question is not accessed
5097  * during the adjustment.
5098  */
5099 void
5100 pmap_change_attr(vm_offset_t va, vm_size_t count, int mode)
5101 {
5102 	pt_entry_t *pte;
5103 	vm_offset_t base;
5104 	int changed = 0;
5105 
5106 	if (va == 0)
5107 		panic("pmap_change_attr: va is NULL");
5108 	base = trunc_page(va);
5109 
5110 	while (count) {
5111 		pte = vtopte(va);
5112 		*pte = (*pte & ~(pt_entry_t)(kernel_pmap.pmap_cache_mask)) |
5113 		       kernel_pmap.pmap_cache_bits[mode];
5114 		--count;
5115 		va += PAGE_SIZE;
5116 	}
5117 
5118 	changed = 1;	/* XXX: not optimal */
5119 
5120 	/*
5121 	 * Flush CPU caches if required, to make sure no stale data remains
5122 	 * cached with the old attributes.
5123 	 */
5124 	if (changed) {
5125 		pmap_invalidate_range(&kernel_pmap, base, va);
5126 		pmap_invalidate_cache_range(base, va);
5127 	}
5128 }
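
/*
 * Illustrative sketch (not compiled): switching an existing kernel
 * mapping to write-combining, assuming the cpu supports PAT and a
 * PAT_WRITE_COMBINING mode index exists (see the NOTE above
 * pmap_mapdev()).  fb_va and fb_size are hypothetical; the memory must
 * not be accessed during the change.
 */
#if 0
	pmap_change_attr(fb_va, fb_size >> PAGE_SHIFT, PAT_WRITE_COMBINING);
#endif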
5129 
5130 /*
5131  * perform the pmap work for mincore
5132  */
5133 int
5134 pmap_mincore(pmap_t pmap, vm_offset_t addr)
5135 {
5136 	pt_entry_t *ptep, pte;
5137 	vm_page_t m;
5138 	int val = 0;
5139 
5140 	lwkt_gettoken(&pmap->pm_token);
5141 	ptep = pmap_pte(pmap, addr);
5142 
5143 	if (ptep && (pte = *ptep) != 0) {
5144 		vm_offset_t pa;
5145 
5146 		val = MINCORE_INCORE;
5147 		if ((pte & pmap->pmap_bits[PG_MANAGED_IDX]) == 0)
5148 			goto done;
5149 
5150 		pa = pte & PG_FRAME;
5151 
5152 		if (pte & pmap->pmap_bits[PG_DEVICE_IDX])
5153 			m = NULL;
5154 		else
5155 			m = PHYS_TO_VM_PAGE(pa);
5156 
5157 		/*
5158 		 * Modified by us
5159 		 */
5160 		if (pte & pmap->pmap_bits[PG_M_IDX])
5161 			val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER;
5162 		/*
5163 		 * Modified by someone
5164 		 */
5165 		else if (m && (m->dirty || pmap_is_modified(m)))
5166 			val |= MINCORE_MODIFIED_OTHER;
5167 		/*
5168 		 * Referenced by us
5169 		 */
5170 		if (pte & pmap->pmap_bits[PG_A_IDX])
5171 			val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER;
5172 
5173 		/*
5174 		 * Referenced by someone
5175 		 */
5176 		else if (m && ((m->flags & PG_REFERENCED) ||
5177 				pmap_ts_referenced(m))) {
5178 			val |= MINCORE_REFERENCED_OTHER;
5179 			vm_page_flag_set(m, PG_REFERENCED);
5180 		}
5181 	}
5182 done:
5183 	lwkt_reltoken(&pmap->pm_token);
5184 
5185 	return val;
5186 }
5187 
5188 /*
5189  * Replace p->p_vmspace with a new one.  If adjrefs is non-zero the new
5190  * vmspace will be ref'd and the old one will be deref'd.
5191  *
5192  * The vmspace for all lwps associated with the process will be adjusted
5193  * and cr3 will be reloaded if any lwp is the current lwp.
5194  *
5195  * The process must hold the vmspace->vm_map.token for oldvm and newvm
5196  */
5197 void
5198 pmap_replacevm(struct proc *p, struct vmspace *newvm, int adjrefs)
5199 {
5200 	struct vmspace *oldvm;
5201 	struct lwp *lp;
5202 
5203 	oldvm = p->p_vmspace;
5204 	if (oldvm != newvm) {
5205 		if (adjrefs)
5206 			vmspace_ref(newvm);
5207 		p->p_vmspace = newvm;
5208 		KKASSERT(p->p_nthreads == 1);
5209 		lp = RB_ROOT(&p->p_lwp_tree);
5210 		pmap_setlwpvm(lp, newvm);
5211 		if (adjrefs)
5212 			vmspace_rel(oldvm);
5213 	}
5214 }
5215 
5216 /*
5217  * Set the vmspace for a LWP.  The vmspace is almost universally set the
5218  * same as the process vmspace, but virtual kernels need to swap out contexts
5219  * on a per-lwp basis.
5220  *
5221  * Caller does not necessarily hold any vmspace tokens.  Caller must control
5222  * the lwp (typically by being in the context of the lwp).  We use a critical
5223  * section to protect against statclock and hardclock (statistics collection).
5224  */
5225 void
5226 pmap_setlwpvm(struct lwp *lp, struct vmspace *newvm)
5227 {
5228 	struct vmspace *oldvm;
5229 	struct pmap *pmap;
5230 
5231 	oldvm = lp->lwp_vmspace;
5232 
5233 	if (oldvm != newvm) {
5234 		crit_enter();
5235 		lp->lwp_vmspace = newvm;
5236 		if (curthread->td_lwp == lp) {
5237 			pmap = vmspace_pmap(newvm);
5238 			ATOMIC_CPUMASK_ORBIT(pmap->pm_active, mycpu->gd_cpuid);
5239 			if (pmap->pm_active_lock & CPULOCK_EXCL)
5240 				pmap_interlock_wait(newvm);
5241 #if defined(SWTCH_OPTIM_STATS)
5242 			tlb_flush_count++;
5243 #endif
5244 			if (pmap->pmap_bits[TYPE_IDX] == REGULAR_PMAP) {
5245 				curthread->td_pcb->pcb_cr3 = vtophys(pmap->pm_pml4);
5246 			} else if (pmap->pmap_bits[TYPE_IDX] == EPT_PMAP) {
5247 				curthread->td_pcb->pcb_cr3 = KPML4phys;
5248 			} else {
5249 				panic("pmap_setlwpvm: unknown pmap type");
5250 			}
5251 			load_cr3(curthread->td_pcb->pcb_cr3);
5252 			pmap = vmspace_pmap(oldvm);
5253 			ATOMIC_CPUMASK_NANDBIT(pmap->pm_active,
5254 					       mycpu->gd_cpuid);
5255 		}
5256 		crit_exit();
5257 	}
5258 }
5259 
5260 /*
5261  * Called when switching to a locked pmap, used to interlock against pmaps
5262  * undergoing modifications to prevent us from activating the MMU for the
5263  * target pmap until all such modifications have completed.  We have to do
5264  * this because the thread making the modifications has already set up its
5265  * SMP synchronization mask.
5266  *
5267  * This function cannot sleep!
5268  *
5269  * No requirements.
5270  */
5271 void
5272 pmap_interlock_wait(struct vmspace *vm)
5273 {
5274 	struct pmap *pmap = &vm->vm_pmap;
5275 
5276 	if (pmap->pm_active_lock & CPULOCK_EXCL) {
5277 		crit_enter();
5278 		KKASSERT(curthread->td_critcount >= 2);
5279 		DEBUG_PUSH_INFO("pmap_interlock_wait");
5280 		while (pmap->pm_active_lock & CPULOCK_EXCL) {
5281 			cpu_ccfence();
5282 			lwkt_process_ipiq();
5283 		}
5284 		DEBUG_POP_INFO();
5285 		crit_exit();
5286 	}
5287 }
5288 
5289 vm_offset_t
5290 pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size)
5291 {
5292 
5293 	if ((obj == NULL) || (size < NBPDR) ||
5294 	    ((obj->type != OBJT_DEVICE) && (obj->type != OBJT_MGTDEVICE))) {
5295 		return addr;
5296 	}
5297 
5298 	addr = roundup2(addr, NBPDR);
5299 	return addr;
5300 }
5301 
5302 /*
5303  * Used by kmalloc/kfree, page already exists at va
5304  */
5305 vm_page_t
5306 pmap_kvtom(vm_offset_t va)
5307 {
5308 	pt_entry_t *ptep = vtopte(va);
5309 
5310 	KKASSERT((*ptep & kernel_pmap.pmap_bits[PG_DEVICE_IDX]) == 0);
5311 	return(PHYS_TO_VM_PAGE(*ptep & PG_FRAME));
5312 }
5313 
5314 /*
5315  * Initialize machine-specific shared page directory support.  This
5316  * is executed when a VM object is created.
5317  */
5318 void
5319 pmap_object_init(vm_object_t object)
5320 {
5321 	object->md.pmap_rw = NULL;
5322 	object->md.pmap_ro = NULL;
5323 }
5324 
5325 /*
5326  * Clean up machine-specific shared page directory support.  This
5327  * is executed when a VM object is destroyed.
5328  */
5329 void
5330 pmap_object_free(vm_object_t object)
5331 {
5332 	pmap_t pmap;
5333 
5334 	if ((pmap = object->md.pmap_rw) != NULL) {
5335 		object->md.pmap_rw = NULL;
5336 		pmap_remove_noinval(pmap,
5337 				  VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS);
5338 		CPUMASK_ASSZERO(pmap->pm_active);
5339 		pmap_release(pmap);
5340 		pmap_puninit(pmap);
5341 		kfree(pmap, M_OBJPMAP);
5342 	}
5343 	if ((pmap = object->md.pmap_ro) != NULL) {
5344 		object->md.pmap_ro = NULL;
5345 		pmap_remove_noinval(pmap,
5346 				  VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS);
5347 		CPUMASK_ASSZERO(pmap->pm_active);
5348 		pmap_release(pmap);
5349 		pmap_puninit(pmap);
5350 		kfree(pmap, M_OBJPMAP);
5351 	}
5352 }
5353