xref: /freebsd/sys/i386/i386/pmap.c (revision f374ba41)
1 /*-
2  * SPDX-License-Identifier: BSD-4-Clause
3  *
4  * Copyright (c) 1991 Regents of the University of California.
5  * All rights reserved.
6  * Copyright (c) 1994 John S. Dyson
7  * All rights reserved.
8  * Copyright (c) 1994 David Greenman
9  * All rights reserved.
10  * Copyright (c) 2005-2010 Alan L. Cox <alc@cs.rice.edu>
11  * All rights reserved.
12  *
13  * This code is derived from software contributed to Berkeley by
14  * the Systems Programming Group of the University of Utah Computer
15  * Science Department and William Jolitz of UUNET Technologies Inc.
16  *
17  * Redistribution and use in source and binary forms, with or without
18  * modification, are permitted provided that the following conditions
19  * are met:
20  * 1. Redistributions of source code must retain the above copyright
21  *    notice, this list of conditions and the following disclaimer.
22  * 2. Redistributions in binary form must reproduce the above copyright
23  *    notice, this list of conditions and the following disclaimer in the
24  *    documentation and/or other materials provided with the distribution.
25  * 3. All advertising materials mentioning features or use of this software
26  *    must display the following acknowledgement:
27  *	This product includes software developed by the University of
28  *	California, Berkeley and its contributors.
29  * 4. Neither the name of the University nor the names of its contributors
30  *    may be used to endorse or promote products derived from this software
31  *    without specific prior written permission.
32  *
33  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
34  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
35  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
36  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
37  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
38  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
39  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
40  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
41  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
42  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
43  * SUCH DAMAGE.
44  *
45  *	from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
46  */
47 /*-
48  * Copyright (c) 2003 Networks Associates Technology, Inc.
49  * All rights reserved.
50  * Copyright (c) 2018 The FreeBSD Foundation
51  * All rights reserved.
52  *
53  * This software was developed for the FreeBSD Project by Jake Burkholder,
54  * Safeport Network Services, and Network Associates Laboratories, the
55  * Security Research Division of Network Associates, Inc. under
56  * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
57  * CHATS research program.
58  *
59  * Portions of this software were developed by
60  * Konstantin Belousov <kib@FreeBSD.org> under sponsorship from
61  * the FreeBSD Foundation.
62  *
63  * Redistribution and use in source and binary forms, with or without
64  * modification, are permitted provided that the following conditions
65  * are met:
66  * 1. Redistributions of source code must retain the above copyright
67  *    notice, this list of conditions and the following disclaimer.
68  * 2. Redistributions in binary form must reproduce the above copyright
69  *    notice, this list of conditions and the following disclaimer in the
70  *    documentation and/or other materials provided with the distribution.
71  *
72  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
73  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
74  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
75  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
76  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
77  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
78  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
79  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
80  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
81  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
82  * SUCH DAMAGE.
83  */
84 
85 #include <sys/cdefs.h>
86 __FBSDID("$FreeBSD$");
87 
88 /*
89  *	Manages physical address maps.
90  *
91  *	Since the information managed by this module is
92  *	also stored by the logical address mapping module,
93  *	this module may throw away valid virtual-to-physical
94  *	mappings at almost any time.  However, invalidations
95  *	of virtual-to-physical mappings must be done as
96  *	requested.
97  *
98  *	In order to cope with hardware architectures which
99  *	make virtual-to-physical map invalidates expensive,
100  *	this module may delay invalidate or reduced protection
101  *	operations until such time as they are actually
102  *	necessary.  This module is given full information as
103  *	to which processors are currently using which maps,
104  *	and to when physical maps must be made correct.
105  */
106 
107 #include "opt_apic.h"
108 #include "opt_cpu.h"
109 #include "opt_pmap.h"
110 #include "opt_smp.h"
111 #include "opt_vm.h"
112 
113 #include <sys/param.h>
114 #include <sys/systm.h>
115 #include <sys/kernel.h>
116 #include <sys/ktr.h>
117 #include <sys/lock.h>
118 #include <sys/malloc.h>
119 #include <sys/mman.h>
120 #include <sys/msgbuf.h>
121 #include <sys/mutex.h>
122 #include <sys/proc.h>
123 #include <sys/rwlock.h>
124 #include <sys/sbuf.h>
125 #include <sys/sf_buf.h>
126 #include <sys/sx.h>
127 #include <sys/vmmeter.h>
128 #include <sys/sched.h>
129 #include <sys/sysctl.h>
130 #include <sys/smp.h>
131 #include <sys/vmem.h>
132 
133 #include <vm/vm.h>
134 #include <vm/vm_param.h>
135 #include <vm/vm_kern.h>
136 #include <vm/vm_page.h>
137 #include <vm/vm_map.h>
138 #include <vm/vm_object.h>
139 #include <vm/vm_extern.h>
140 #include <vm/vm_pageout.h>
141 #include <vm/vm_pager.h>
142 #include <vm/vm_phys.h>
143 #include <vm/vm_radix.h>
144 #include <vm/vm_reserv.h>
145 #include <vm/uma.h>
146 
147 #ifdef DEV_APIC
148 #include <sys/bus.h>
149 #include <machine/intr_machdep.h>
150 #include <x86/apicvar.h>
151 #endif
152 #include <x86/ifunc.h>
153 #include <machine/bootinfo.h>
154 #include <machine/cpu.h>
155 #include <machine/cputypes.h>
156 #include <machine/md_var.h>
157 #include <machine/pcb.h>
158 #include <machine/specialreg.h>
159 #ifdef SMP
160 #include <machine/smp.h>
161 #endif
162 #include <machine/pmap_base.h>
163 
164 #if !defined(DIAGNOSTIC)
165 #ifdef __GNUC_GNU_INLINE__
166 #define PMAP_INLINE	__attribute__((__gnu_inline__)) inline
167 #else
168 #define PMAP_INLINE	extern inline
169 #endif
170 #else
171 #define PMAP_INLINE
172 #endif
173 
174 #ifdef PV_STATS
175 #define PV_STAT(x)	do { x ; } while (0)
176 #else
177 #define PV_STAT(x)	do { } while (0)
178 #endif
179 
180 #define	pa_index(pa)	((pa) >> PDRSHIFT)
181 #define	pa_to_pvh(pa)	(&pv_table[pa_index(pa)])
182 
183 /*
184  * PTmap is recursive pagemap at top of virtual address space.
185  * Within PTmap, the page directory can be found (third indirection).
186  */
187 #define	PTmap	((pt_entry_t *)(PTDPTDI << PDRSHIFT))
188 #define	PTD	((pd_entry_t *)((PTDPTDI << PDRSHIFT) + (PTDPTDI * PAGE_SIZE)))
189 #define	PTDpde	((pd_entry_t *)((PTDPTDI << PDRSHIFT) + (PTDPTDI * PAGE_SIZE) + \
190     (PTDPTDI * PDESIZE)))
191 
192 /*
193  * Translate a virtual address to the kernel virtual address of its page table
194  * entry (PTE).  This can be used recursively.  If the address of a PTE as
195  * previously returned by this macro is itself given as the argument, then the
196  * address of the page directory entry (PDE) that maps the PTE will be
197  * returned.
198  *
199  * This macro may be used before pmap_bootstrap() is called.
200  */
201 #define	vtopte(va)	(PTmap + i386_btop(va))
202 
203 /*
204  * Get PDEs and PTEs for user/kernel address space
205  */
206 #define	pmap_pde(m, v)	(&((m)->pm_pdir[(vm_offset_t)(v) >> PDRSHIFT]))
207 #define pdir_pde(m, v) (m[(vm_offset_t)(v) >> PDRSHIFT])
208 
209 #define pmap_pde_v(pte)		((*(int *)pte & PG_V) != 0)
210 #define pmap_pte_w(pte)		((*(int *)pte & PG_W) != 0)
211 #define pmap_pte_m(pte)		((*(int *)pte & PG_M) != 0)
212 #define pmap_pte_u(pte)		((*(int *)pte & PG_A) != 0)
213 #define pmap_pte_v(pte)		((*(int *)pte & PG_V) != 0)
214 
215 #define pmap_pte_set_w(pte, v)	((v) ? atomic_set_int((u_int *)(pte), PG_W) : \
216     atomic_clear_int((u_int *)(pte), PG_W))
217 #define pmap_pte_set_prot(pte, v) ((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v)))
218 
219 static int pgeflag = 0;		/* PG_G or-in */
220 static int pseflag = 0;		/* PG_PS or-in */
221 
222 static int nkpt = NKPT;
223 
224 #ifdef PMAP_PAE_COMP
225 pt_entry_t pg_nx;
226 static uma_zone_t pdptzone;
227 #else
228 #define	pg_nx	0
229 #endif
230 
231 _Static_assert(VM_MAXUSER_ADDRESS == VADDR(TRPTDI, 0), "VM_MAXUSER_ADDRESS");
232 _Static_assert(VM_MAX_KERNEL_ADDRESS <= VADDR(PTDPTDI, 0),
233     "VM_MAX_KERNEL_ADDRESS");
234 _Static_assert(PMAP_MAP_LOW == VADDR(LOWPTDI, 0), "PMAP_MAP_LOW");
235 _Static_assert(KERNLOAD == (KERNPTDI << PDRSHIFT), "KERNLOAD");
236 
237 extern int pat_works;
238 extern int pg_ps_enabled;
239 
240 extern int elf32_nxstack;
241 
242 #define	PAT_INDEX_SIZE	8
243 static int pat_index[PAT_INDEX_SIZE];	/* cache mode to PAT index conversion */
244 
245 /*
246  * pmap_mapdev support pre initialization (i.e. console)
247  */
248 #define	PMAP_PREINIT_MAPPING_COUNT	8
249 static struct pmap_preinit_mapping {
250 	vm_paddr_t	pa;
251 	vm_offset_t	va;
252 	vm_size_t	sz;
253 	int		mode;
254 } pmap_preinit_mapping[PMAP_PREINIT_MAPPING_COUNT];
255 static int pmap_initialized;
256 
257 static struct rwlock_padalign pvh_global_lock;
258 
259 /*
260  * Data for the pv entry allocation mechanism
261  */
262 static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
263 extern int pv_entry_max, pv_entry_count;
264 static int pv_entry_high_water = 0;
265 static struct md_page *pv_table;
266 extern int shpgperproc;
267 
268 static struct pv_chunk *pv_chunkbase;	/* KVA block for pv_chunks */
269 static int pv_maxchunks;		/* How many chunks we have KVA for */
270 static vm_offset_t pv_vafree;		/* freelist stored in the PTE */
271 
272 /*
273  * All those kernel PT submaps that BSD is so fond of
274  */
275 static pt_entry_t *CMAP3;
276 static pd_entry_t *KPTD;
277 static caddr_t CADDR3;
278 
279 /*
280  * Crashdump maps.
281  */
282 static caddr_t crashdumpmap;
283 
284 static pt_entry_t *PMAP1 = NULL, *PMAP2, *PMAP3;
285 static pt_entry_t *PADDR1 = NULL, *PADDR2, *PADDR3;
286 #ifdef SMP
287 static int PMAP1cpu, PMAP3cpu;
288 extern int PMAP1changedcpu;
289 #endif
290 extern int PMAP1changed;
291 extern int PMAP1unchanged;
292 static struct mtx PMAP2mutex;
293 
294 /*
295  * Internal flags for pmap_enter()'s helper functions.
296  */
297 #define	PMAP_ENTER_NORECLAIM	0x1000000	/* Don't reclaim PV entries. */
298 #define	PMAP_ENTER_NOREPLACE	0x2000000	/* Don't replace mappings. */
299 
300 static void	free_pv_chunk(struct pv_chunk *pc);
301 static void	free_pv_entry(pmap_t pmap, pv_entry_t pv);
302 static pv_entry_t get_pv_entry(pmap_t pmap, boolean_t try);
303 static void	pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa);
304 static bool	pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, pd_entry_t pde,
305 		    u_int flags);
306 #if VM_NRESERVLEVEL > 0
307 static void	pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa);
308 #endif
309 static void	pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
310 static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
311 		    vm_offset_t va);
312 static int	pmap_pvh_wired_mappings(struct md_page *pvh, int count);
313 
314 static void	pmap_abort_ptp(pmap_t pmap, vm_offset_t va, vm_page_t mpte);
315 static boolean_t pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va);
316 static bool	pmap_enter_4mpage(pmap_t pmap, vm_offset_t va, vm_page_t m,
317 		    vm_prot_t prot);
318 static int	pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde,
319 		    u_int flags, vm_page_t m);
320 static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
321     vm_page_t m, vm_prot_t prot, vm_page_t mpte);
322 static int pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte, bool promoted);
323 static void pmap_invalidate_pde_page(pmap_t pmap, vm_offset_t va,
324 		    pd_entry_t pde);
325 static void pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte);
326 static boolean_t pmap_is_modified_pvh(struct md_page *pvh);
327 static boolean_t pmap_is_referenced_pvh(struct md_page *pvh);
328 static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);
329 static void pmap_kenter_pde(vm_offset_t va, pd_entry_t newpde);
330 static void pmap_pde_attr(pd_entry_t *pde, int cache_bits);
331 #if VM_NRESERVLEVEL > 0
332 static void pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va);
333 #endif
334 static boolean_t pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva,
335     vm_prot_t prot);
336 static void pmap_pte_attr(pt_entry_t *pte, int cache_bits);
337 static void pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
338     struct spglist *free);
339 static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva,
340     struct spglist *free);
341 static vm_page_t pmap_remove_pt_page(pmap_t pmap, vm_offset_t va);
342 static void pmap_remove_page(pmap_t pmap, vm_offset_t va, struct spglist *free);
343 static bool	pmap_remove_ptes(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
344 		    struct spglist *free);
345 static void pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va);
346 static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m);
347 static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
348     vm_page_t m);
349 static void pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
350     pd_entry_t newpde);
351 static void pmap_update_pde_invalidate(vm_offset_t va, pd_entry_t newpde);
352 
353 static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, u_int flags);
354 
355 static vm_page_t _pmap_allocpte(pmap_t pmap, u_int ptepindex, u_int flags);
356 static void _pmap_unwire_ptp(pmap_t pmap, vm_page_t m, struct spglist *free);
357 static pt_entry_t *pmap_pte_quick(pmap_t pmap, vm_offset_t va);
358 static void pmap_pte_release(pt_entry_t *pte);
359 static int pmap_unuse_pt(pmap_t, vm_offset_t, struct spglist *);
360 #ifdef PMAP_PAE_COMP
361 static void *pmap_pdpt_allocf(uma_zone_t zone, vm_size_t bytes, int domain,
362     uint8_t *flags, int wait);
363 #endif
364 static void pmap_init_trm(void);
365 static void pmap_invalidate_all_int(pmap_t pmap);
366 
367 static __inline void pagezero(void *page);
368 
369 CTASSERT(1 << PDESHIFT == sizeof(pd_entry_t));
370 CTASSERT(1 << PTESHIFT == sizeof(pt_entry_t));
371 
372 extern char _end[];
373 extern u_long physfree;	/* phys addr of next free page */
374 extern u_long vm86phystk;/* PA of vm86/bios stack */
375 extern u_long vm86paddr;/* address of vm86 region */
376 extern int vm86pa;	/* phys addr of vm86 region */
377 extern u_long KERNend;	/* phys addr end of kernel (just after bss) */
378 #ifdef PMAP_PAE_COMP
379 pd_entry_t *IdlePTD_pae;	/* phys addr of kernel PTD */
380 pdpt_entry_t *IdlePDPT;	/* phys addr of kernel PDPT */
381 pt_entry_t *KPTmap_pae;	/* address of kernel page tables */
382 #define	IdlePTD	IdlePTD_pae
383 #define	KPTmap	KPTmap_pae
384 #else
385 pd_entry_t *IdlePTD_nopae;
386 pt_entry_t *KPTmap_nopae;
387 #define	IdlePTD	IdlePTD_nopae
388 #define	KPTmap	KPTmap_nopae
389 #endif
390 extern u_long KPTphys;	/* phys addr of kernel page tables */
391 extern u_long tramp_idleptd;
392 
393 static u_long
394 allocpages(u_int cnt, u_long *physfree)
395 {
396 	u_long res;
397 
398 	res = *physfree;
399 	*physfree += PAGE_SIZE * cnt;
400 	bzero((void *)res, PAGE_SIZE * cnt);
401 	return (res);
402 }
403 
404 static void
405 pmap_cold_map(u_long pa, u_long va, u_long cnt)
406 {
407 	pt_entry_t *pt;
408 
409 	for (pt = (pt_entry_t *)KPTphys + atop(va); cnt > 0;
410 	    cnt--, pt++, va += PAGE_SIZE, pa += PAGE_SIZE)
411 		*pt = pa | PG_V | PG_RW | PG_A | PG_M;
412 }
413 
414 static void
415 pmap_cold_mapident(u_long pa, u_long cnt)
416 {
417 
418 	pmap_cold_map(pa, pa, cnt);
419 }
420 
421 _Static_assert(LOWPTDI * 2 * NBPDR == KERNBASE,
422     "Broken double-map of zero PTD");
423 
424 static void
425 __CONCAT(PMTYPE, remap_lower)(bool enable)
426 {
427 	int i;
428 
429 	for (i = 0; i < LOWPTDI; i++)
430 		IdlePTD[i] = enable ? IdlePTD[LOWPTDI + i] : 0;
431 	load_cr3(rcr3());		/* invalidate TLB */
432 }
433 
434 /*
435  * Called from locore.s before paging is enabled.  Sets up the first
436  * kernel page table.  Since kernel is mapped with PA == VA, this code
437  * does not require relocations.
438  */
439 void
440 __CONCAT(PMTYPE, cold)(void)
441 {
442 	pt_entry_t *pt;
443 	u_long a;
444 	u_int cr3, ncr4;
445 
446 	physfree = (u_long)&_end;
447 	if (bootinfo.bi_esymtab != 0)
448 		physfree = bootinfo.bi_esymtab;
449 	if (bootinfo.bi_kernend != 0)
450 		physfree = bootinfo.bi_kernend;
451 	physfree = roundup2(physfree, NBPDR);
452 	KERNend = physfree;
453 
454 	/* Allocate Kernel Page Tables */
455 	KPTphys = allocpages(NKPT, &physfree);
456 	KPTmap = (pt_entry_t *)KPTphys;
457 
458 	/* Allocate Page Table Directory */
459 #ifdef PMAP_PAE_COMP
460 	/* XXX only need 32 bytes (easier for now) */
461 	IdlePDPT = (pdpt_entry_t *)allocpages(1, &physfree);
462 #endif
463 	IdlePTD = (pd_entry_t *)allocpages(NPGPTD, &physfree);
464 
465 	/*
466 	 * Allocate KSTACK.  Leave a guard page between IdlePTD and
467 	 * proc0kstack, to control stack overflow for thread0 and
468 	 * prevent corruption of the page table.  We leak the guard
469 	 * physical memory due to 1:1 mappings.
470 	 */
471 	allocpages(1, &physfree);
472 	proc0kstack = allocpages(TD0_KSTACK_PAGES, &physfree);
473 
474 	/* vm86/bios stack */
475 	vm86phystk = allocpages(1, &physfree);
476 
477 	/* pgtable + ext + IOPAGES */
478 	vm86paddr = vm86pa = allocpages(3, &physfree);
479 
480 	/* Install page tables into PTD.  Page table page 1 is wasted. */
481 	for (a = 0; a < NKPT; a++)
482 		IdlePTD[a] = (KPTphys + ptoa(a)) | PG_V | PG_RW | PG_A | PG_M;
483 
484 #ifdef PMAP_PAE_COMP
485 	/* PAE install PTD pointers into PDPT */
486 	for (a = 0; a < NPGPTD; a++)
487 		IdlePDPT[a] = ((u_int)IdlePTD + ptoa(a)) | PG_V;
488 #endif
489 
490 	/*
491 	 * Install recursive mapping for kernel page tables into
492 	 * itself.
493 	 */
494 	for (a = 0; a < NPGPTD; a++)
495 		IdlePTD[PTDPTDI + a] = ((u_int)IdlePTD + ptoa(a)) | PG_V |
496 		    PG_RW;
497 
498 	/*
499 	 * Initialize page table pages mapping physical address zero
500 	 * through the (physical) end of the kernel.  Many of these
501 	 * pages must be reserved, and we reserve them all and map
502 	 * them linearly for convenience.  We do this even if we've
503 	 * enabled PSE above; we'll just switch the corresponding
504 	 * kernel PDEs before we turn on paging.
505 	 *
506 	 * This and all other page table entries allow read and write
507 	 * access for various reasons.  Kernel mappings never have any
508 	 * access restrictions.
509 	 */
510 	pmap_cold_mapident(0, atop(NBPDR) * LOWPTDI);
511 	pmap_cold_map(0, NBPDR * LOWPTDI, atop(NBPDR) * LOWPTDI);
512 	pmap_cold_mapident(KERNBASE, atop(KERNend - KERNBASE));
513 
514 	/* Map page table directory */
515 #ifdef PMAP_PAE_COMP
516 	pmap_cold_mapident((u_long)IdlePDPT, 1);
517 #endif
518 	pmap_cold_mapident((u_long)IdlePTD, NPGPTD);
519 
520 	/* Map early KPTmap.  It is really pmap_cold_mapident. */
521 	pmap_cold_map(KPTphys, (u_long)KPTmap, NKPT);
522 
523 	/* Map proc0kstack */
524 	pmap_cold_mapident(proc0kstack, TD0_KSTACK_PAGES);
525 	/* ISA hole already mapped */
526 
527 	pmap_cold_mapident(vm86phystk, 1);
528 	pmap_cold_mapident(vm86pa, 3);
529 
530 	/* Map page 0 into the vm86 page table */
531 	*(pt_entry_t *)vm86pa = 0 | PG_RW | PG_U | PG_A | PG_M | PG_V;
532 
533 	/* ...likewise for the ISA hole for vm86 */
534 	for (pt = (pt_entry_t *)vm86pa + atop(ISA_HOLE_START), a = 0;
535 	    a < atop(ISA_HOLE_LENGTH); a++, pt++)
536 		*pt = (ISA_HOLE_START + ptoa(a)) | PG_RW | PG_U | PG_A |
537 		    PG_M | PG_V;
538 
539 	/* Enable PSE, PGE, VME, and PAE if configured. */
540 	ncr4 = 0;
541 	if ((cpu_feature & CPUID_PSE) != 0) {
542 		ncr4 |= CR4_PSE;
543 		pseflag = PG_PS;
544 		/*
545 		 * Superpage mapping of the kernel text.  Existing 4k
546 		 * page table pages are wasted.
547 		 */
548 		for (a = KERNBASE; a < KERNend; a += NBPDR)
549 			IdlePTD[a >> PDRSHIFT] = a | PG_PS | PG_A | PG_M |
550 			    PG_RW | PG_V;
551 	}
552 	if ((cpu_feature & CPUID_PGE) != 0) {
553 		ncr4 |= CR4_PGE;
554 		pgeflag = PG_G;
555 	}
556 	ncr4 |= (cpu_feature & CPUID_VME) != 0 ? CR4_VME : 0;
557 #ifdef PMAP_PAE_COMP
558 	ncr4 |= CR4_PAE;
559 #endif
560 	if (ncr4 != 0)
561 		load_cr4(rcr4() | ncr4);
562 
563 	/* Now enable paging */
564 #ifdef PMAP_PAE_COMP
565 	cr3 = (u_int)IdlePDPT;
566 	if ((cpu_feature & CPUID_PAT) == 0)
567 		wbinvd();
568 #else
569 	cr3 = (u_int)IdlePTD;
570 #endif
571 	tramp_idleptd = cr3;
572 	load_cr3(cr3);
573 	load_cr0(rcr0() | CR0_PG);
574 
575 	/*
576 	 * Now running relocated at KERNBASE where the system is
577 	 * linked to run.
578 	 */
579 
580 	/*
581 	 * Remove the lowest part of the double mapping of low memory
582 	 * to get some null pointer checks.
583 	 */
584 	__CONCAT(PMTYPE, remap_lower)(false);
585 
586 	kernel_vm_end = /* 0 + */ NKPT * NBPDR;
587 #ifdef PMAP_PAE_COMP
588 	i386_pmap_VM_NFREEORDER = VM_NFREEORDER_PAE;
589 	i386_pmap_VM_LEVEL_0_ORDER = VM_LEVEL_0_ORDER_PAE;
590 	i386_pmap_PDRSHIFT = PDRSHIFT_PAE;
591 #else
592 	i386_pmap_VM_NFREEORDER = VM_NFREEORDER_NOPAE;
593 	i386_pmap_VM_LEVEL_0_ORDER = VM_LEVEL_0_ORDER_NOPAE;
594 	i386_pmap_PDRSHIFT = PDRSHIFT_NOPAE;
595 #endif
596 }
597 
598 static void
599 __CONCAT(PMTYPE, set_nx)(void)
600 {
601 
602 #ifdef PMAP_PAE_COMP
603 	if ((amd_feature & AMDID_NX) == 0)
604 		return;
605 	pg_nx = PG_NX;
606 	elf32_nxstack = 1;
607 	/* EFER.EFER_NXE is set in initializecpu(). */
608 #endif
609 }
610 
611 /*
612  *	Bootstrap the system enough to run with virtual memory.
613  *
614  *	On the i386 this is called after pmap_cold() created initial
615  *	kernel page table and enabled paging, and just syncs the pmap
616  *	module with what has already been done.
617  */
618 static void
619 __CONCAT(PMTYPE, bootstrap)(vm_paddr_t firstaddr)
620 {
621 	vm_offset_t va;
622 	pt_entry_t *pte, *unused __unused;
623 	struct pcpu *pc;
624 	u_long res;
625 	int i;
626 
627 	res = atop(firstaddr - (vm_paddr_t)KERNLOAD);
628 
629 	/*
630 	 * Add a physical memory segment (vm_phys_seg) corresponding to the
631 	 * preallocated kernel page table pages so that vm_page structures
632 	 * representing these pages will be created.  The vm_page structures
633 	 * are required for promotion of the corresponding kernel virtual
634 	 * addresses to superpage mappings.
635 	 */
636 	vm_phys_early_add_seg(KPTphys, KPTphys + ptoa(nkpt));
637 
638 	/*
639 	 * Initialize the first available kernel virtual address.
640 	 * However, using "firstaddr" may waste a few pages of the
641 	 * kernel virtual address space, because pmap_cold() may not
642 	 * have mapped every physical page that it allocated.
643 	 * Preferably, pmap_cold() would provide a first unused
644 	 * virtual address in addition to "firstaddr".
645 	 */
646 	virtual_avail = (vm_offset_t)firstaddr;
647 	virtual_end = VM_MAX_KERNEL_ADDRESS;
648 
649 	/*
650 	 * Initialize the kernel pmap (which is statically allocated).
651 	 * Count bootstrap data as being resident in case any of this data is
652 	 * later unmapped (using pmap_remove()) and freed.
653 	 */
654 	PMAP_LOCK_INIT(kernel_pmap);
655 	kernel_pmap->pm_pdir = IdlePTD;
656 #ifdef PMAP_PAE_COMP
657 	kernel_pmap->pm_pdpt = IdlePDPT;
658 #endif
659 	CPU_FILL(&kernel_pmap->pm_active);	/* don't allow deactivation */
660 	kernel_pmap->pm_stats.resident_count = res;
661 	TAILQ_INIT(&kernel_pmap->pm_pvchunk);
662 
663  	/*
664 	 * Initialize the global pv list lock.
665 	 */
666 	rw_init(&pvh_global_lock, "pmap pv global");
667 
668 	/*
669 	 * Reserve some special page table entries/VA space for temporary
670 	 * mapping of pages.
671 	 */
672 #define	SYSMAP(c, p, v, n)	\
673 	v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);
674 
675 	va = virtual_avail;
676 	pte = vtopte(va);
677 
678 	/*
679 	 * Initialize temporary map objects on the current CPU for use
680 	 * during early boot.
681 	 * CMAP1/CMAP2 are used for zeroing and copying pages.
682 	 * CMAP3 is used for the boot-time memory test.
683 	 */
684 	pc = get_pcpu();
685 	mtx_init(&pc->pc_cmap_lock, "SYSMAPS", NULL, MTX_DEF);
686 	SYSMAP(caddr_t, pc->pc_cmap_pte1, pc->pc_cmap_addr1, 1)
687 	SYSMAP(caddr_t, pc->pc_cmap_pte2, pc->pc_cmap_addr2, 1)
688 	SYSMAP(vm_offset_t, pte, pc->pc_qmap_addr, 1)
689 
690 	SYSMAP(caddr_t, CMAP3, CADDR3, 1);
691 
692 	/*
693 	 * Crashdump maps.
694 	 */
695 	SYSMAP(caddr_t, unused, crashdumpmap, MAXDUMPPGS)
696 
697 	/*
698 	 * ptvmmap is used for reading arbitrary physical pages via /dev/mem.
699 	 */
700 	SYSMAP(caddr_t, unused, ptvmmap, 1)
701 
702 	/*
703 	 * msgbufp is used to map the system message buffer.
704 	 */
705 	SYSMAP(struct msgbuf *, unused, msgbufp, atop(round_page(msgbufsize)))
706 
707 	/*
708 	 * KPTmap is used by pmap_kextract().
709 	 *
710 	 * KPTmap is first initialized by pmap_cold().  However, that initial
711 	 * KPTmap can only support NKPT page table pages.  Here, a larger
712 	 * KPTmap is created that can support KVA_PAGES page table pages.
713 	 */
714 	SYSMAP(pt_entry_t *, KPTD, KPTmap, KVA_PAGES)
715 
716 	for (i = 0; i < NKPT; i++)
717 		KPTD[i] = (KPTphys + ptoa(i)) | PG_RW | PG_V;
718 
719 	/*
720 	 * PADDR1 and PADDR2 are used by pmap_pte_quick() and pmap_pte(),
721 	 * respectively.
722 	 */
723 	SYSMAP(pt_entry_t *, PMAP1, PADDR1, 1)
724 	SYSMAP(pt_entry_t *, PMAP2, PADDR2, 1)
725 	SYSMAP(pt_entry_t *, PMAP3, PADDR3, 1)
726 
727 	mtx_init(&PMAP2mutex, "PMAP2", NULL, MTX_DEF);
728 
729 	virtual_avail = va;
730 
731 	/*
732 	 * Initialize the PAT MSR if present.
733 	 * pmap_init_pat() clears and sets CR4_PGE, which, as a
734 	 * side-effect, invalidates stale PG_G TLB entries that might
735 	 * have been created in our pre-boot environment.  We assume
736 	 * that PAT support implies PGE and in reverse, PGE presence
737 	 * comes with PAT.  Both features were added for Pentium Pro.
738 	 */
739 	pmap_init_pat();
740 }
741 
742 static void
743 pmap_init_reserved_pages(void)
744 {
745 	struct pcpu *pc;
746 	vm_offset_t pages;
747 	int i;
748 
749 #ifdef PMAP_PAE_COMP
750 	if (!pae_mode)
751 		return;
752 #else
753 	if (pae_mode)
754 		return;
755 #endif
756 	CPU_FOREACH(i) {
757 		pc = pcpu_find(i);
758 		mtx_init(&pc->pc_copyout_mlock, "cpmlk", NULL, MTX_DEF |
759 		    MTX_NEW);
760 		pc->pc_copyout_maddr = kva_alloc(ptoa(2));
761 		if (pc->pc_copyout_maddr == 0)
762 			panic("unable to allocate non-sleepable copyout KVA");
763 		sx_init(&pc->pc_copyout_slock, "cpslk");
764 		pc->pc_copyout_saddr = kva_alloc(ptoa(2));
765 		if (pc->pc_copyout_saddr == 0)
766 			panic("unable to allocate sleepable copyout KVA");
767 		pc->pc_pmap_eh_va = kva_alloc(ptoa(1));
768 		if (pc->pc_pmap_eh_va == 0)
769 			panic("unable to allocate pmap_extract_and_hold KVA");
770 		pc->pc_pmap_eh_ptep = (char *)vtopte(pc->pc_pmap_eh_va);
771 
772 		/*
773 		 * Skip if the mappings have already been initialized,
774 		 * i.e. this is the BSP.
775 		 */
776 		if (pc->pc_cmap_addr1 != 0)
777 			continue;
778 
779 		mtx_init(&pc->pc_cmap_lock, "SYSMAPS", NULL, MTX_DEF);
780 		pages = kva_alloc(PAGE_SIZE * 3);
781 		if (pages == 0)
782 			panic("unable to allocate CMAP KVA");
783 		pc->pc_cmap_pte1 = vtopte(pages);
784 		pc->pc_cmap_pte2 = vtopte(pages + PAGE_SIZE);
785 		pc->pc_cmap_addr1 = (caddr_t)pages;
786 		pc->pc_cmap_addr2 = (caddr_t)(pages + PAGE_SIZE);
787 		pc->pc_qmap_addr = pages + ptoa(2);
788 	}
789 }
790 
791 SYSINIT(rpages_init, SI_SUB_CPU, SI_ORDER_ANY, pmap_init_reserved_pages, NULL);
792 
793 /*
794  * Setup the PAT MSR.
795  */
796 static void
797 __CONCAT(PMTYPE, init_pat)(void)
798 {
799 	int pat_table[PAT_INDEX_SIZE];
800 	uint64_t pat_msr;
801 	u_long cr0, cr4;
802 	int i;
803 
804 	/* Set default PAT index table. */
805 	for (i = 0; i < PAT_INDEX_SIZE; i++)
806 		pat_table[i] = -1;
807 	pat_table[PAT_WRITE_BACK] = 0;
808 	pat_table[PAT_WRITE_THROUGH] = 1;
809 	pat_table[PAT_UNCACHEABLE] = 3;
810 	pat_table[PAT_WRITE_COMBINING] = 3;
811 	pat_table[PAT_WRITE_PROTECTED] = 3;
812 	pat_table[PAT_UNCACHED] = 3;
813 
814 	/*
815 	 * Bail if this CPU doesn't implement PAT.
816 	 * We assume that PAT support implies PGE.
817 	 */
818 	if ((cpu_feature & CPUID_PAT) == 0) {
819 		for (i = 0; i < PAT_INDEX_SIZE; i++)
820 			pat_index[i] = pat_table[i];
821 		pat_works = 0;
822 		return;
823 	}
824 
825 	/*
826 	 * Due to some Intel errata, we can only safely use the lower 4
827 	 * PAT entries.
828 	 *
829 	 *   Intel Pentium III Processor Specification Update
830 	 * Errata E.27 (Upper Four PAT Entries Not Usable With Mode B
831 	 * or Mode C Paging)
832 	 *
833 	 *   Intel Pentium IV  Processor Specification Update
834 	 * Errata N46 (PAT Index MSB May Be Calculated Incorrectly)
835 	 */
836 	if (cpu_vendor_id == CPU_VENDOR_INTEL &&
837 	    !(CPUID_TO_FAMILY(cpu_id) == 6 && CPUID_TO_MODEL(cpu_id) >= 0xe))
838 		pat_works = 0;
839 
840 	/* Initialize default PAT entries. */
841 	pat_msr = PAT_VALUE(0, PAT_WRITE_BACK) |
842 	    PAT_VALUE(1, PAT_WRITE_THROUGH) |
843 	    PAT_VALUE(2, PAT_UNCACHED) |
844 	    PAT_VALUE(3, PAT_UNCACHEABLE) |
845 	    PAT_VALUE(4, PAT_WRITE_BACK) |
846 	    PAT_VALUE(5, PAT_WRITE_THROUGH) |
847 	    PAT_VALUE(6, PAT_UNCACHED) |
848 	    PAT_VALUE(7, PAT_UNCACHEABLE);
849 
850 	if (pat_works) {
851 		/*
852 		 * Leave the indices 0-3 at the default of WB, WT, UC-, and UC.
853 		 * Program 5 and 6 as WP and WC.
854 		 * Leave 4 and 7 as WB and UC.
855 		 */
856 		pat_msr &= ~(PAT_MASK(5) | PAT_MASK(6));
857 		pat_msr |= PAT_VALUE(5, PAT_WRITE_PROTECTED) |
858 		    PAT_VALUE(6, PAT_WRITE_COMBINING);
859 		pat_table[PAT_UNCACHED] = 2;
860 		pat_table[PAT_WRITE_PROTECTED] = 5;
861 		pat_table[PAT_WRITE_COMBINING] = 6;
862 	} else {
863 		/*
864 		 * Just replace PAT Index 2 with WC instead of UC-.
865 		 */
866 		pat_msr &= ~PAT_MASK(2);
867 		pat_msr |= PAT_VALUE(2, PAT_WRITE_COMBINING);
868 		pat_table[PAT_WRITE_COMBINING] = 2;
869 	}
870 
871 	/* Disable PGE. */
872 	cr4 = rcr4();
873 	load_cr4(cr4 & ~CR4_PGE);
874 
875 	/* Disable caches (CD = 1, NW = 0). */
876 	cr0 = rcr0();
877 	load_cr0((cr0 & ~CR0_NW) | CR0_CD);
878 
879 	/* Flushes caches and TLBs. */
880 	wbinvd();
881 	invltlb();
882 
883 	/* Update PAT and index table. */
884 	wrmsr(MSR_PAT, pat_msr);
885 	for (i = 0; i < PAT_INDEX_SIZE; i++)
886 		pat_index[i] = pat_table[i];
887 
888 	/* Flush caches and TLBs again. */
889 	wbinvd();
890 	invltlb();
891 
892 	/* Restore caches and PGE. */
893 	load_cr0(cr0);
894 	load_cr4(cr4);
895 }
896 
897 #ifdef PMAP_PAE_COMP
898 static void *
899 pmap_pdpt_allocf(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *flags,
900     int wait)
901 {
902 
903 	/* Inform UMA that this allocator uses kernel_map/object. */
904 	*flags = UMA_SLAB_KERNEL;
905 	return ((void *)kmem_alloc_contig_domainset(DOMAINSET_FIXED(domain),
906 	    bytes, wait, 0x0ULL, 0xffffffffULL, 1, 0, VM_MEMATTR_DEFAULT));
907 }
908 #endif
909 
910 /*
911  * Abuse the pte nodes for unmapped kva to thread a kva freelist through.
912  * Requirements:
913  *  - Must deal with pages in order to ensure that none of the PG_* bits
914  *    are ever set, PG_V in particular.
915  *  - Assumes we can write to ptes without pte_store() atomic ops, even
916  *    on PAE systems.  This should be ok.
917  *  - Assumes nothing will ever test these addresses for 0 to indicate
918  *    no mapping instead of correctly checking PG_V.
919  *  - Assumes a vm_offset_t will fit in a pte (true for i386).
920  * Because PG_V is never set, there can be no mappings to invalidate.
921  */
922 static vm_offset_t
923 pmap_ptelist_alloc(vm_offset_t *head)
924 {
925 	pt_entry_t *pte;
926 	vm_offset_t va;
927 
928 	va = *head;
929 	if (va == 0)
930 		panic("pmap_ptelist_alloc: exhausted ptelist KVA");
931 	pte = vtopte(va);
932 	*head = *pte;
933 	if (*head & PG_V)
934 		panic("pmap_ptelist_alloc: va with PG_V set!");
935 	*pte = 0;
936 	return (va);
937 }
938 
939 static void
940 pmap_ptelist_free(vm_offset_t *head, vm_offset_t va)
941 {
942 	pt_entry_t *pte;
943 
944 	if (va & PG_V)
945 		panic("pmap_ptelist_free: freeing va with PG_V set!");
946 	pte = vtopte(va);
947 	*pte = *head;		/* virtual! PG_V is 0 though */
948 	*head = va;
949 }
950 
951 static void
952 pmap_ptelist_init(vm_offset_t *head, void *base, int npages)
953 {
954 	int i;
955 	vm_offset_t va;
956 
957 	*head = 0;
958 	for (i = npages - 1; i >= 0; i--) {
959 		va = (vm_offset_t)base + i * PAGE_SIZE;
960 		pmap_ptelist_free(head, va);
961 	}
962 }
963 
964 /*
965  *	Initialize the pmap module.
966  *	Called by vm_init, to initialize any structures that the pmap
967  *	system needs to map virtual memory.
968  */
969 static void
970 __CONCAT(PMTYPE, init)(void)
971 {
972 	struct pmap_preinit_mapping *ppim;
973 	vm_page_t mpte;
974 	vm_size_t s;
975 	int i, pv_npg;
976 
977 	/*
978 	 * Initialize the vm page array entries for the kernel pmap's
979 	 * page table pages.
980 	 */
981 	PMAP_LOCK(kernel_pmap);
982 	for (i = 0; i < NKPT; i++) {
983 		mpte = PHYS_TO_VM_PAGE(KPTphys + ptoa(i));
984 		KASSERT(mpte >= vm_page_array &&
985 		    mpte < &vm_page_array[vm_page_array_size],
986 		    ("pmap_init: page table page is out of range"));
987 		mpte->pindex = i + KPTDI;
988 		mpte->phys_addr = KPTphys + ptoa(i);
989 		mpte->ref_count = 1;
990 
991 		/*
992 		 * Collect the page table pages that were replaced by a 2/4MB
993 		 * page.  They are filled with equivalent 4KB page mappings.
994 		 */
995 		if (pseflag != 0 &&
996 		    KERNBASE <= i << PDRSHIFT && i << PDRSHIFT < KERNend &&
997 		    pmap_insert_pt_page(kernel_pmap, mpte, true))
998 			panic("pmap_init: pmap_insert_pt_page failed");
999 	}
1000 	PMAP_UNLOCK(kernel_pmap);
1001 	vm_wire_add(NKPT);
1002 
1003 	/*
1004 	 * Initialize the address space (zone) for the pv entries.  Set a
1005 	 * high water mark so that the system can recover from excessive
1006 	 * numbers of pv entries.
1007 	 */
1008 	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
1009 	pv_entry_max = shpgperproc * maxproc + vm_cnt.v_page_count;
1010 	TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
1011 	pv_entry_max = roundup(pv_entry_max, _NPCPV);
1012 	pv_entry_high_water = 9 * (pv_entry_max / 10);
1013 
1014 	/*
1015 	 * If the kernel is running on a virtual machine, then it must assume
1016 	 * that MCA is enabled by the hypervisor.  Moreover, the kernel must
1017 	 * be prepared for the hypervisor changing the vendor and family that
1018 	 * are reported by CPUID.  Consequently, the workaround for AMD Family
1019 	 * 10h Erratum 383 is enabled if the processor's feature set does not
1020 	 * include at least one feature that is only supported by older Intel
1021 	 * or newer AMD processors.
1022 	 */
1023 	if (vm_guest != VM_GUEST_NO && (cpu_feature & CPUID_SS) == 0 &&
1024 	    (cpu_feature2 & (CPUID2_SSSE3 | CPUID2_SSE41 | CPUID2_AESNI |
1025 	    CPUID2_AVX | CPUID2_XSAVE)) == 0 && (amd_feature2 & (AMDID2_XOP |
1026 	    AMDID2_FMA4)) == 0)
1027 		workaround_erratum383 = 1;
1028 
1029 	/*
1030 	 * Are large page mappings supported and enabled?
1031 	 */
1032 	TUNABLE_INT_FETCH("vm.pmap.pg_ps_enabled", &pg_ps_enabled);
1033 	if (pseflag == 0)
1034 		pg_ps_enabled = 0;
1035 	else if (pg_ps_enabled) {
1036 		KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0,
1037 		    ("pmap_init: can't assign to pagesizes[1]"));
1038 		pagesizes[1] = NBPDR;
1039 	}
1040 
1041 	/*
1042 	 * Calculate the size of the pv head table for superpages.
1043 	 * Handle the possibility that "vm_phys_segs[...].end" is zero.
1044 	 */
1045 	pv_npg = trunc_4mpage(vm_phys_segs[vm_phys_nsegs - 1].end -
1046 	    PAGE_SIZE) / NBPDR + 1;
1047 
1048 	/*
1049 	 * Allocate memory for the pv head table for superpages.
1050 	 */
1051 	s = (vm_size_t)(pv_npg * sizeof(struct md_page));
1052 	s = round_page(s);
1053 	pv_table = kmem_malloc(s, M_WAITOK | M_ZERO);
1054 	for (i = 0; i < pv_npg; i++)
1055 		TAILQ_INIT(&pv_table[i].pv_list);
1056 
1057 	pv_maxchunks = MAX(pv_entry_max / _NPCPV, maxproc);
1058 	pv_chunkbase = (struct pv_chunk *)kva_alloc(PAGE_SIZE * pv_maxchunks);
1059 	if (pv_chunkbase == NULL)
1060 		panic("pmap_init: not enough kvm for pv chunks");
1061 	pmap_ptelist_init(&pv_vafree, pv_chunkbase, pv_maxchunks);
1062 #ifdef PMAP_PAE_COMP
1063 	pdptzone = uma_zcreate("PDPT", NPGPTD * sizeof(pdpt_entry_t), NULL,
1064 	    NULL, NULL, NULL, (NPGPTD * sizeof(pdpt_entry_t)) - 1,
1065 	    UMA_ZONE_CONTIG | UMA_ZONE_VM | UMA_ZONE_NOFREE);
1066 	uma_zone_set_allocf(pdptzone, pmap_pdpt_allocf);
1067 #endif
1068 
1069 	pmap_initialized = 1;
1070 	pmap_init_trm();
1071 
1072 	if (!bootverbose)
1073 		return;
1074 	for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
1075 		ppim = pmap_preinit_mapping + i;
1076 		if (ppim->va == 0)
1077 			continue;
1078 		printf("PPIM %u: PA=%#jx, VA=%#x, size=%#x, mode=%#x\n", i,
1079 		    (uintmax_t)ppim->pa, ppim->va, ppim->sz, ppim->mode);
1080 	}
1081 
1082 }
1083 
1084 extern u_long pmap_pde_demotions;
1085 extern u_long pmap_pde_mappings;
1086 extern u_long pmap_pde_p_failures;
1087 extern u_long pmap_pde_promotions;
1088 
1089 /***************************************************
1090  * Low level helper routines.....
1091  ***************************************************/
1092 
1093 static boolean_t
1094 __CONCAT(PMTYPE, is_valid_memattr)(pmap_t pmap __unused, vm_memattr_t mode)
1095 {
1096 
1097 	return (mode >= 0 && mode < PAT_INDEX_SIZE &&
1098 	    pat_index[(int)mode] >= 0);
1099 }
1100 
1101 /*
1102  * Determine the appropriate bits to set in a PTE or PDE for a specified
1103  * caching mode.
1104  */
1105 static int
1106 __CONCAT(PMTYPE, cache_bits)(pmap_t pmap, int mode, boolean_t is_pde)
1107 {
1108 	int cache_bits, pat_flag, pat_idx;
1109 
1110 	if (!pmap_is_valid_memattr(pmap, mode))
1111 		panic("Unknown caching mode %d\n", mode);
1112 
1113 	/* The PAT bit is different for PTE's and PDE's. */
1114 	pat_flag = is_pde ? PG_PDE_PAT : PG_PTE_PAT;
1115 
1116 	/* Map the caching mode to a PAT index. */
1117 	pat_idx = pat_index[mode];
1118 
1119 	/* Map the 3-bit index value into the PAT, PCD, and PWT bits. */
1120 	cache_bits = 0;
1121 	if (pat_idx & 0x4)
1122 		cache_bits |= pat_flag;
1123 	if (pat_idx & 0x2)
1124 		cache_bits |= PG_NC_PCD;
1125 	if (pat_idx & 0x1)
1126 		cache_bits |= PG_NC_PWT;
1127 	return (cache_bits);
1128 }
1129 
1130 static int
1131 pmap_pat_index(pmap_t pmap, pt_entry_t pte, bool is_pde)
1132 {
1133 	int pat_flag, pat_idx;
1134 
1135 	if ((cpu_feature & CPUID_PAT) == 0)
1136 		return (0);
1137 
1138 	pat_idx = 0;
1139 	/* The PAT bit is different for PTE's and PDE's. */
1140 	pat_flag = is_pde ? PG_PDE_PAT : PG_PTE_PAT;
1141 
1142 	if ((pte & pat_flag) != 0)
1143 		pat_idx |= 0x4;
1144 	if ((pte & PG_NC_PCD) != 0)
1145 		pat_idx |= 0x2;
1146 	if ((pte & PG_NC_PWT) != 0)
1147 		pat_idx |= 0x1;
1148 
1149 	/* See pmap_init_pat(). */
1150 	if (pat_works) {
1151 		if (pat_idx == 4)
1152 			pat_idx = 0;
1153 		if (pat_idx == 7)
1154 			pat_idx = 3;
1155 	} else {
1156 		/* XXXKIB */
1157 	}
1158 
1159 	return (pat_idx);
1160 }
1161 
1162 static bool
1163 __CONCAT(PMTYPE, ps_enabled)(pmap_t pmap __unused)
1164 {
1165 
1166 	return (pg_ps_enabled);
1167 }
1168 
1169 /*
1170  * The caller is responsible for maintaining TLB consistency.
1171  */
1172 static void
1173 pmap_kenter_pde(vm_offset_t va, pd_entry_t newpde)
1174 {
1175 	pd_entry_t *pde;
1176 
1177 	pde = pmap_pde(kernel_pmap, va);
1178 	pde_store(pde, newpde);
1179 }
1180 
1181 /*
1182  * After changing the page size for the specified virtual address in the page
1183  * table, flush the corresponding entries from the processor's TLB.  Only the
1184  * calling processor's TLB is affected.
1185  *
1186  * The calling thread must be pinned to a processor.
1187  */
1188 static void
1189 pmap_update_pde_invalidate(vm_offset_t va, pd_entry_t newpde)
1190 {
1191 
1192 	if ((newpde & PG_PS) == 0)
1193 		/* Demotion: flush a specific 2MB page mapping. */
1194 		invlpg(va);
1195 	else /* if ((newpde & PG_G) == 0) */
1196 		/*
1197 		 * Promotion: flush every 4KB page mapping from the TLB
1198 		 * because there are too many to flush individually.
1199 		 */
1200 		invltlb();
1201 }
1202 
1203 #ifdef SMP
1204 
1205 static void
1206 pmap_curcpu_cb_dummy(pmap_t pmap __unused, vm_offset_t addr1 __unused,
1207     vm_offset_t addr2 __unused)
1208 {
1209 }
1210 
1211 /*
1212  * For SMP, these functions have to use the IPI mechanism for coherence.
1213  *
1214  * N.B.: Before calling any of the following TLB invalidation functions,
1215  * the calling processor must ensure that all stores updating a non-
1216  * kernel page table are globally performed.  Otherwise, another
1217  * processor could cache an old, pre-update entry without being
1218  * invalidated.  This can happen one of two ways: (1) The pmap becomes
1219  * active on another processor after its pm_active field is checked by
1220  * one of the following functions but before a store updating the page
1221  * table is globally performed. (2) The pmap becomes active on another
1222  * processor before its pm_active field is checked but due to
1223  * speculative loads one of the following functions stills reads the
1224  * pmap as inactive on the other processor.
1225  *
1226  * The kernel page table is exempt because its pm_active field is
1227  * immutable.  The kernel page table is always active on every
1228  * processor.
1229  */
1230 static void
1231 pmap_invalidate_page_int(pmap_t pmap, vm_offset_t va)
1232 {
1233 	cpuset_t *mask, other_cpus;
1234 	u_int cpuid;
1235 
1236 	sched_pin();
1237 	if (pmap == kernel_pmap) {
1238 		invlpg(va);
1239 		mask = &all_cpus;
1240 	} else if (!CPU_CMP(&pmap->pm_active, &all_cpus)) {
1241 		mask = &all_cpus;
1242 	} else {
1243 		cpuid = PCPU_GET(cpuid);
1244 		other_cpus = all_cpus;
1245 		CPU_CLR(cpuid, &other_cpus);
1246 		CPU_AND(&other_cpus, &other_cpus, &pmap->pm_active);
1247 		mask = &other_cpus;
1248 	}
1249 	smp_masked_invlpg(*mask, va, pmap, pmap_curcpu_cb_dummy);
1250 	sched_unpin();
1251 }
1252 
1253 /* 4k PTEs -- Chosen to exceed the total size of Broadwell L2 TLB */
1254 #define	PMAP_INVLPG_THRESHOLD	(4 * 1024 * PAGE_SIZE)
1255 
1256 static void
1257 pmap_invalidate_range_int(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
1258 {
1259 	cpuset_t *mask, other_cpus;
1260 	vm_offset_t addr;
1261 	u_int cpuid;
1262 
1263 	if (eva - sva >= PMAP_INVLPG_THRESHOLD) {
1264 		pmap_invalidate_all_int(pmap);
1265 		return;
1266 	}
1267 
1268 	sched_pin();
1269 	if (pmap == kernel_pmap) {
1270 		for (addr = sva; addr < eva; addr += PAGE_SIZE)
1271 			invlpg(addr);
1272 		mask = &all_cpus;
1273 	} else  if (!CPU_CMP(&pmap->pm_active, &all_cpus)) {
1274 		mask = &all_cpus;
1275 	} else {
1276 		cpuid = PCPU_GET(cpuid);
1277 		other_cpus = all_cpus;
1278 		CPU_CLR(cpuid, &other_cpus);
1279 		CPU_AND(&other_cpus, &other_cpus, &pmap->pm_active);
1280 		mask = &other_cpus;
1281 	}
1282 	smp_masked_invlpg_range(*mask, sva, eva, pmap, pmap_curcpu_cb_dummy);
1283 	sched_unpin();
1284 }
1285 
1286 static void
1287 pmap_invalidate_all_int(pmap_t pmap)
1288 {
1289 	cpuset_t *mask, other_cpus;
1290 	u_int cpuid;
1291 
1292 	sched_pin();
1293 	if (pmap == kernel_pmap) {
1294 		invltlb();
1295 		mask = &all_cpus;
1296 	} else if (!CPU_CMP(&pmap->pm_active, &all_cpus)) {
1297 		mask = &all_cpus;
1298 	} else {
1299 		cpuid = PCPU_GET(cpuid);
1300 		other_cpus = all_cpus;
1301 		CPU_CLR(cpuid, &other_cpus);
1302 		CPU_AND(&other_cpus, &other_cpus, &pmap->pm_active);
1303 		mask = &other_cpus;
1304 	}
1305 	smp_masked_invltlb(*mask, pmap, pmap_curcpu_cb_dummy);
1306 	sched_unpin();
1307 }
1308 
1309 static void
1310 pmap_invalidate_cache_curcpu_cb(pmap_t pmap __unused,
1311     vm_offset_t addr1 __unused, vm_offset_t addr2 __unused)
1312 {
1313 	wbinvd();
1314 }
1315 
1316 static void
1317 __CONCAT(PMTYPE, invalidate_cache)(void)
1318 {
1319 	smp_cache_flush(pmap_invalidate_cache_curcpu_cb);
1320 }
1321 
1322 struct pde_action {
1323 	cpuset_t invalidate;	/* processors that invalidate their TLB */
1324 	vm_offset_t va;
1325 	pd_entry_t *pde;
1326 	pd_entry_t newpde;
1327 	u_int store;		/* processor that updates the PDE */
1328 };
1329 
1330 static void
1331 pmap_update_pde_kernel(void *arg)
1332 {
1333 	struct pde_action *act = arg;
1334 	pd_entry_t *pde;
1335 
1336 	if (act->store == PCPU_GET(cpuid)) {
1337 		pde = pmap_pde(kernel_pmap, act->va);
1338 		pde_store(pde, act->newpde);
1339 	}
1340 }
1341 
1342 static void
1343 pmap_update_pde_user(void *arg)
1344 {
1345 	struct pde_action *act = arg;
1346 
1347 	if (act->store == PCPU_GET(cpuid))
1348 		pde_store(act->pde, act->newpde);
1349 }
1350 
1351 static void
1352 pmap_update_pde_teardown(void *arg)
1353 {
1354 	struct pde_action *act = arg;
1355 
1356 	if (CPU_ISSET(PCPU_GET(cpuid), &act->invalidate))
1357 		pmap_update_pde_invalidate(act->va, act->newpde);
1358 }
1359 
1360 /*
1361  * Change the page size for the specified virtual address in a way that
1362  * prevents any possibility of the TLB ever having two entries that map the
1363  * same virtual address using different page sizes.  This is the recommended
1364  * workaround for Erratum 383 on AMD Family 10h processors.  It prevents a
1365  * machine check exception for a TLB state that is improperly diagnosed as a
1366  * hardware error.
1367  */
1368 static void
1369 pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
1370 {
1371 	struct pde_action act;
1372 	cpuset_t active, other_cpus;
1373 	u_int cpuid;
1374 
1375 	sched_pin();
1376 	cpuid = PCPU_GET(cpuid);
1377 	other_cpus = all_cpus;
1378 	CPU_CLR(cpuid, &other_cpus);
1379 	if (pmap == kernel_pmap)
1380 		active = all_cpus;
1381 	else
1382 		active = pmap->pm_active;
1383 	if (CPU_OVERLAP(&active, &other_cpus)) {
1384 		act.store = cpuid;
1385 		act.invalidate = active;
1386 		act.va = va;
1387 		act.pde = pde;
1388 		act.newpde = newpde;
1389 		CPU_SET(cpuid, &active);
1390 		smp_rendezvous_cpus(active,
1391 		    smp_no_rendezvous_barrier, pmap == kernel_pmap ?
1392 		    pmap_update_pde_kernel : pmap_update_pde_user,
1393 		    pmap_update_pde_teardown, &act);
1394 	} else {
1395 		if (pmap == kernel_pmap)
1396 			pmap_kenter_pde(va, newpde);
1397 		else
1398 			pde_store(pde, newpde);
1399 		if (CPU_ISSET(cpuid, &active))
1400 			pmap_update_pde_invalidate(va, newpde);
1401 	}
1402 	sched_unpin();
1403 }
1404 #else /* !SMP */
1405 /*
1406  * Normal, non-SMP, 486+ invalidation functions.
1407  * We inline these within pmap.c for speed.
1408  */
1409 static void
1410 pmap_invalidate_page_int(pmap_t pmap, vm_offset_t va)
1411 {
1412 
1413 	if (pmap == kernel_pmap)
1414 		invlpg(va);
1415 }
1416 
1417 static void
1418 pmap_invalidate_range_int(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
1419 {
1420 	vm_offset_t addr;
1421 
1422 	if (pmap == kernel_pmap)
1423 		for (addr = sva; addr < eva; addr += PAGE_SIZE)
1424 			invlpg(addr);
1425 }
1426 
1427 static void
1428 pmap_invalidate_all_int(pmap_t pmap)
1429 {
1430 
1431 	if (pmap == kernel_pmap)
1432 		invltlb();
1433 }
1434 
1435 static void
1436 __CONCAT(PMTYPE, invalidate_cache)(void)
1437 {
1438 
1439 	wbinvd();
1440 }
1441 
1442 static void
1443 pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
1444 {
1445 
1446 	if (pmap == kernel_pmap)
1447 		pmap_kenter_pde(va, newpde);
1448 	else
1449 		pde_store(pde, newpde);
1450 	if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
1451 		pmap_update_pde_invalidate(va, newpde);
1452 }
1453 #endif /* !SMP */
1454 
1455 static void
1456 __CONCAT(PMTYPE, invalidate_page)(pmap_t pmap, vm_offset_t va)
1457 {
1458 
1459 	pmap_invalidate_page_int(pmap, va);
1460 }
1461 
1462 static void
1463 __CONCAT(PMTYPE, invalidate_range)(pmap_t pmap, vm_offset_t sva,
1464     vm_offset_t eva)
1465 {
1466 
1467 	pmap_invalidate_range_int(pmap, sva, eva);
1468 }
1469 
1470 static void
1471 __CONCAT(PMTYPE, invalidate_all)(pmap_t pmap)
1472 {
1473 
1474 	pmap_invalidate_all_int(pmap);
1475 }
1476 
1477 static void
1478 pmap_invalidate_pde_page(pmap_t pmap, vm_offset_t va, pd_entry_t pde)
1479 {
1480 
1481 	/*
1482 	 * When the PDE has PG_PROMOTED set, the 2- or 4MB page mapping was
1483 	 * created by a promotion that did not invalidate the 512 or 1024 4KB
1484 	 * page mappings that might exist in the TLB.  Consequently, at this
1485 	 * point, the TLB may hold both 4KB and 2- or 4MB page mappings for
1486 	 * the address range [va, va + NBPDR).  Therefore, the entire range
1487 	 * must be invalidated here.  In contrast, when PG_PROMOTED is clear,
1488 	 * the TLB will not hold any 4KB page mappings for the address range
1489 	 * [va, va + NBPDR), and so a single INVLPG suffices to invalidate the
1490 	 * 2- or 4MB page mapping from the TLB.
1491 	 */
1492 	if ((pde & PG_PROMOTED) != 0)
1493 		pmap_invalidate_range_int(pmap, va, va + NBPDR - 1);
1494 	else
1495 		pmap_invalidate_page_int(pmap, va);
1496 }
1497 
1498 /*
1499  * Are we current address space or kernel?
1500  */
1501 static __inline int
1502 pmap_is_current(pmap_t pmap)
1503 {
1504 
1505 	return (pmap == kernel_pmap);
1506 }
1507 
1508 /*
1509  * If the given pmap is not the current or kernel pmap, the returned pte must
1510  * be released by passing it to pmap_pte_release().
1511  */
1512 static pt_entry_t *
1513 __CONCAT(PMTYPE, pte)(pmap_t pmap, vm_offset_t va)
1514 {
1515 	pd_entry_t newpf;
1516 	pd_entry_t *pde;
1517 
1518 	pde = pmap_pde(pmap, va);
1519 	if (*pde & PG_PS)
1520 		return (pde);
1521 	if (*pde != 0) {
1522 		/* are we current address space or kernel? */
1523 		if (pmap_is_current(pmap))
1524 			return (vtopte(va));
1525 		mtx_lock(&PMAP2mutex);
1526 		newpf = *pde & PG_FRAME;
1527 		if ((*PMAP2 & PG_FRAME) != newpf) {
1528 			*PMAP2 = newpf | PG_RW | PG_V | PG_A | PG_M;
1529 			pmap_invalidate_page_int(kernel_pmap,
1530 			    (vm_offset_t)PADDR2);
1531 		}
1532 		return (PADDR2 + (i386_btop(va) & (NPTEPG - 1)));
1533 	}
1534 	return (NULL);
1535 }
1536 
1537 /*
1538  * Releases a pte that was obtained from pmap_pte().  Be prepared for the pte
1539  * being NULL.
1540  */
1541 static __inline void
1542 pmap_pte_release(pt_entry_t *pte)
1543 {
1544 
1545 	if ((pt_entry_t *)((vm_offset_t)pte & ~PAGE_MASK) == PADDR2)
1546 		mtx_unlock(&PMAP2mutex);
1547 }
1548 
1549 /*
1550  * NB:  The sequence of updating a page table followed by accesses to the
1551  * corresponding pages is subject to the situation described in the "AMD64
1552  * Architecture Programmer's Manual Volume 2: System Programming" rev. 3.23,
1553  * "7.3.1 Special Coherency Considerations".  Therefore, issuing the INVLPG
1554  * right after modifying the PTE bits is crucial.
1555  */
1556 static __inline void
1557 invlcaddr(void *caddr)
1558 {
1559 
1560 	invlpg((u_int)caddr);
1561 }
1562 
1563 /*
1564  * Super fast pmap_pte routine best used when scanning
1565  * the pv lists.  This eliminates many coarse-grained
1566  * invltlb calls.  Note that many of the pv list
1567  * scans are across different pmaps.  It is very wasteful
1568  * to do an entire invltlb for checking a single mapping.
1569  *
1570  * If the given pmap is not the current pmap, pvh_global_lock
1571  * must be held and curthread pinned to a CPU.
1572  */
1573 static pt_entry_t *
1574 pmap_pte_quick(pmap_t pmap, vm_offset_t va)
1575 {
1576 	pd_entry_t newpf;
1577 	pd_entry_t *pde;
1578 
1579 	pde = pmap_pde(pmap, va);
1580 	if (*pde & PG_PS)
1581 		return (pde);
1582 	if (*pde != 0) {
1583 		/* are we current address space or kernel? */
1584 		if (pmap_is_current(pmap))
1585 			return (vtopte(va));
1586 		rw_assert(&pvh_global_lock, RA_WLOCKED);
1587 		KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
1588 		newpf = *pde & PG_FRAME;
1589 		if ((*PMAP1 & PG_FRAME) != newpf) {
1590 			*PMAP1 = newpf | PG_RW | PG_V | PG_A | PG_M;
1591 #ifdef SMP
1592 			PMAP1cpu = PCPU_GET(cpuid);
1593 #endif
1594 			invlcaddr(PADDR1);
1595 			PMAP1changed++;
1596 		} else
1597 #ifdef SMP
1598 		if (PMAP1cpu != PCPU_GET(cpuid)) {
1599 			PMAP1cpu = PCPU_GET(cpuid);
1600 			invlcaddr(PADDR1);
1601 			PMAP1changedcpu++;
1602 		} else
1603 #endif
1604 			PMAP1unchanged++;
1605 		return (PADDR1 + (i386_btop(va) & (NPTEPG - 1)));
1606 	}
1607 	return (0);
1608 }
1609 
1610 static pt_entry_t *
1611 pmap_pte_quick3(pmap_t pmap, vm_offset_t va)
1612 {
1613 	pd_entry_t newpf;
1614 	pd_entry_t *pde;
1615 
1616 	pde = pmap_pde(pmap, va);
1617 	if (*pde & PG_PS)
1618 		return (pde);
1619 	if (*pde != 0) {
1620 		rw_assert(&pvh_global_lock, RA_WLOCKED);
1621 		KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
1622 		newpf = *pde & PG_FRAME;
1623 		if ((*PMAP3 & PG_FRAME) != newpf) {
1624 			*PMAP3 = newpf | PG_RW | PG_V | PG_A | PG_M;
1625 #ifdef SMP
1626 			PMAP3cpu = PCPU_GET(cpuid);
1627 #endif
1628 			invlcaddr(PADDR3);
1629 			PMAP1changed++;
1630 		} else
1631 #ifdef SMP
1632 		if (PMAP3cpu != PCPU_GET(cpuid)) {
1633 			PMAP3cpu = PCPU_GET(cpuid);
1634 			invlcaddr(PADDR3);
1635 			PMAP1changedcpu++;
1636 		} else
1637 #endif
1638 			PMAP1unchanged++;
1639 		return (PADDR3 + (i386_btop(va) & (NPTEPG - 1)));
1640 	}
1641 	return (0);
1642 }
1643 
1644 static pt_entry_t
1645 pmap_pte_ufast(pmap_t pmap, vm_offset_t va, pd_entry_t pde)
1646 {
1647 	pt_entry_t *eh_ptep, pte, *ptep;
1648 
1649 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1650 	pde &= PG_FRAME;
1651 	critical_enter();
1652 	eh_ptep = (pt_entry_t *)PCPU_GET(pmap_eh_ptep);
1653 	if ((*eh_ptep & PG_FRAME) != pde) {
1654 		*eh_ptep = pde | PG_RW | PG_V | PG_A | PG_M;
1655 		invlcaddr((void *)PCPU_GET(pmap_eh_va));
1656 	}
1657 	ptep = (pt_entry_t *)PCPU_GET(pmap_eh_va) + (i386_btop(va) &
1658 	    (NPTEPG - 1));
1659 	pte = *ptep;
1660 	critical_exit();
1661 	return (pte);
1662 }
1663 
1664 /*
1665  * Extract from the kernel page table the physical address that is mapped by
1666  * the given virtual address "va".
1667  *
1668  * This function may be used before pmap_bootstrap() is called.
1669  */
1670 static vm_paddr_t
1671 __CONCAT(PMTYPE, kextract)(vm_offset_t va)
1672 {
1673 	vm_paddr_t pa;
1674 
1675 	if ((pa = pte_load(&PTD[va >> PDRSHIFT])) & PG_PS) {
1676 		pa = (pa & PG_PS_FRAME) | (va & PDRMASK);
1677 	} else {
1678 		/*
1679 		 * Beware of a concurrent promotion that changes the PDE at
1680 		 * this point!  For example, vtopte() must not be used to
1681 		 * access the PTE because it would use the new PDE.  It is,
1682 		 * however, safe to use the old PDE because the page table
1683 		 * page is preserved by the promotion.
1684 		 */
1685 		pa = KPTmap[i386_btop(va)];
1686 		pa = (pa & PG_FRAME) | (va & PAGE_MASK);
1687 	}
1688 	return (pa);
1689 }
1690 
1691 /*
1692  *	Routine:	pmap_extract
1693  *	Function:
1694  *		Extract the physical page address associated
1695  *		with the given map/virtual_address pair.
1696  */
1697 static vm_paddr_t
1698 __CONCAT(PMTYPE, extract)(pmap_t pmap, vm_offset_t va)
1699 {
1700 	vm_paddr_t rtval;
1701 	pt_entry_t pte;
1702 	pd_entry_t pde;
1703 
1704 	rtval = 0;
1705 	PMAP_LOCK(pmap);
1706 	pde = pmap->pm_pdir[va >> PDRSHIFT];
1707 	if (pde != 0) {
1708 		if ((pde & PG_PS) != 0)
1709 			rtval = (pde & PG_PS_FRAME) | (va & PDRMASK);
1710 		else {
1711 			pte = pmap_pte_ufast(pmap, va, pde);
1712 			rtval = (pte & PG_FRAME) | (va & PAGE_MASK);
1713 		}
1714 	}
1715 	PMAP_UNLOCK(pmap);
1716 	return (rtval);
1717 }
1718 
1719 /*
1720  *	Routine:	pmap_extract_and_hold
1721  *	Function:
1722  *		Atomically extract and hold the physical page
1723  *		with the given pmap and virtual address pair
1724  *		if that mapping permits the given protection.
1725  */
1726 static vm_page_t
1727 __CONCAT(PMTYPE, extract_and_hold)(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
1728 {
1729 	pd_entry_t pde;
1730 	pt_entry_t pte;
1731 	vm_page_t m;
1732 
1733 	m = NULL;
1734 	PMAP_LOCK(pmap);
1735 	pde = *pmap_pde(pmap, va);
1736 	if (pde != 0) {
1737 		if (pde & PG_PS) {
1738 			if ((pde & PG_RW) || (prot & VM_PROT_WRITE) == 0)
1739 				m = PHYS_TO_VM_PAGE((pde & PG_PS_FRAME) |
1740 				    (va & PDRMASK));
1741 		} else {
1742 			pte = pmap_pte_ufast(pmap, va, pde);
1743 			if (pte != 0 &&
1744 			    ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0))
1745 				m = PHYS_TO_VM_PAGE(pte & PG_FRAME);
1746 		}
1747 		if (m != NULL && !vm_page_wire_mapped(m))
1748 			m = NULL;
1749 	}
1750 	PMAP_UNLOCK(pmap);
1751 	return (m);
1752 }
1753 
1754 /***************************************************
1755  * Low level mapping routines.....
1756  ***************************************************/
1757 
1758 /*
1759  * Add a wired page to the kva.
1760  * Note: not SMP coherent.
1761  *
1762  * This function may be used before pmap_bootstrap() is called.
1763  */
1764 static void
1765 __CONCAT(PMTYPE, kenter)(vm_offset_t va, vm_paddr_t pa)
1766 {
1767 	pt_entry_t *pte;
1768 
1769 	pte = vtopte(va);
1770 	pte_store(pte, pa | PG_RW | PG_V);
1771 }
1772 
1773 static __inline void
1774 pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode)
1775 {
1776 	pt_entry_t *pte;
1777 
1778 	pte = vtopte(va);
1779 	pte_store(pte, pa | PG_RW | PG_V | pmap_cache_bits(kernel_pmap,
1780 	    mode, 0));
1781 }
1782 
1783 /*
1784  * Remove a page from the kernel pagetables.
1785  * Note: not SMP coherent.
1786  *
1787  * This function may be used before pmap_bootstrap() is called.
1788  */
1789 static void
1790 __CONCAT(PMTYPE, kremove)(vm_offset_t va)
1791 {
1792 	pt_entry_t *pte;
1793 
1794 	pte = vtopte(va);
1795 	pte_clear(pte);
1796 }
1797 
1798 /*
1799  *	Used to map a range of physical addresses into kernel
1800  *	virtual address space.
1801  *
1802  *	The value passed in '*virt' is a suggested virtual address for
1803  *	the mapping. Architectures which can support a direct-mapped
1804  *	physical to virtual region can return the appropriate address
1805  *	within that region, leaving '*virt' unchanged. Other
1806  *	architectures should map the pages starting at '*virt' and
1807  *	update '*virt' with the first usable address after the mapped
1808  *	region.
1809  */
1810 static vm_offset_t
1811 __CONCAT(PMTYPE, map)(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end,
1812     int prot)
1813 {
1814 	vm_offset_t va, sva;
1815 	vm_paddr_t superpage_offset;
1816 	pd_entry_t newpde;
1817 
1818 	va = *virt;
1819 	/*
1820 	 * Does the physical address range's size and alignment permit at
1821 	 * least one superpage mapping to be created?
1822 	 */
1823 	superpage_offset = start & PDRMASK;
1824 	if ((end - start) - ((NBPDR - superpage_offset) & PDRMASK) >= NBPDR) {
1825 		/*
1826 		 * Increase the starting virtual address so that its alignment
1827 		 * does not preclude the use of superpage mappings.
1828 		 */
1829 		if ((va & PDRMASK) < superpage_offset)
1830 			va = (va & ~PDRMASK) + superpage_offset;
1831 		else if ((va & PDRMASK) > superpage_offset)
1832 			va = ((va + PDRMASK) & ~PDRMASK) + superpage_offset;
1833 	}
1834 	sva = va;
1835 	while (start < end) {
1836 		if ((start & PDRMASK) == 0 && end - start >= NBPDR &&
1837 		    pseflag != 0) {
1838 			KASSERT((va & PDRMASK) == 0,
1839 			    ("pmap_map: misaligned va %#x", va));
1840 			newpde = start | PG_PS | PG_RW | PG_V;
1841 			pmap_kenter_pde(va, newpde);
1842 			va += NBPDR;
1843 			start += NBPDR;
1844 		} else {
1845 			pmap_kenter(va, start);
1846 			va += PAGE_SIZE;
1847 			start += PAGE_SIZE;
1848 		}
1849 	}
1850 	pmap_invalidate_range_int(kernel_pmap, sva, va);
1851 	*virt = va;
1852 	return (sva);
1853 }
1854 
1855 /*
1856  * Add a list of wired pages to the kva
1857  * this routine is only used for temporary
1858  * kernel mappings that do not need to have
1859  * page modification or references recorded.
1860  * Note that old mappings are simply written
1861  * over.  The page *must* be wired.
1862  * Note: SMP coherent.  Uses a ranged shootdown IPI.
1863  */
1864 static void
1865 __CONCAT(PMTYPE, qenter)(vm_offset_t sva, vm_page_t *ma, int count)
1866 {
1867 	pt_entry_t *endpte, oldpte, pa, *pte;
1868 	vm_page_t m;
1869 
1870 	oldpte = 0;
1871 	pte = vtopte(sva);
1872 	endpte = pte + count;
1873 	while (pte < endpte) {
1874 		m = *ma++;
1875 		pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(kernel_pmap,
1876 		    m->md.pat_mode, 0);
1877 		if ((*pte & (PG_FRAME | PG_PTE_CACHE)) != pa) {
1878 			oldpte |= *pte;
1879 			pte_store(pte, pa | pg_nx | PG_RW | PG_V);
1880 		}
1881 		pte++;
1882 	}
1883 	if (__predict_false((oldpte & PG_V) != 0))
1884 		pmap_invalidate_range_int(kernel_pmap, sva, sva + count *
1885 		    PAGE_SIZE);
1886 }
1887 
1888 /*
1889  * This routine tears out page mappings from the
1890  * kernel -- it is meant only for temporary mappings.
1891  * Note: SMP coherent.  Uses a ranged shootdown IPI.
1892  */
1893 static void
1894 __CONCAT(PMTYPE, qremove)(vm_offset_t sva, int count)
1895 {
1896 	vm_offset_t va;
1897 
1898 	va = sva;
1899 	while (count-- > 0) {
1900 		pmap_kremove(va);
1901 		va += PAGE_SIZE;
1902 	}
1903 	pmap_invalidate_range_int(kernel_pmap, sva, va);
1904 }
1905 
1906 /***************************************************
1907  * Page table page management routines.....
1908  ***************************************************/
1909 /*
1910  * Schedule the specified unused page table page to be freed.  Specifically,
1911  * add the page to the specified list of pages that will be released to the
1912  * physical memory manager after the TLB has been updated.
1913  */
1914 static __inline void
1915 pmap_add_delayed_free_list(vm_page_t m, struct spglist *free,
1916     boolean_t set_PG_ZERO)
1917 {
1918 
1919 	if (set_PG_ZERO)
1920 		m->flags |= PG_ZERO;
1921 	else
1922 		m->flags &= ~PG_ZERO;
1923 	SLIST_INSERT_HEAD(free, m, plinks.s.ss);
1924 }
1925 
1926 /*
1927  * Inserts the specified page table page into the specified pmap's collection
1928  * of idle page table pages.  Each of a pmap's page table pages is responsible
1929  * for mapping a distinct range of virtual addresses.  The pmap's collection is
1930  * ordered by this virtual address range.
1931  *
1932  * If "promoted" is false, then the page table page "mpte" must be zero filled.
1933  */
1934 static __inline int
1935 pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte, bool promoted)
1936 {
1937 
1938 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1939 	mpte->valid = promoted ? VM_PAGE_BITS_ALL : 0;
1940 	return (vm_radix_insert(&pmap->pm_root, mpte));
1941 }
1942 
1943 /*
1944  * Removes the page table page mapping the specified virtual address from the
1945  * specified pmap's collection of idle page table pages, and returns it.
1946  * Otherwise, returns NULL if there is no page table page corresponding to the
1947  * specified virtual address.
1948  */
1949 static __inline vm_page_t
1950 pmap_remove_pt_page(pmap_t pmap, vm_offset_t va)
1951 {
1952 
1953 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1954 	return (vm_radix_remove(&pmap->pm_root, va >> PDRSHIFT));
1955 }
1956 
1957 /*
1958  * Decrements a page table page's reference count, which is used to record the
1959  * number of valid page table entries within the page.  If the reference count
1960  * drops to zero, then the page table page is unmapped.  Returns TRUE if the
1961  * page table page was unmapped and FALSE otherwise.
1962  */
1963 static inline boolean_t
1964 pmap_unwire_ptp(pmap_t pmap, vm_page_t m, struct spglist *free)
1965 {
1966 
1967 	--m->ref_count;
1968 	if (m->ref_count == 0) {
1969 		_pmap_unwire_ptp(pmap, m, free);
1970 		return (TRUE);
1971 	} else
1972 		return (FALSE);
1973 }
1974 
1975 static void
1976 _pmap_unwire_ptp(pmap_t pmap, vm_page_t m, struct spglist *free)
1977 {
1978 
1979 	/*
1980 	 * unmap the page table page
1981 	 */
1982 	pmap->pm_pdir[m->pindex] = 0;
1983 	--pmap->pm_stats.resident_count;
1984 
1985 	/*
1986 	 * There is not need to invalidate the recursive mapping since
1987 	 * we never instantiate such mapping for the usermode pmaps,
1988 	 * and never remove page table pages from the kernel pmap.
1989 	 * Put page on a list so that it is released since all TLB
1990 	 * shootdown is done.
1991 	 */
1992 	MPASS(pmap != kernel_pmap);
1993 	pmap_add_delayed_free_list(m, free, TRUE);
1994 }
1995 
1996 /*
1997  * After removing a page table entry, this routine is used to
1998  * conditionally free the page, and manage the reference count.
1999  */
2000 static int
2001 pmap_unuse_pt(pmap_t pmap, vm_offset_t va, struct spglist *free)
2002 {
2003 	pd_entry_t ptepde;
2004 	vm_page_t mpte;
2005 
2006 	if (pmap == kernel_pmap)
2007 		return (0);
2008 	ptepde = *pmap_pde(pmap, va);
2009 	mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME);
2010 	return (pmap_unwire_ptp(pmap, mpte, free));
2011 }
2012 
2013 /*
2014  * Release a page table page reference after a failed attempt to create a
2015  * mapping.
2016  */
2017 static void
2018 pmap_abort_ptp(pmap_t pmap, vm_offset_t va, vm_page_t mpte)
2019 {
2020 	struct spglist free;
2021 
2022 	SLIST_INIT(&free);
2023 	if (pmap_unwire_ptp(pmap, mpte, &free)) {
2024 		/*
2025 		 * Although "va" was never mapped, paging-structure caches
2026 		 * could nonetheless have entries that refer to the freed
2027 		 * page table pages.  Invalidate those entries.
2028 		 */
2029 		pmap_invalidate_page_int(pmap, va);
2030 		vm_page_free_pages_toq(&free, true);
2031 	}
2032 }
2033 
2034 /*
2035  * Initialize the pmap for the swapper process.
2036  */
2037 static void
2038 __CONCAT(PMTYPE, pinit0)(pmap_t pmap)
2039 {
2040 
2041 	PMAP_LOCK_INIT(pmap);
2042 	pmap->pm_pdir = IdlePTD;
2043 #ifdef PMAP_PAE_COMP
2044 	pmap->pm_pdpt = IdlePDPT;
2045 #endif
2046 	vm_radix_init(&pmap->pm_root);
2047 	CPU_ZERO(&pmap->pm_active);
2048 	TAILQ_INIT(&pmap->pm_pvchunk);
2049 	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
2050 	pmap_activate_boot(pmap);
2051 }
2052 
2053 /*
2054  * Initialize a preallocated and zeroed pmap structure,
2055  * such as one in a vmspace structure.
2056  */
2057 static int
2058 __CONCAT(PMTYPE, pinit)(pmap_t pmap)
2059 {
2060 	int i;
2061 
2062 	/*
2063 	 * No need to allocate page table space yet but we do need a valid
2064 	 * page directory table.
2065 	 */
2066 	if (pmap->pm_pdir == NULL) {
2067 		pmap->pm_pdir = (pd_entry_t *)kva_alloc(NBPTD);
2068 		if (pmap->pm_pdir == NULL)
2069 			return (0);
2070 #ifdef PMAP_PAE_COMP
2071 		pmap->pm_pdpt = uma_zalloc(pdptzone, M_WAITOK | M_ZERO);
2072 		KASSERT(((vm_offset_t)pmap->pm_pdpt &
2073 		    ((NPGPTD * sizeof(pdpt_entry_t)) - 1)) == 0,
2074 		    ("pmap_pinit: pdpt misaligned"));
2075 		KASSERT(pmap_kextract((vm_offset_t)pmap->pm_pdpt) < (4ULL<<30),
2076 		    ("pmap_pinit: pdpt above 4g"));
2077 #endif
2078 		vm_radix_init(&pmap->pm_root);
2079 	}
2080 	KASSERT(vm_radix_is_empty(&pmap->pm_root),
2081 	    ("pmap_pinit: pmap has reserved page table page(s)"));
2082 
2083 	/*
2084 	 * allocate the page directory page(s)
2085 	 */
2086 	for (i = 0; i < NPGPTD; i++) {
2087 		pmap->pm_ptdpg[i] = vm_page_alloc_noobj(VM_ALLOC_WIRED |
2088 		    VM_ALLOC_ZERO | VM_ALLOC_WAITOK);
2089 #ifdef PMAP_PAE_COMP
2090 		pmap->pm_pdpt[i] = VM_PAGE_TO_PHYS(pmap->pm_ptdpg[i]) | PG_V;
2091 #endif
2092 	}
2093 
2094 	pmap_qenter((vm_offset_t)pmap->pm_pdir, pmap->pm_ptdpg, NPGPTD);
2095 #ifdef PMAP_PAE_COMP
2096 	if ((cpu_feature & CPUID_PAT) == 0) {
2097 		pmap_invalidate_cache_range(
2098 		    trunc_page((vm_offset_t)pmap->pm_pdpt),
2099 		    round_page((vm_offset_t)pmap->pm_pdpt +
2100 		    NPGPTD * sizeof(pdpt_entry_t)));
2101 	}
2102 #endif
2103 
2104 	/* Install the trampoline mapping. */
2105 	pmap->pm_pdir[TRPTDI] = PTD[TRPTDI];
2106 
2107 	CPU_ZERO(&pmap->pm_active);
2108 	TAILQ_INIT(&pmap->pm_pvchunk);
2109 	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
2110 
2111 	return (1);
2112 }
2113 
2114 /*
2115  * this routine is called if the page table page is not
2116  * mapped correctly.
2117  */
2118 static vm_page_t
2119 _pmap_allocpte(pmap_t pmap, u_int ptepindex, u_int flags)
2120 {
2121 	vm_paddr_t ptepa;
2122 	vm_page_t m;
2123 
2124 	/*
2125 	 * Allocate a page table page.
2126 	 */
2127 	if ((m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
2128 		if ((flags & PMAP_ENTER_NOSLEEP) == 0) {
2129 			PMAP_UNLOCK(pmap);
2130 			rw_wunlock(&pvh_global_lock);
2131 			vm_wait(NULL);
2132 			rw_wlock(&pvh_global_lock);
2133 			PMAP_LOCK(pmap);
2134 		}
2135 
2136 		/*
2137 		 * Indicate the need to retry.  While waiting, the page table
2138 		 * page may have been allocated.
2139 		 */
2140 		return (NULL);
2141 	}
2142 	m->pindex = ptepindex;
2143 
2144 	/*
2145 	 * Map the pagetable page into the process address space, if
2146 	 * it isn't already there.
2147 	 */
2148 
2149 	pmap->pm_stats.resident_count++;
2150 
2151 	ptepa = VM_PAGE_TO_PHYS(m);
2152 	KASSERT((pmap->pm_pdir[ptepindex] & PG_V) == 0,
2153 	    ("%s: page directory entry %#jx is valid",
2154 	    __func__, (uintmax_t)pmap->pm_pdir[ptepindex]));
2155 	pmap->pm_pdir[ptepindex] =
2156 	    (pd_entry_t)(ptepa | PG_U | PG_RW | PG_V | PG_A | PG_M);
2157 
2158 	return (m);
2159 }
2160 
2161 static vm_page_t
2162 pmap_allocpte(pmap_t pmap, vm_offset_t va, u_int flags)
2163 {
2164 	u_int ptepindex;
2165 	pd_entry_t ptepa;
2166 	vm_page_t m;
2167 
2168 	/*
2169 	 * Calculate pagetable page index
2170 	 */
2171 	ptepindex = va >> PDRSHIFT;
2172 retry:
2173 	/*
2174 	 * Get the page directory entry
2175 	 */
2176 	ptepa = pmap->pm_pdir[ptepindex];
2177 
2178 	/*
2179 	 * This supports switching from a 4MB page to a
2180 	 * normal 4K page.
2181 	 */
2182 	if (ptepa & PG_PS) {
2183 		(void)pmap_demote_pde(pmap, &pmap->pm_pdir[ptepindex], va);
2184 		ptepa = pmap->pm_pdir[ptepindex];
2185 	}
2186 
2187 	/*
2188 	 * If the page table page is mapped, we just increment the
2189 	 * hold count, and activate it.
2190 	 */
2191 	if (ptepa) {
2192 		m = PHYS_TO_VM_PAGE(ptepa & PG_FRAME);
2193 		m->ref_count++;
2194 	} else {
2195 		/*
2196 		 * Here if the pte page isn't mapped, or if it has
2197 		 * been deallocated.
2198 		 */
2199 		m = _pmap_allocpte(pmap, ptepindex, flags);
2200 		if (m == NULL && (flags & PMAP_ENTER_NOSLEEP) == 0)
2201 			goto retry;
2202 	}
2203 	return (m);
2204 }
2205 
2206 /***************************************************
2207 * Pmap allocation/deallocation routines.
2208  ***************************************************/
2209 
2210 /*
2211  * Release any resources held by the given physical map.
2212  * Called when a pmap initialized by pmap_pinit is being released.
2213  * Should only be called if the map contains no valid mappings.
2214  */
2215 static void
2216 __CONCAT(PMTYPE, release)(pmap_t pmap)
2217 {
2218 	vm_page_t m;
2219 	int i;
2220 
2221 	KASSERT(pmap->pm_stats.resident_count == 0,
2222 	    ("pmap_release: pmap resident count %ld != 0",
2223 	    pmap->pm_stats.resident_count));
2224 	KASSERT(vm_radix_is_empty(&pmap->pm_root),
2225 	    ("pmap_release: pmap has reserved page table page(s)"));
2226 	KASSERT(CPU_EMPTY(&pmap->pm_active),
2227 	    ("releasing active pmap %p", pmap));
2228 
2229 	pmap_qremove((vm_offset_t)pmap->pm_pdir, NPGPTD);
2230 
2231 	for (i = 0; i < NPGPTD; i++) {
2232 		m = pmap->pm_ptdpg[i];
2233 #ifdef PMAP_PAE_COMP
2234 		KASSERT(VM_PAGE_TO_PHYS(m) == (pmap->pm_pdpt[i] & PG_FRAME),
2235 		    ("pmap_release: got wrong ptd page"));
2236 #endif
2237 		vm_page_unwire_noq(m);
2238 		vm_page_free(m);
2239 	}
2240 }
2241 
2242 /*
2243  * grow the number of kernel page table entries, if needed
2244  */
2245 static void
2246 __CONCAT(PMTYPE, growkernel)(vm_offset_t addr)
2247 {
2248 	vm_paddr_t ptppaddr;
2249 	vm_page_t nkpg;
2250 	pd_entry_t newpdir;
2251 
2252 	mtx_assert(&kernel_map->system_mtx, MA_OWNED);
2253 	addr = roundup2(addr, NBPDR);
2254 	if (addr - 1 >= vm_map_max(kernel_map))
2255 		addr = vm_map_max(kernel_map);
2256 	while (kernel_vm_end < addr) {
2257 		if (pdir_pde(PTD, kernel_vm_end)) {
2258 			kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
2259 			if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
2260 				kernel_vm_end = vm_map_max(kernel_map);
2261 				break;
2262 			}
2263 			continue;
2264 		}
2265 
2266 		nkpg = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED |
2267 		    VM_ALLOC_ZERO);
2268 		if (nkpg == NULL)
2269 			panic("pmap_growkernel: no memory to grow kernel");
2270 		nkpg->pindex = kernel_vm_end >> PDRSHIFT;
2271 		nkpt++;
2272 
2273 		ptppaddr = VM_PAGE_TO_PHYS(nkpg);
2274 		newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M);
2275 		pdir_pde(KPTD, kernel_vm_end) = newpdir;
2276 
2277 		pmap_kenter_pde(kernel_vm_end, newpdir);
2278 		kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
2279 		if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
2280 			kernel_vm_end = vm_map_max(kernel_map);
2281 			break;
2282 		}
2283 	}
2284 }
2285 
2286 /***************************************************
2287  * page management routines.
2288  ***************************************************/
2289 
2290 static const uint32_t pc_freemask[_NPCM] = {
2291 	[0 ... _NPCM - 2] = PC_FREEN,
2292 	[_NPCM - 1] = PC_FREEL
2293 };
2294 
2295 #ifdef PV_STATS
2296 extern int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail;
2297 extern long pv_entry_frees, pv_entry_allocs;
2298 extern int pv_entry_spare;
2299 #endif
2300 
2301 /*
2302  * We are in a serious low memory condition.  Resort to
2303  * drastic measures to free some pages so we can allocate
2304  * another pv entry chunk.
2305  */
2306 static vm_page_t
2307 pmap_pv_reclaim(pmap_t locked_pmap)
2308 {
2309 	struct pch newtail;
2310 	struct pv_chunk *pc;
2311 	struct md_page *pvh;
2312 	pd_entry_t *pde;
2313 	pmap_t pmap;
2314 	pt_entry_t *pte, tpte;
2315 	pv_entry_t pv;
2316 	vm_offset_t va;
2317 	vm_page_t m, m_pc;
2318 	struct spglist free;
2319 	uint32_t inuse;
2320 	int bit, field, freed;
2321 
2322 	PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
2323 	pmap = NULL;
2324 	m_pc = NULL;
2325 	SLIST_INIT(&free);
2326 	TAILQ_INIT(&newtail);
2327 	while ((pc = TAILQ_FIRST(&pv_chunks)) != NULL && (pv_vafree == 0 ||
2328 	    SLIST_EMPTY(&free))) {
2329 		TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
2330 		if (pmap != pc->pc_pmap) {
2331 			if (pmap != NULL) {
2332 				pmap_invalidate_all_int(pmap);
2333 				if (pmap != locked_pmap)
2334 					PMAP_UNLOCK(pmap);
2335 			}
2336 			pmap = pc->pc_pmap;
2337 			/* Avoid deadlock and lock recursion. */
2338 			if (pmap > locked_pmap)
2339 				PMAP_LOCK(pmap);
2340 			else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap)) {
2341 				pmap = NULL;
2342 				TAILQ_INSERT_TAIL(&newtail, pc, pc_lru);
2343 				continue;
2344 			}
2345 		}
2346 
2347 		/*
2348 		 * Destroy every non-wired, 4 KB page mapping in the chunk.
2349 		 */
2350 		freed = 0;
2351 		for (field = 0; field < _NPCM; field++) {
2352 			for (inuse = ~pc->pc_map[field] & pc_freemask[field];
2353 			    inuse != 0; inuse &= ~(1UL << bit)) {
2354 				bit = bsfl(inuse);
2355 				pv = &pc->pc_pventry[field * 32 + bit];
2356 				va = pv->pv_va;
2357 				pde = pmap_pde(pmap, va);
2358 				if ((*pde & PG_PS) != 0)
2359 					continue;
2360 				pte = __CONCAT(PMTYPE, pte)(pmap, va);
2361 				tpte = *pte;
2362 				if ((tpte & PG_W) == 0)
2363 					tpte = pte_load_clear(pte);
2364 				pmap_pte_release(pte);
2365 				if ((tpte & PG_W) != 0)
2366 					continue;
2367 				KASSERT(tpte != 0,
2368 				    ("pmap_pv_reclaim: pmap %p va %x zero pte",
2369 				    pmap, va));
2370 				if ((tpte & PG_G) != 0)
2371 					pmap_invalidate_page_int(pmap, va);
2372 				m = PHYS_TO_VM_PAGE(tpte & PG_FRAME);
2373 				if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
2374 					vm_page_dirty(m);
2375 				if ((tpte & PG_A) != 0)
2376 					vm_page_aflag_set(m, PGA_REFERENCED);
2377 				TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
2378 				if (TAILQ_EMPTY(&m->md.pv_list) &&
2379 				    (m->flags & PG_FICTITIOUS) == 0) {
2380 					pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
2381 					if (TAILQ_EMPTY(&pvh->pv_list)) {
2382 						vm_page_aflag_clear(m,
2383 						    PGA_WRITEABLE);
2384 					}
2385 				}
2386 				pc->pc_map[field] |= 1UL << bit;
2387 				pmap_unuse_pt(pmap, va, &free);
2388 				freed++;
2389 			}
2390 		}
2391 		if (freed == 0) {
2392 			TAILQ_INSERT_TAIL(&newtail, pc, pc_lru);
2393 			continue;
2394 		}
2395 		/* Every freed mapping is for a 4 KB page. */
2396 		pmap->pm_stats.resident_count -= freed;
2397 		PV_STAT(pv_entry_frees += freed);
2398 		PV_STAT(pv_entry_spare += freed);
2399 		pv_entry_count -= freed;
2400 		TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2401 		for (field = 0; field < _NPCM; field++)
2402 			if (pc->pc_map[field] != pc_freemask[field]) {
2403 				TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc,
2404 				    pc_list);
2405 				TAILQ_INSERT_TAIL(&newtail, pc, pc_lru);
2406 
2407 				/*
2408 				 * One freed pv entry in locked_pmap is
2409 				 * sufficient.
2410 				 */
2411 				if (pmap == locked_pmap)
2412 					goto out;
2413 				break;
2414 			}
2415 		if (field == _NPCM) {
2416 			PV_STAT(pv_entry_spare -= _NPCPV);
2417 			PV_STAT(pc_chunk_count--);
2418 			PV_STAT(pc_chunk_frees++);
2419 			/* Entire chunk is free; return it. */
2420 			m_pc = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc));
2421 			pmap_qremove((vm_offset_t)pc, 1);
2422 			pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc);
2423 			break;
2424 		}
2425 	}
2426 out:
2427 	TAILQ_CONCAT(&pv_chunks, &newtail, pc_lru);
2428 	if (pmap != NULL) {
2429 		pmap_invalidate_all_int(pmap);
2430 		if (pmap != locked_pmap)
2431 			PMAP_UNLOCK(pmap);
2432 	}
2433 	if (m_pc == NULL && pv_vafree != 0 && SLIST_EMPTY(&free)) {
2434 		m_pc = SLIST_FIRST(&free);
2435 		SLIST_REMOVE_HEAD(&free, plinks.s.ss);
2436 		/* Recycle a freed page table page. */
2437 		m_pc->ref_count = 1;
2438 	}
2439 	vm_page_free_pages_toq(&free, true);
2440 	return (m_pc);
2441 }
2442 
2443 /*
2444  * free the pv_entry back to the free list
2445  */
2446 static void
2447 free_pv_entry(pmap_t pmap, pv_entry_t pv)
2448 {
2449 	struct pv_chunk *pc;
2450 	int idx, field, bit;
2451 
2452 	rw_assert(&pvh_global_lock, RA_WLOCKED);
2453 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2454 	PV_STAT(pv_entry_frees++);
2455 	PV_STAT(pv_entry_spare++);
2456 	pv_entry_count--;
2457 	pc = pv_to_chunk(pv);
2458 	idx = pv - &pc->pc_pventry[0];
2459 	field = idx / 32;
2460 	bit = idx % 32;
2461 	pc->pc_map[field] |= 1ul << bit;
2462 	for (idx = 0; idx < _NPCM; idx++)
2463 		if (pc->pc_map[idx] != pc_freemask[idx]) {
2464 			/*
2465 			 * 98% of the time, pc is already at the head of the
2466 			 * list.  If it isn't already, move it to the head.
2467 			 */
2468 			if (__predict_false(TAILQ_FIRST(&pmap->pm_pvchunk) !=
2469 			    pc)) {
2470 				TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2471 				TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc,
2472 				    pc_list);
2473 			}
2474 			return;
2475 		}
2476 	TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2477 	free_pv_chunk(pc);
2478 }
2479 
2480 static void
2481 free_pv_chunk(struct pv_chunk *pc)
2482 {
2483 	vm_page_t m;
2484 
2485  	TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
2486 	PV_STAT(pv_entry_spare -= _NPCPV);
2487 	PV_STAT(pc_chunk_count--);
2488 	PV_STAT(pc_chunk_frees++);
2489 	/* entire chunk is free, return it */
2490 	m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc));
2491 	pmap_qremove((vm_offset_t)pc, 1);
2492 	vm_page_unwire_noq(m);
2493 	vm_page_free(m);
2494 	pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc);
2495 }
2496 
2497 /*
2498  * get a new pv_entry, allocating a block from the system
2499  * when needed.
2500  */
2501 static pv_entry_t
2502 get_pv_entry(pmap_t pmap, boolean_t try)
2503 {
2504 	static const struct timeval printinterval = { 60, 0 };
2505 	static struct timeval lastprint;
2506 	int bit, field;
2507 	pv_entry_t pv;
2508 	struct pv_chunk *pc;
2509 	vm_page_t m;
2510 
2511 	rw_assert(&pvh_global_lock, RA_WLOCKED);
2512 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2513 	PV_STAT(pv_entry_allocs++);
2514 	pv_entry_count++;
2515 	if (pv_entry_count > pv_entry_high_water)
2516 		if (ratecheck(&lastprint, &printinterval))
2517 			printf("Approaching the limit on PV entries, consider "
2518 			    "increasing either the vm.pmap.shpgperproc or the "
2519 			    "vm.pmap.pv_entries tunable.\n");
2520 retry:
2521 	pc = TAILQ_FIRST(&pmap->pm_pvchunk);
2522 	if (pc != NULL) {
2523 		for (field = 0; field < _NPCM; field++) {
2524 			if (pc->pc_map[field]) {
2525 				bit = bsfl(pc->pc_map[field]);
2526 				break;
2527 			}
2528 		}
2529 		if (field < _NPCM) {
2530 			pv = &pc->pc_pventry[field * 32 + bit];
2531 			pc->pc_map[field] &= ~(1ul << bit);
2532 			/* If this was the last item, move it to tail */
2533 			for (field = 0; field < _NPCM; field++)
2534 				if (pc->pc_map[field] != 0) {
2535 					PV_STAT(pv_entry_spare--);
2536 					return (pv);	/* not full, return */
2537 				}
2538 			TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2539 			TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
2540 			PV_STAT(pv_entry_spare--);
2541 			return (pv);
2542 		}
2543 	}
2544 	/*
2545 	 * Access to the ptelist "pv_vafree" is synchronized by the pvh
2546 	 * global lock.  If "pv_vafree" is currently non-empty, it will
2547 	 * remain non-empty until pmap_ptelist_alloc() completes.
2548 	 */
2549 	if (pv_vafree == 0 ||
2550 	    (m = vm_page_alloc_noobj(VM_ALLOC_WIRED)) == NULL) {
2551 		if (try) {
2552 			pv_entry_count--;
2553 			PV_STAT(pc_chunk_tryfail++);
2554 			return (NULL);
2555 		}
2556 		m = pmap_pv_reclaim(pmap);
2557 		if (m == NULL)
2558 			goto retry;
2559 	}
2560 	PV_STAT(pc_chunk_count++);
2561 	PV_STAT(pc_chunk_allocs++);
2562 	pc = (struct pv_chunk *)pmap_ptelist_alloc(&pv_vafree);
2563 	pmap_qenter((vm_offset_t)pc, &m, 1);
2564 	pc->pc_pmap = pmap;
2565 	pc->pc_map[0] = pc_freemask[0] & ~1ul;	/* preallocated bit 0 */
2566 	for (field = 1; field < _NPCM; field++)
2567 		pc->pc_map[field] = pc_freemask[field];
2568 	TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
2569 	pv = &pc->pc_pventry[0];
2570 	TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
2571 	PV_STAT(pv_entry_spare += _NPCPV - 1);
2572 	return (pv);
2573 }
2574 
2575 static __inline pv_entry_t
2576 pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
2577 {
2578 	pv_entry_t pv;
2579 
2580 	rw_assert(&pvh_global_lock, RA_WLOCKED);
2581 	TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
2582 		if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
2583 			TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
2584 			break;
2585 		}
2586 	}
2587 	return (pv);
2588 }
2589 
2590 static void
2591 pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
2592 {
2593 	struct md_page *pvh;
2594 	pv_entry_t pv;
2595 	vm_offset_t va_last;
2596 	vm_page_t m;
2597 
2598 	rw_assert(&pvh_global_lock, RA_WLOCKED);
2599 	KASSERT((pa & PDRMASK) == 0,
2600 	    ("pmap_pv_demote_pde: pa is not 4mpage aligned"));
2601 
2602 	/*
2603 	 * Transfer the 4mpage's pv entry for this mapping to the first
2604 	 * page's pv list.
2605 	 */
2606 	pvh = pa_to_pvh(pa);
2607 	va = trunc_4mpage(va);
2608 	pv = pmap_pvh_remove(pvh, pmap, va);
2609 	KASSERT(pv != NULL, ("pmap_pv_demote_pde: pv not found"));
2610 	m = PHYS_TO_VM_PAGE(pa);
2611 	TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
2612 	/* Instantiate the remaining NPTEPG - 1 pv entries. */
2613 	va_last = va + NBPDR - PAGE_SIZE;
2614 	do {
2615 		m++;
2616 		KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2617 		    ("pmap_pv_demote_pde: page %p is not managed", m));
2618 		va += PAGE_SIZE;
2619 		pmap_insert_entry(pmap, va, m);
2620 	} while (va < va_last);
2621 }
2622 
2623 #if VM_NRESERVLEVEL > 0
2624 static void
2625 pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
2626 {
2627 	struct md_page *pvh;
2628 	pv_entry_t pv;
2629 	vm_offset_t va_last;
2630 	vm_page_t m;
2631 
2632 	rw_assert(&pvh_global_lock, RA_WLOCKED);
2633 	KASSERT((pa & PDRMASK) == 0,
2634 	    ("pmap_pv_promote_pde: pa is not 4mpage aligned"));
2635 
2636 	/*
2637 	 * Transfer the first page's pv entry for this mapping to the
2638 	 * 4mpage's pv list.  Aside from avoiding the cost of a call
2639 	 * to get_pv_entry(), a transfer avoids the possibility that
2640 	 * get_pv_entry() calls pmap_collect() and that pmap_collect()
2641 	 * removes one of the mappings that is being promoted.
2642 	 */
2643 	m = PHYS_TO_VM_PAGE(pa);
2644 	va = trunc_4mpage(va);
2645 	pv = pmap_pvh_remove(&m->md, pmap, va);
2646 	KASSERT(pv != NULL, ("pmap_pv_promote_pde: pv not found"));
2647 	pvh = pa_to_pvh(pa);
2648 	TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
2649 	/* Free the remaining NPTEPG - 1 pv entries. */
2650 	va_last = va + NBPDR - PAGE_SIZE;
2651 	do {
2652 		m++;
2653 		va += PAGE_SIZE;
2654 		pmap_pvh_free(&m->md, pmap, va);
2655 	} while (va < va_last);
2656 }
2657 #endif /* VM_NRESERVLEVEL > 0 */
2658 
2659 static void
2660 pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
2661 {
2662 	pv_entry_t pv;
2663 
2664 	pv = pmap_pvh_remove(pvh, pmap, va);
2665 	KASSERT(pv != NULL, ("pmap_pvh_free: pv not found"));
2666 	free_pv_entry(pmap, pv);
2667 }
2668 
2669 static void
2670 pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
2671 {
2672 	struct md_page *pvh;
2673 
2674 	rw_assert(&pvh_global_lock, RA_WLOCKED);
2675 	pmap_pvh_free(&m->md, pmap, va);
2676 	if (TAILQ_EMPTY(&m->md.pv_list) && (m->flags & PG_FICTITIOUS) == 0) {
2677 		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
2678 		if (TAILQ_EMPTY(&pvh->pv_list))
2679 			vm_page_aflag_clear(m, PGA_WRITEABLE);
2680 	}
2681 }
2682 
2683 /*
2684  * Create a pv entry for page at pa for
2685  * (pmap, va).
2686  */
2687 static void
2688 pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
2689 {
2690 	pv_entry_t pv;
2691 
2692 	rw_assert(&pvh_global_lock, RA_WLOCKED);
2693 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2694 	pv = get_pv_entry(pmap, FALSE);
2695 	pv->pv_va = va;
2696 	TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
2697 }
2698 
2699 /*
2700  * Conditionally create a pv entry.
2701  */
2702 static boolean_t
2703 pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
2704 {
2705 	pv_entry_t pv;
2706 
2707 	rw_assert(&pvh_global_lock, RA_WLOCKED);
2708 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2709 	if (pv_entry_count < pv_entry_high_water &&
2710 	    (pv = get_pv_entry(pmap, TRUE)) != NULL) {
2711 		pv->pv_va = va;
2712 		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
2713 		return (TRUE);
2714 	} else
2715 		return (FALSE);
2716 }
2717 
2718 /*
2719  * Create the pv entries for each of the pages within a superpage.
2720  */
2721 static bool
2722 pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, pd_entry_t pde, u_int flags)
2723 {
2724 	struct md_page *pvh;
2725 	pv_entry_t pv;
2726 	bool noreclaim;
2727 
2728 	rw_assert(&pvh_global_lock, RA_WLOCKED);
2729 	noreclaim = (flags & PMAP_ENTER_NORECLAIM) != 0;
2730 	if ((noreclaim && pv_entry_count >= pv_entry_high_water) ||
2731 	    (pv = get_pv_entry(pmap, noreclaim)) == NULL)
2732 		return (false);
2733 	pv->pv_va = va;
2734 	pvh = pa_to_pvh(pde & PG_PS_FRAME);
2735 	TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
2736 	return (true);
2737 }
2738 
2739 /*
2740  * Fills a page table page with mappings to consecutive physical pages.
2741  */
2742 static void
2743 pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte)
2744 {
2745 	pt_entry_t *pte;
2746 
2747 	for (pte = firstpte; pte < firstpte + NPTEPG; pte++) {
2748 		*pte = newpte;
2749 		newpte += PAGE_SIZE;
2750 	}
2751 }
2752 
2753 /*
2754  * Tries to demote a 2- or 4MB page mapping.  If demotion fails, the
2755  * 2- or 4MB page mapping is invalidated.
2756  */
2757 static boolean_t
2758 pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
2759 {
2760 	pd_entry_t newpde, oldpde;
2761 	pt_entry_t *firstpte, newpte;
2762 	vm_paddr_t mptepa;
2763 	vm_page_t mpte;
2764 	struct spglist free;
2765 	vm_offset_t sva;
2766 
2767 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2768 	oldpde = *pde;
2769 	KASSERT((oldpde & (PG_PS | PG_V)) == (PG_PS | PG_V),
2770 	    ("pmap_demote_pde: oldpde is missing PG_PS and/or PG_V"));
2771 	if ((oldpde & PG_A) == 0 || (mpte = pmap_remove_pt_page(pmap, va)) ==
2772 	    NULL) {
2773 		KASSERT((oldpde & PG_W) == 0,
2774 		    ("pmap_demote_pde: page table page for a wired mapping"
2775 		    " is missing"));
2776 
2777 		/*
2778 		 * Invalidate the 2- or 4MB page mapping and return
2779 		 * "failure" if the mapping was never accessed or the
2780 		 * allocation of the new page table page fails.
2781 		 */
2782 		if ((oldpde & PG_A) == 0 ||
2783 		    (mpte = vm_page_alloc_noobj(VM_ALLOC_WIRED)) == NULL) {
2784 			SLIST_INIT(&free);
2785 			sva = trunc_4mpage(va);
2786 			pmap_remove_pde(pmap, pde, sva, &free);
2787 			if ((oldpde & PG_G) == 0)
2788 				pmap_invalidate_pde_page(pmap, sva, oldpde);
2789 			vm_page_free_pages_toq(&free, true);
2790 			CTR2(KTR_PMAP, "pmap_demote_pde: failure for va %#x"
2791 			    " in pmap %p", va, pmap);
2792 			return (FALSE);
2793 		}
2794 		mpte->pindex = va >> PDRSHIFT;
2795 		if (pmap != kernel_pmap) {
2796 			mpte->ref_count = NPTEPG;
2797 			pmap->pm_stats.resident_count++;
2798 		}
2799 	}
2800 	mptepa = VM_PAGE_TO_PHYS(mpte);
2801 
2802 	/*
2803 	 * If the page mapping is in the kernel's address space, then the
2804 	 * KPTmap can provide access to the page table page.  Otherwise,
2805 	 * temporarily map the page table page (mpte) into the kernel's
2806 	 * address space at either PADDR1 or PADDR2.
2807 	 */
2808 	if (pmap == kernel_pmap)
2809 		firstpte = &KPTmap[i386_btop(trunc_4mpage(va))];
2810 	else if (curthread->td_pinned > 0 && rw_wowned(&pvh_global_lock)) {
2811 		if ((*PMAP1 & PG_FRAME) != mptepa) {
2812 			*PMAP1 = mptepa | PG_RW | PG_V | PG_A | PG_M;
2813 #ifdef SMP
2814 			PMAP1cpu = PCPU_GET(cpuid);
2815 #endif
2816 			invlcaddr(PADDR1);
2817 			PMAP1changed++;
2818 		} else
2819 #ifdef SMP
2820 		if (PMAP1cpu != PCPU_GET(cpuid)) {
2821 			PMAP1cpu = PCPU_GET(cpuid);
2822 			invlcaddr(PADDR1);
2823 			PMAP1changedcpu++;
2824 		} else
2825 #endif
2826 			PMAP1unchanged++;
2827 		firstpte = PADDR1;
2828 	} else {
2829 		mtx_lock(&PMAP2mutex);
2830 		if ((*PMAP2 & PG_FRAME) != mptepa) {
2831 			*PMAP2 = mptepa | PG_RW | PG_V | PG_A | PG_M;
2832 			pmap_invalidate_page_int(kernel_pmap,
2833 			    (vm_offset_t)PADDR2);
2834 		}
2835 		firstpte = PADDR2;
2836 	}
2837 	newpde = mptepa | PG_M | PG_A | (oldpde & PG_U) | PG_RW | PG_V;
2838 	KASSERT((oldpde & PG_A) != 0,
2839 	    ("pmap_demote_pde: oldpde is missing PG_A"));
2840 	KASSERT((oldpde & (PG_M | PG_RW)) != PG_RW,
2841 	    ("pmap_demote_pde: oldpde is missing PG_M"));
2842 	newpte = oldpde & ~PG_PS;
2843 	if ((newpte & PG_PDE_PAT) != 0)
2844 		newpte ^= PG_PDE_PAT | PG_PTE_PAT;
2845 
2846 	/*
2847 	 * If the page table page is not leftover from an earlier promotion,
2848 	 * initialize it.
2849 	 */
2850 	if (vm_page_none_valid(mpte))
2851 		pmap_fill_ptp(firstpte, newpte);
2852 
2853 	KASSERT((*firstpte & PG_FRAME) == (newpte & PG_FRAME),
2854 	    ("pmap_demote_pde: firstpte and newpte map different physical"
2855 	    " addresses"));
2856 
2857 	/*
2858 	 * If the mapping has changed attributes, update the page table
2859 	 * entries.
2860 	 */
2861 	if ((*firstpte & PG_PTE_PROMOTE) != (newpte & PG_PTE_PROMOTE))
2862 		pmap_fill_ptp(firstpte, newpte);
2863 
2864 	/*
2865 	 * Demote the mapping.  This pmap is locked.  The old PDE has
2866 	 * PG_A set.  If the old PDE has PG_RW set, it also has PG_M
2867 	 * set.  Thus, there is no danger of a race with another
2868 	 * processor changing the setting of PG_A and/or PG_M between
2869 	 * the read above and the store below.
2870 	 */
2871 	if (workaround_erratum383)
2872 		pmap_update_pde(pmap, va, pde, newpde);
2873 	else if (pmap == kernel_pmap)
2874 		pmap_kenter_pde(va, newpde);
2875 	else
2876 		pde_store(pde, newpde);
2877 	if (firstpte == PADDR2)
2878 		mtx_unlock(&PMAP2mutex);
2879 
2880 	/*
2881 	 * Invalidate the recursive mapping of the page table page.
2882 	 */
2883 	pmap_invalidate_page_int(pmap, (vm_offset_t)vtopte(va));
2884 
2885 	/*
2886 	 * Demote the pv entry.  This depends on the earlier demotion
2887 	 * of the mapping.  Specifically, the (re)creation of a per-
2888 	 * page pv entry might trigger the execution of pmap_collect(),
2889 	 * which might reclaim a newly (re)created per-page pv entry
2890 	 * and destroy the associated mapping.  In order to destroy
2891 	 * the mapping, the PDE must have already changed from mapping
2892 	 * the 2mpage to referencing the page table page.
2893 	 */
2894 	if ((oldpde & PG_MANAGED) != 0)
2895 		pmap_pv_demote_pde(pmap, va, oldpde & PG_PS_FRAME);
2896 
2897 	pmap_pde_demotions++;
2898 	CTR2(KTR_PMAP, "pmap_demote_pde: success for va %#x"
2899 	    " in pmap %p", va, pmap);
2900 	return (TRUE);
2901 }
2902 
2903 /*
2904  * Removes a 2- or 4MB page mapping from the kernel pmap.
2905  */
2906 static void
2907 pmap_remove_kernel_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
2908 {
2909 	pd_entry_t newpde;
2910 	vm_paddr_t mptepa;
2911 	vm_page_t mpte;
2912 
2913 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2914 	mpte = pmap_remove_pt_page(pmap, va);
2915 	if (mpte == NULL)
2916 		panic("pmap_remove_kernel_pde: Missing pt page.");
2917 
2918 	mptepa = VM_PAGE_TO_PHYS(mpte);
2919 	newpde = mptepa | PG_M | PG_A | PG_RW | PG_V;
2920 
2921 	/*
2922 	 * If this page table page was unmapped by a promotion, then it
2923 	 * contains valid mappings.  Zero it to invalidate those mappings.
2924 	 */
2925 	if (vm_page_any_valid(mpte))
2926 		pagezero((void *)&KPTmap[i386_btop(trunc_4mpage(va))]);
2927 
2928 	/*
2929 	 * Remove the mapping.
2930 	 */
2931 	if (workaround_erratum383)
2932 		pmap_update_pde(pmap, va, pde, newpde);
2933 	else
2934 		pmap_kenter_pde(va, newpde);
2935 
2936 	/*
2937 	 * Invalidate the recursive mapping of the page table page.
2938 	 */
2939 	pmap_invalidate_page_int(pmap, (vm_offset_t)vtopte(va));
2940 }
2941 
2942 /*
2943  * pmap_remove_pde: do the things to unmap a superpage in a process
2944  */
2945 static void
2946 pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
2947     struct spglist *free)
2948 {
2949 	struct md_page *pvh;
2950 	pd_entry_t oldpde;
2951 	vm_offset_t eva, va;
2952 	vm_page_t m, mpte;
2953 
2954 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2955 	KASSERT((sva & PDRMASK) == 0,
2956 	    ("pmap_remove_pde: sva is not 4mpage aligned"));
2957 	oldpde = pte_load_clear(pdq);
2958 	if (oldpde & PG_W)
2959 		pmap->pm_stats.wired_count -= NBPDR / PAGE_SIZE;
2960 
2961 	/*
2962 	 * Machines that don't support invlpg, also don't support
2963 	 * PG_G.
2964 	 */
2965 	if ((oldpde & PG_G) != 0)
2966 		pmap_invalidate_pde_page(kernel_pmap, sva, oldpde);
2967 
2968 	pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
2969 	if (oldpde & PG_MANAGED) {
2970 		pvh = pa_to_pvh(oldpde & PG_PS_FRAME);
2971 		pmap_pvh_free(pvh, pmap, sva);
2972 		eva = sva + NBPDR;
2973 		for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
2974 		    va < eva; va += PAGE_SIZE, m++) {
2975 			if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW))
2976 				vm_page_dirty(m);
2977 			if (oldpde & PG_A)
2978 				vm_page_aflag_set(m, PGA_REFERENCED);
2979 			if (TAILQ_EMPTY(&m->md.pv_list) &&
2980 			    TAILQ_EMPTY(&pvh->pv_list))
2981 				vm_page_aflag_clear(m, PGA_WRITEABLE);
2982 		}
2983 	}
2984 	if (pmap == kernel_pmap) {
2985 		pmap_remove_kernel_pde(pmap, pdq, sva);
2986 	} else {
2987 		mpte = pmap_remove_pt_page(pmap, sva);
2988 		if (mpte != NULL) {
2989 			KASSERT(vm_page_all_valid(mpte),
2990 			    ("pmap_remove_pde: pte page not promoted"));
2991 			pmap->pm_stats.resident_count--;
2992 			KASSERT(mpte->ref_count == NPTEPG,
2993 			    ("pmap_remove_pde: pte page ref count error"));
2994 			mpte->ref_count = 0;
2995 			pmap_add_delayed_free_list(mpte, free, FALSE);
2996 		}
2997 	}
2998 }
2999 
3000 /*
3001  * pmap_remove_pte: do the things to unmap a page in a process
3002  */
3003 static int
3004 pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va,
3005     struct spglist *free)
3006 {
3007 	pt_entry_t oldpte;
3008 	vm_page_t m;
3009 
3010 	rw_assert(&pvh_global_lock, RA_WLOCKED);
3011 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3012 	oldpte = pte_load_clear(ptq);
3013 	KASSERT(oldpte != 0,
3014 	    ("pmap_remove_pte: pmap %p va %x zero pte", pmap, va));
3015 	if (oldpte & PG_W)
3016 		pmap->pm_stats.wired_count -= 1;
3017 	/*
3018 	 * Machines that don't support invlpg, also don't support
3019 	 * PG_G.
3020 	 */
3021 	if (oldpte & PG_G)
3022 		pmap_invalidate_page_int(kernel_pmap, va);
3023 	pmap->pm_stats.resident_count -= 1;
3024 	if (oldpte & PG_MANAGED) {
3025 		m = PHYS_TO_VM_PAGE(oldpte & PG_FRAME);
3026 		if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
3027 			vm_page_dirty(m);
3028 		if (oldpte & PG_A)
3029 			vm_page_aflag_set(m, PGA_REFERENCED);
3030 		pmap_remove_entry(pmap, m, va);
3031 	}
3032 	return (pmap_unuse_pt(pmap, va, free));
3033 }
3034 
3035 /*
3036  * Remove a single page from a process address space
3037  */
3038 static void
3039 pmap_remove_page(pmap_t pmap, vm_offset_t va, struct spglist *free)
3040 {
3041 	pt_entry_t *pte;
3042 
3043 	rw_assert(&pvh_global_lock, RA_WLOCKED);
3044 	KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
3045 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3046 	if ((pte = pmap_pte_quick(pmap, va)) == NULL || *pte == 0)
3047 		return;
3048 	pmap_remove_pte(pmap, pte, va, free);
3049 	pmap_invalidate_page_int(pmap, va);
3050 }
3051 
3052 /*
3053  * Removes the specified range of addresses from the page table page.
3054  */
3055 static bool
3056 pmap_remove_ptes(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
3057     struct spglist *free)
3058 {
3059 	pt_entry_t *pte;
3060 	bool anyvalid;
3061 
3062 	rw_assert(&pvh_global_lock, RA_WLOCKED);
3063 	KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
3064 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3065 	anyvalid = false;
3066 	for (pte = pmap_pte_quick(pmap, sva); sva != eva; pte++,
3067 	    sva += PAGE_SIZE) {
3068 		if (*pte == 0)
3069 			continue;
3070 
3071 		/*
3072 		 * The TLB entry for a PG_G mapping is invalidated by
3073 		 * pmap_remove_pte().
3074 		 */
3075 		if ((*pte & PG_G) == 0)
3076 			anyvalid = true;
3077 
3078 		if (pmap_remove_pte(pmap, pte, sva, free))
3079 			break;
3080 	}
3081 	return (anyvalid);
3082 }
3083 
3084 /*
3085  *	Remove the given range of addresses from the specified map.
3086  *
3087  *	It is assumed that the start and end are properly
3088  *	rounded to the page size.
3089  */
3090 static void
3091 __CONCAT(PMTYPE, remove)(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
3092 {
3093 	vm_offset_t pdnxt;
3094 	pd_entry_t ptpaddr;
3095 	struct spglist free;
3096 	int anyvalid;
3097 
3098 	/*
3099 	 * Perform an unsynchronized read.  This is, however, safe.
3100 	 */
3101 	if (pmap->pm_stats.resident_count == 0)
3102 		return;
3103 
3104 	anyvalid = 0;
3105 	SLIST_INIT(&free);
3106 
3107 	rw_wlock(&pvh_global_lock);
3108 	sched_pin();
3109 	PMAP_LOCK(pmap);
3110 
3111 	/*
3112 	 * special handling of removing one page.  a very
3113 	 * common operation and easy to short circuit some
3114 	 * code.
3115 	 */
3116 	if ((sva + PAGE_SIZE == eva) &&
3117 	    ((pmap->pm_pdir[(sva >> PDRSHIFT)] & PG_PS) == 0)) {
3118 		pmap_remove_page(pmap, sva, &free);
3119 		goto out;
3120 	}
3121 
3122 	for (; sva < eva; sva = pdnxt) {
3123 		u_int pdirindex;
3124 
3125 		/*
3126 		 * Calculate index for next page table.
3127 		 */
3128 		pdnxt = (sva + NBPDR) & ~PDRMASK;
3129 		if (pdnxt < sva)
3130 			pdnxt = eva;
3131 		if (pmap->pm_stats.resident_count == 0)
3132 			break;
3133 
3134 		pdirindex = sva >> PDRSHIFT;
3135 		ptpaddr = pmap->pm_pdir[pdirindex];
3136 
3137 		/*
3138 		 * Weed out invalid mappings. Note: we assume that the page
3139 		 * directory table is always allocated, and in kernel virtual.
3140 		 */
3141 		if (ptpaddr == 0)
3142 			continue;
3143 
3144 		/*
3145 		 * Check for large page.
3146 		 */
3147 		if ((ptpaddr & PG_PS) != 0) {
3148 			/*
3149 			 * Are we removing the entire large page?  If not,
3150 			 * demote the mapping and fall through.
3151 			 */
3152 			if (sva + NBPDR == pdnxt && eva >= pdnxt) {
3153 				/*
3154 				 * The TLB entry for a PG_G mapping is
3155 				 * invalidated by pmap_remove_pde().
3156 				 */
3157 				if ((ptpaddr & PG_G) == 0)
3158 					anyvalid = 1;
3159 				pmap_remove_pde(pmap,
3160 				    &pmap->pm_pdir[pdirindex], sva, &free);
3161 				continue;
3162 			} else if (!pmap_demote_pde(pmap,
3163 			    &pmap->pm_pdir[pdirindex], sva)) {
3164 				/* The large page mapping was destroyed. */
3165 				continue;
3166 			}
3167 		}
3168 
3169 		/*
3170 		 * Limit our scan to either the end of the va represented
3171 		 * by the current page table page, or to the end of the
3172 		 * range being removed.
3173 		 */
3174 		if (pdnxt > eva)
3175 			pdnxt = eva;
3176 
3177 		if (pmap_remove_ptes(pmap, sva, pdnxt, &free))
3178 			anyvalid = 1;
3179 	}
3180 out:
3181 	sched_unpin();
3182 	if (anyvalid)
3183 		pmap_invalidate_all_int(pmap);
3184 	rw_wunlock(&pvh_global_lock);
3185 	PMAP_UNLOCK(pmap);
3186 	vm_page_free_pages_toq(&free, true);
3187 }
3188 
3189 /*
3190  *	Routine:	pmap_remove_all
3191  *	Function:
3192  *		Removes this physical page from
3193  *		all physical maps in which it resides.
3194  *		Reflects back modify bits to the pager.
3195  *
3196  *	Notes:
3197  *		Original versions of this routine were very
3198  *		inefficient because they iteratively called
3199  *		pmap_remove (slow...)
3200  */
3201 
3202 static void
3203 __CONCAT(PMTYPE, remove_all)(vm_page_t m)
3204 {
3205 	struct md_page *pvh;
3206 	pv_entry_t pv;
3207 	pmap_t pmap;
3208 	pt_entry_t *pte, tpte;
3209 	pd_entry_t *pde;
3210 	vm_offset_t va;
3211 	struct spglist free;
3212 
3213 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3214 	    ("pmap_remove_all: page %p is not managed", m));
3215 	SLIST_INIT(&free);
3216 	rw_wlock(&pvh_global_lock);
3217 	sched_pin();
3218 	if ((m->flags & PG_FICTITIOUS) != 0)
3219 		goto small_mappings;
3220 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
3221 	while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
3222 		va = pv->pv_va;
3223 		pmap = PV_PMAP(pv);
3224 		PMAP_LOCK(pmap);
3225 		pde = pmap_pde(pmap, va);
3226 		(void)pmap_demote_pde(pmap, pde, va);
3227 		PMAP_UNLOCK(pmap);
3228 	}
3229 small_mappings:
3230 	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
3231 		pmap = PV_PMAP(pv);
3232 		PMAP_LOCK(pmap);
3233 		pmap->pm_stats.resident_count--;
3234 		pde = pmap_pde(pmap, pv->pv_va);
3235 		KASSERT((*pde & PG_PS) == 0, ("pmap_remove_all: found"
3236 		    " a 4mpage in page %p's pv list", m));
3237 		pte = pmap_pte_quick(pmap, pv->pv_va);
3238 		tpte = pte_load_clear(pte);
3239 		KASSERT(tpte != 0, ("pmap_remove_all: pmap %p va %x zero pte",
3240 		    pmap, pv->pv_va));
3241 		if (tpte & PG_W)
3242 			pmap->pm_stats.wired_count--;
3243 		if (tpte & PG_A)
3244 			vm_page_aflag_set(m, PGA_REFERENCED);
3245 
3246 		/*
3247 		 * Update the vm_page_t clean and reference bits.
3248 		 */
3249 		if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
3250 			vm_page_dirty(m);
3251 		pmap_unuse_pt(pmap, pv->pv_va, &free);
3252 		pmap_invalidate_page_int(pmap, pv->pv_va);
3253 		TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
3254 		free_pv_entry(pmap, pv);
3255 		PMAP_UNLOCK(pmap);
3256 	}
3257 	vm_page_aflag_clear(m, PGA_WRITEABLE);
3258 	sched_unpin();
3259 	rw_wunlock(&pvh_global_lock);
3260 	vm_page_free_pages_toq(&free, true);
3261 }
3262 
3263 /*
3264  * pmap_protect_pde: do the things to protect a 4mpage in a process
3265  */
3266 static boolean_t
3267 pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot)
3268 {
3269 	pd_entry_t newpde, oldpde;
3270 	vm_page_t m, mt;
3271 	boolean_t anychanged;
3272 
3273 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3274 	KASSERT((sva & PDRMASK) == 0,
3275 	    ("pmap_protect_pde: sva is not 4mpage aligned"));
3276 	anychanged = FALSE;
3277 retry:
3278 	oldpde = newpde = *pde;
3279 	if ((prot & VM_PROT_WRITE) == 0) {
3280 		if ((oldpde & (PG_MANAGED | PG_M | PG_RW)) ==
3281 		    (PG_MANAGED | PG_M | PG_RW)) {
3282 			m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
3283 			for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
3284 				vm_page_dirty(mt);
3285 		}
3286 		newpde &= ~(PG_RW | PG_M);
3287 	}
3288 #ifdef PMAP_PAE_COMP
3289 	if ((prot & VM_PROT_EXECUTE) == 0 && !i386_read_exec)
3290 		newpde |= pg_nx;
3291 #endif
3292 	if (newpde != oldpde) {
3293 		/*
3294 		 * As an optimization to future operations on this PDE, clear
3295 		 * PG_PROMOTED.  The impending invalidation will remove any
3296 		 * lingering 4KB page mappings from the TLB.
3297 		 */
3298 		if (!pde_cmpset(pde, oldpde, newpde & ~PG_PROMOTED))
3299 			goto retry;
3300 		if ((oldpde & PG_G) != 0)
3301 			pmap_invalidate_pde_page(kernel_pmap, sva, oldpde);
3302 		else
3303 			anychanged = TRUE;
3304 	}
3305 	return (anychanged);
3306 }
3307 
3308 /*
3309  *	Set the physical protection on the
3310  *	specified range of this map as requested.
3311  */
3312 static void
3313 __CONCAT(PMTYPE, protect)(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
3314     vm_prot_t prot)
3315 {
3316 	vm_offset_t pdnxt;
3317 	pd_entry_t ptpaddr;
3318 	pt_entry_t *pte;
3319 	boolean_t anychanged, pv_lists_locked;
3320 
3321 	KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot));
3322 	if (prot == VM_PROT_NONE) {
3323 		pmap_remove(pmap, sva, eva);
3324 		return;
3325 	}
3326 
3327 #ifdef PMAP_PAE_COMP
3328 	if ((prot & (VM_PROT_WRITE | VM_PROT_EXECUTE)) ==
3329 	    (VM_PROT_WRITE | VM_PROT_EXECUTE))
3330 		return;
3331 #else
3332 	if (prot & VM_PROT_WRITE)
3333 		return;
3334 #endif
3335 
3336 	if (pmap_is_current(pmap))
3337 		pv_lists_locked = FALSE;
3338 	else {
3339 		pv_lists_locked = TRUE;
3340 resume:
3341 		rw_wlock(&pvh_global_lock);
3342 		sched_pin();
3343 	}
3344 	anychanged = FALSE;
3345 
3346 	PMAP_LOCK(pmap);
3347 	for (; sva < eva; sva = pdnxt) {
3348 		pt_entry_t obits, pbits;
3349 		u_int pdirindex;
3350 
3351 		pdnxt = (sva + NBPDR) & ~PDRMASK;
3352 		if (pdnxt < sva)
3353 			pdnxt = eva;
3354 
3355 		pdirindex = sva >> PDRSHIFT;
3356 		ptpaddr = pmap->pm_pdir[pdirindex];
3357 
3358 		/*
3359 		 * Weed out invalid mappings. Note: we assume that the page
3360 		 * directory table is always allocated, and in kernel virtual.
3361 		 */
3362 		if (ptpaddr == 0)
3363 			continue;
3364 
3365 		/*
3366 		 * Check for large page.
3367 		 */
3368 		if ((ptpaddr & PG_PS) != 0) {
3369 			/*
3370 			 * Are we protecting the entire large page?  If not,
3371 			 * demote the mapping and fall through.
3372 			 */
3373 			if (sva + NBPDR == pdnxt && eva >= pdnxt) {
3374 				/*
3375 				 * The TLB entry for a PG_G mapping is
3376 				 * invalidated by pmap_protect_pde().
3377 				 */
3378 				if (pmap_protect_pde(pmap,
3379 				    &pmap->pm_pdir[pdirindex], sva, prot))
3380 					anychanged = TRUE;
3381 				continue;
3382 			} else {
3383 				if (!pv_lists_locked) {
3384 					pv_lists_locked = TRUE;
3385 					if (!rw_try_wlock(&pvh_global_lock)) {
3386 						if (anychanged)
3387 							pmap_invalidate_all_int(
3388 							    pmap);
3389 						PMAP_UNLOCK(pmap);
3390 						goto resume;
3391 					}
3392 					sched_pin();
3393 				}
3394 				if (!pmap_demote_pde(pmap,
3395 				    &pmap->pm_pdir[pdirindex], sva)) {
3396 					/*
3397 					 * The large page mapping was
3398 					 * destroyed.
3399 					 */
3400 					continue;
3401 				}
3402 			}
3403 		}
3404 
3405 		if (pdnxt > eva)
3406 			pdnxt = eva;
3407 
3408 		for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++,
3409 		    sva += PAGE_SIZE) {
3410 			vm_page_t m;
3411 
3412 retry:
3413 			/*
3414 			 * Regardless of whether a pte is 32 or 64 bits in
3415 			 * size, PG_RW, PG_A, and PG_M are among the least
3416 			 * significant 32 bits.
3417 			 */
3418 			obits = pbits = *pte;
3419 			if ((pbits & PG_V) == 0)
3420 				continue;
3421 
3422 			if ((prot & VM_PROT_WRITE) == 0) {
3423 				if ((pbits & (PG_MANAGED | PG_M | PG_RW)) ==
3424 				    (PG_MANAGED | PG_M | PG_RW)) {
3425 					m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
3426 					vm_page_dirty(m);
3427 				}
3428 				pbits &= ~(PG_RW | PG_M);
3429 			}
3430 #ifdef PMAP_PAE_COMP
3431 			if ((prot & VM_PROT_EXECUTE) == 0 && !i386_read_exec)
3432 				pbits |= pg_nx;
3433 #endif
3434 
3435 			if (pbits != obits) {
3436 #ifdef PMAP_PAE_COMP
3437 				if (!atomic_cmpset_64(pte, obits, pbits))
3438 					goto retry;
3439 #else
3440 				if (!atomic_cmpset_int((u_int *)pte, obits,
3441 				    pbits))
3442 					goto retry;
3443 #endif
3444 				if (obits & PG_G)
3445 					pmap_invalidate_page_int(pmap, sva);
3446 				else
3447 					anychanged = TRUE;
3448 			}
3449 		}
3450 	}
3451 	if (anychanged)
3452 		pmap_invalidate_all_int(pmap);
3453 	if (pv_lists_locked) {
3454 		sched_unpin();
3455 		rw_wunlock(&pvh_global_lock);
3456 	}
3457 	PMAP_UNLOCK(pmap);
3458 }
3459 
3460 #if VM_NRESERVLEVEL > 0
3461 /*
3462  * Tries to promote the 512 or 1024, contiguous 4KB page mappings that are
3463  * within a single page table page (PTP) to a single 2- or 4MB page mapping.
3464  * For promotion to occur, two conditions must be met: (1) the 4KB page
3465  * mappings must map aligned, contiguous physical memory and (2) the 4KB page
3466  * mappings must have identical characteristics.
3467  *
3468  * Managed (PG_MANAGED) mappings within the kernel address space are not
3469  * promoted.  The reason is that kernel PDEs are replicated in each pmap but
3470  * pmap_clear_ptes() and pmap_ts_referenced() only read the PDE from the kernel
3471  * pmap.
3472  */
3473 static void
3474 pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
3475 {
3476 	pd_entry_t newpde;
3477 	pt_entry_t *firstpte, oldpte, pa, *pte;
3478 #ifdef KTR
3479 	vm_offset_t oldpteva;
3480 #endif
3481 	vm_page_t mpte;
3482 
3483 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3484 
3485 	/*
3486 	 * Examine the first PTE in the specified PTP.  Abort if this PTE is
3487 	 * either invalid, unused, or does not map the first 4KB physical page
3488 	 * within a 2- or 4MB page.
3489 	 */
3490 	firstpte = pmap_pte_quick(pmap, trunc_4mpage(va));
3491 setpde:
3492 	newpde = *firstpte;
3493 	if ((newpde & ((PG_FRAME & PDRMASK) | PG_A | PG_V)) != (PG_A | PG_V)) {
3494 		pmap_pde_p_failures++;
3495 		CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x"
3496 		    " in pmap %p", va, pmap);
3497 		return;
3498 	}
3499 	if ((*firstpte & PG_MANAGED) != 0 && pmap == kernel_pmap) {
3500 		pmap_pde_p_failures++;
3501 		CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x"
3502 		    " in pmap %p", va, pmap);
3503 		return;
3504 	}
3505 	if ((newpde & (PG_M | PG_RW)) == PG_RW) {
3506 		/*
3507 		 * When PG_M is already clear, PG_RW can be cleared without
3508 		 * a TLB invalidation.
3509 		 */
3510 		if (!atomic_cmpset_int((u_int *)firstpte, newpde, newpde &
3511 		    ~PG_RW))
3512 			goto setpde;
3513 		newpde &= ~PG_RW;
3514 	}
3515 
3516 	/*
3517 	 * Examine each of the other PTEs in the specified PTP.  Abort if this
3518 	 * PTE maps an unexpected 4KB physical page or does not have identical
3519 	 * characteristics to the first PTE.
3520 	 */
3521 	pa = (newpde & (PG_PS_FRAME | PG_A | PG_V)) + NBPDR - PAGE_SIZE;
3522 	for (pte = firstpte + NPTEPG - 1; pte > firstpte; pte--) {
3523 setpte:
3524 		oldpte = *pte;
3525 		if ((oldpte & (PG_FRAME | PG_A | PG_V)) != pa) {
3526 			pmap_pde_p_failures++;
3527 			CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x"
3528 			    " in pmap %p", va, pmap);
3529 			return;
3530 		}
3531 		if ((oldpte & (PG_M | PG_RW)) == PG_RW) {
3532 			/*
3533 			 * When PG_M is already clear, PG_RW can be cleared
3534 			 * without a TLB invalidation.
3535 			 */
3536 			if (!atomic_cmpset_int((u_int *)pte, oldpte,
3537 			    oldpte & ~PG_RW))
3538 				goto setpte;
3539 			oldpte &= ~PG_RW;
3540 #ifdef KTR
3541 			oldpteva = (oldpte & PG_FRAME & PDRMASK) |
3542 			    (va & ~PDRMASK);
3543 #endif
3544 			CTR2(KTR_PMAP, "pmap_promote_pde: protect for va %#x"
3545 			    " in pmap %p", oldpteva, pmap);
3546 		}
3547 		if ((oldpte & PG_PTE_PROMOTE) != (newpde & PG_PTE_PROMOTE)) {
3548 			pmap_pde_p_failures++;
3549 			CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x"
3550 			    " in pmap %p", va, pmap);
3551 			return;
3552 		}
3553 		pa -= PAGE_SIZE;
3554 	}
3555 
3556 	/*
3557 	 * Save the page table page in its current state until the PDE
3558 	 * mapping the superpage is demoted by pmap_demote_pde() or
3559 	 * destroyed by pmap_remove_pde().
3560 	 */
3561 	mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
3562 	KASSERT(mpte >= vm_page_array &&
3563 	    mpte < &vm_page_array[vm_page_array_size],
3564 	    ("pmap_promote_pde: page table page is out of range"));
3565 	KASSERT(mpte->pindex == va >> PDRSHIFT,
3566 	    ("pmap_promote_pde: page table page's pindex is wrong"));
3567 	if (pmap_insert_pt_page(pmap, mpte, true)) {
3568 		pmap_pde_p_failures++;
3569 		CTR2(KTR_PMAP,
3570 		    "pmap_promote_pde: failure for va %#x in pmap %p", va,
3571 		    pmap);
3572 		return;
3573 	}
3574 
3575 	/*
3576 	 * Promote the pv entries.
3577 	 */
3578 	if ((newpde & PG_MANAGED) != 0)
3579 		pmap_pv_promote_pde(pmap, va, newpde & PG_PS_FRAME);
3580 
3581 	/*
3582 	 * Propagate the PAT index to its proper position.
3583 	 */
3584 	if ((newpde & PG_PTE_PAT) != 0)
3585 		newpde ^= PG_PDE_PAT | PG_PTE_PAT;
3586 
3587 	/*
3588 	 * Map the superpage.
3589 	 */
3590 	if (workaround_erratum383)
3591 		pmap_update_pde(pmap, va, pde, PG_PS | newpde);
3592 	else if (pmap == kernel_pmap)
3593 		pmap_kenter_pde(va, PG_PROMOTED | PG_PS | newpde);
3594 	else
3595 		pde_store(pde, PG_PROMOTED | PG_PS | newpde);
3596 
3597 	pmap_pde_promotions++;
3598 	CTR2(KTR_PMAP, "pmap_promote_pde: success for va %#x"
3599 	    " in pmap %p", va, pmap);
3600 }
3601 #endif /* VM_NRESERVLEVEL > 0 */
3602 
3603 /*
3604  *	Insert the given physical page (p) at
3605  *	the specified virtual address (v) in the
3606  *	target physical map with the protection requested.
3607  *
3608  *	If specified, the page will be wired down, meaning
3609  *	that the related pte can not be reclaimed.
3610  *
3611  *	NB:  This is the only routine which MAY NOT lazy-evaluate
3612  *	or lose information.  That is, this routine must actually
3613  *	insert this page into the given map NOW.
3614  */
3615 static int
3616 __CONCAT(PMTYPE, enter)(pmap_t pmap, vm_offset_t va, vm_page_t m,
3617     vm_prot_t prot, u_int flags, int8_t psind)
3618 {
3619 	pd_entry_t *pde;
3620 	pt_entry_t *pte;
3621 	pt_entry_t newpte, origpte;
3622 	pv_entry_t pv;
3623 	vm_paddr_t opa, pa;
3624 	vm_page_t mpte, om;
3625 	int rv;
3626 
3627 	va = trunc_page(va);
3628 	KASSERT((pmap == kernel_pmap && va < VM_MAX_KERNEL_ADDRESS) ||
3629 	    (pmap != kernel_pmap && va < VM_MAXUSER_ADDRESS),
3630 	    ("pmap_enter: toobig k%d %#x", pmap == kernel_pmap, va));
3631 	KASSERT(va < PMAP_TRM_MIN_ADDRESS,
3632 	    ("pmap_enter: invalid to pmap_enter into trampoline (va: 0x%x)",
3633 	    va));
3634 	KASSERT(pmap != kernel_pmap || (m->oflags & VPO_UNMANAGED) != 0 ||
3635 	    !VA_IS_CLEANMAP(va),
3636 	    ("pmap_enter: managed mapping within the clean submap"));
3637 	if ((m->oflags & VPO_UNMANAGED) == 0)
3638 		VM_PAGE_OBJECT_BUSY_ASSERT(m);
3639 	KASSERT((flags & PMAP_ENTER_RESERVED) == 0,
3640 	    ("pmap_enter: flags %u has reserved bits set", flags));
3641 	pa = VM_PAGE_TO_PHYS(m);
3642 	newpte = (pt_entry_t)(pa | PG_A | PG_V);
3643 	if ((flags & VM_PROT_WRITE) != 0)
3644 		newpte |= PG_M;
3645 	if ((prot & VM_PROT_WRITE) != 0)
3646 		newpte |= PG_RW;
3647 	KASSERT((newpte & (PG_M | PG_RW)) != PG_M,
3648 	    ("pmap_enter: flags includes VM_PROT_WRITE but prot doesn't"));
3649 #ifdef PMAP_PAE_COMP
3650 	if ((prot & VM_PROT_EXECUTE) == 0 && !i386_read_exec)
3651 		newpte |= pg_nx;
3652 #endif
3653 	if ((flags & PMAP_ENTER_WIRED) != 0)
3654 		newpte |= PG_W;
3655 	if (pmap != kernel_pmap)
3656 		newpte |= PG_U;
3657 	newpte |= pmap_cache_bits(pmap, m->md.pat_mode, psind > 0);
3658 	if ((m->oflags & VPO_UNMANAGED) == 0)
3659 		newpte |= PG_MANAGED;
3660 
3661 	rw_wlock(&pvh_global_lock);
3662 	PMAP_LOCK(pmap);
3663 	sched_pin();
3664 	if (psind == 1) {
3665 		/* Assert the required virtual and physical alignment. */
3666 		KASSERT((va & PDRMASK) == 0, ("pmap_enter: va unaligned"));
3667 		KASSERT(m->psind > 0, ("pmap_enter: m->psind < psind"));
3668 		rv = pmap_enter_pde(pmap, va, newpte | PG_PS, flags, m);
3669 		goto out;
3670 	}
3671 
3672 	pde = pmap_pde(pmap, va);
3673 	if (pmap != kernel_pmap) {
3674 		/*
3675 		 * va is for UVA.
3676 		 * In the case that a page table page is not resident,
3677 		 * we are creating it here.  pmap_allocpte() handles
3678 		 * demotion.
3679 		 */
3680 		mpte = pmap_allocpte(pmap, va, flags);
3681 		if (mpte == NULL) {
3682 			KASSERT((flags & PMAP_ENTER_NOSLEEP) != 0,
3683 			    ("pmap_allocpte failed with sleep allowed"));
3684 			rv = KERN_RESOURCE_SHORTAGE;
3685 			goto out;
3686 		}
3687 	} else {
3688 		/*
3689 		 * va is for KVA, so pmap_demote_pde() will never fail
3690 		 * to install a page table page.  PG_V is also
3691 		 * asserted by pmap_demote_pde().
3692 		 */
3693 		mpte = NULL;
3694 		KASSERT(pde != NULL && (*pde & PG_V) != 0,
3695 		    ("KVA %#x invalid pde pdir %#jx", va,
3696 		    (uintmax_t)pmap->pm_pdir[PTDPTDI]));
3697 		if ((*pde & PG_PS) != 0)
3698 			pmap_demote_pde(pmap, pde, va);
3699 	}
3700 	pte = pmap_pte_quick(pmap, va);
3701 
3702 	/*
3703 	 * Page Directory table entry is not valid, which should not
3704 	 * happen.  We should have either allocated the page table
3705 	 * page or demoted the existing mapping above.
3706 	 */
3707 	if (pte == NULL) {
3708 		panic("pmap_enter: invalid page directory pdir=%#jx, va=%#x",
3709 		    (uintmax_t)pmap->pm_pdir[PTDPTDI], va);
3710 	}
3711 
3712 	origpte = *pte;
3713 	pv = NULL;
3714 
3715 	/*
3716 	 * Is the specified virtual address already mapped?
3717 	 */
3718 	if ((origpte & PG_V) != 0) {
3719 		/*
3720 		 * Wiring change, just update stats. We don't worry about
3721 		 * wiring PT pages as they remain resident as long as there
3722 		 * are valid mappings in them. Hence, if a user page is wired,
3723 		 * the PT page will be also.
3724 		 */
3725 		if ((newpte & PG_W) != 0 && (origpte & PG_W) == 0)
3726 			pmap->pm_stats.wired_count++;
3727 		else if ((newpte & PG_W) == 0 && (origpte & PG_W) != 0)
3728 			pmap->pm_stats.wired_count--;
3729 
3730 		/*
3731 		 * Remove the extra PT page reference.
3732 		 */
3733 		if (mpte != NULL) {
3734 			mpte->ref_count--;
3735 			KASSERT(mpte->ref_count > 0,
3736 			    ("pmap_enter: missing reference to page table page,"
3737 			     " va: 0x%x", va));
3738 		}
3739 
3740 		/*
3741 		 * Has the physical page changed?
3742 		 */
3743 		opa = origpte & PG_FRAME;
3744 		if (opa == pa) {
3745 			/*
3746 			 * No, might be a protection or wiring change.
3747 			 */
3748 			if ((origpte & PG_MANAGED) != 0 &&
3749 			    (newpte & PG_RW) != 0)
3750 				vm_page_aflag_set(m, PGA_WRITEABLE);
3751 			if (((origpte ^ newpte) & ~(PG_M | PG_A)) == 0)
3752 				goto unchanged;
3753 			goto validate;
3754 		}
3755 
3756 		/*
3757 		 * The physical page has changed.  Temporarily invalidate
3758 		 * the mapping.  This ensures that all threads sharing the
3759 		 * pmap keep a consistent view of the mapping, which is
3760 		 * necessary for the correct handling of COW faults.  It
3761 		 * also permits reuse of the old mapping's PV entry,
3762 		 * avoiding an allocation.
3763 		 *
3764 		 * For consistency, handle unmanaged mappings the same way.
3765 		 */
3766 		origpte = pte_load_clear(pte);
3767 		KASSERT((origpte & PG_FRAME) == opa,
3768 		    ("pmap_enter: unexpected pa update for %#x", va));
3769 		if ((origpte & PG_MANAGED) != 0) {
3770 			om = PHYS_TO_VM_PAGE(opa);
3771 
3772 			/*
3773 			 * The pmap lock is sufficient to synchronize with
3774 			 * concurrent calls to pmap_page_test_mappings() and
3775 			 * pmap_ts_referenced().
3776 			 */
3777 			if ((origpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
3778 				vm_page_dirty(om);
3779 			if ((origpte & PG_A) != 0) {
3780 				pmap_invalidate_page_int(pmap, va);
3781 				vm_page_aflag_set(om, PGA_REFERENCED);
3782 			}
3783 			pv = pmap_pvh_remove(&om->md, pmap, va);
3784 			KASSERT(pv != NULL,
3785 			    ("pmap_enter: no PV entry for %#x", va));
3786 			if ((newpte & PG_MANAGED) == 0)
3787 				free_pv_entry(pmap, pv);
3788 			if ((om->a.flags & PGA_WRITEABLE) != 0 &&
3789 			    TAILQ_EMPTY(&om->md.pv_list) &&
3790 			    ((om->flags & PG_FICTITIOUS) != 0 ||
3791 			    TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
3792 				vm_page_aflag_clear(om, PGA_WRITEABLE);
3793 		} else {
3794 			/*
3795 			 * Since this mapping is unmanaged, assume that PG_A
3796 			 * is set.
3797 			 */
3798 			pmap_invalidate_page_int(pmap, va);
3799 		}
3800 		origpte = 0;
3801 	} else {
3802 		/*
3803 		 * Increment the counters.
3804 		 */
3805 		if ((newpte & PG_W) != 0)
3806 			pmap->pm_stats.wired_count++;
3807 		pmap->pm_stats.resident_count++;
3808 	}
3809 
3810 	/*
3811 	 * Enter on the PV list if part of our managed memory.
3812 	 */
3813 	if ((newpte & PG_MANAGED) != 0) {
3814 		if (pv == NULL) {
3815 			pv = get_pv_entry(pmap, FALSE);
3816 			pv->pv_va = va;
3817 		}
3818 		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
3819 		if ((newpte & PG_RW) != 0)
3820 			vm_page_aflag_set(m, PGA_WRITEABLE);
3821 	}
3822 
3823 	/*
3824 	 * Update the PTE.
3825 	 */
3826 	if ((origpte & PG_V) != 0) {
3827 validate:
3828 		origpte = pte_load_store(pte, newpte);
3829 		KASSERT((origpte & PG_FRAME) == pa,
3830 		    ("pmap_enter: unexpected pa update for %#x", va));
3831 		if ((newpte & PG_M) == 0 && (origpte & (PG_M | PG_RW)) ==
3832 		    (PG_M | PG_RW)) {
3833 			if ((origpte & PG_MANAGED) != 0)
3834 				vm_page_dirty(m);
3835 
3836 			/*
3837 			 * Although the PTE may still have PG_RW set, TLB
3838 			 * invalidation may nonetheless be required because
3839 			 * the PTE no longer has PG_M set.
3840 			 */
3841 		}
3842 #ifdef PMAP_PAE_COMP
3843 		else if ((origpte & PG_NX) != 0 || (newpte & PG_NX) == 0) {
3844 			/*
3845 			 * This PTE change does not require TLB invalidation.
3846 			 */
3847 			goto unchanged;
3848 		}
3849 #endif
3850 		if ((origpte & PG_A) != 0)
3851 			pmap_invalidate_page_int(pmap, va);
3852 	} else
3853 		pte_store_zero(pte, newpte);
3854 
3855 unchanged:
3856 
3857 #if VM_NRESERVLEVEL > 0
3858 	/*
3859 	 * If both the page table page and the reservation are fully
3860 	 * populated, then attempt promotion.
3861 	 */
3862 	if ((mpte == NULL || mpte->ref_count == NPTEPG) &&
3863 	    pg_ps_enabled && (m->flags & PG_FICTITIOUS) == 0 &&
3864 	    vm_reserv_level_iffullpop(m) == 0)
3865 		pmap_promote_pde(pmap, pde, va);
3866 #endif
3867 
3868 	rv = KERN_SUCCESS;
3869 out:
3870 	sched_unpin();
3871 	rw_wunlock(&pvh_global_lock);
3872 	PMAP_UNLOCK(pmap);
3873 	return (rv);
3874 }
3875 
3876 /*
3877  * Tries to create a read- and/or execute-only 2 or 4 MB page mapping.  Returns
3878  * true if successful.  Returns false if (1) a mapping already exists at the
3879  * specified virtual address or (2) a PV entry cannot be allocated without
3880  * reclaiming another PV entry.
3881  */
3882 static bool
3883 pmap_enter_4mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
3884 {
3885 	pd_entry_t newpde;
3886 
3887 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3888 	newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(pmap, m->md.pat_mode, 1) |
3889 	    PG_PS | PG_V;
3890 	if ((m->oflags & VPO_UNMANAGED) == 0)
3891 		newpde |= PG_MANAGED;
3892 #ifdef PMAP_PAE_COMP
3893 	if ((prot & VM_PROT_EXECUTE) == 0 && !i386_read_exec)
3894 		newpde |= pg_nx;
3895 #endif
3896 	if (pmap != kernel_pmap)
3897 		newpde |= PG_U;
3898 	return (pmap_enter_pde(pmap, va, newpde, PMAP_ENTER_NOSLEEP |
3899 	    PMAP_ENTER_NOREPLACE | PMAP_ENTER_NORECLAIM, NULL) ==
3900 	    KERN_SUCCESS);
3901 }
3902 
3903 /*
3904  * Returns true if every page table entry in the page table page that maps
3905  * the specified kernel virtual address is zero.
3906  */
3907 static bool
3908 pmap_every_pte_zero(vm_offset_t va)
3909 {
3910 	pt_entry_t *pt_end, *pte;
3911 
3912 	KASSERT((va & PDRMASK) == 0, ("va is misaligned"));
3913 	pte = vtopte(va);
3914 	for (pt_end = pte + NPTEPG; pte < pt_end; pte++) {
3915 		if (*pte != 0)
3916 			return (false);
3917 	}
3918 	return (true);
3919 }
3920 
3921 /*
3922  * Tries to create the specified 2 or 4 MB page mapping.  Returns KERN_SUCCESS
3923  * if the mapping was created, and either KERN_FAILURE or
3924  * KERN_RESOURCE_SHORTAGE otherwise.  Returns KERN_FAILURE if
3925  * PMAP_ENTER_NOREPLACE was specified and a mapping already exists at the
3926  * specified virtual address.  Returns KERN_RESOURCE_SHORTAGE if
3927  * PMAP_ENTER_NORECLAIM was specified and a PV entry allocation failed.
3928  *
3929  * The parameter "m" is only used when creating a managed, writeable mapping.
3930  */
3931 static int
3932 pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde, u_int flags,
3933     vm_page_t m)
3934 {
3935 	struct spglist free;
3936 	pd_entry_t oldpde, *pde;
3937 	vm_page_t mt;
3938 
3939 	rw_assert(&pvh_global_lock, RA_WLOCKED);
3940 	KASSERT((newpde & (PG_M | PG_RW)) != PG_RW,
3941 	    ("pmap_enter_pde: newpde is missing PG_M"));
3942 	KASSERT(pmap == kernel_pmap || (newpde & PG_W) == 0,
3943 	    ("pmap_enter_pde: cannot create wired user mapping"));
3944 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3945 	pde = pmap_pde(pmap, va);
3946 	oldpde = *pde;
3947 	if ((oldpde & PG_V) != 0) {
3948 		if ((flags & PMAP_ENTER_NOREPLACE) != 0 && (pmap !=
3949 		    kernel_pmap || (oldpde & PG_PS) != 0 ||
3950 		    !pmap_every_pte_zero(va))) {
3951 			CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
3952 			    " in pmap %p", va, pmap);
3953 			return (KERN_FAILURE);
3954 		}
3955 		/* Break the existing mapping(s). */
3956 		SLIST_INIT(&free);
3957 		if ((oldpde & PG_PS) != 0) {
3958 			/*
3959 			 * If the PDE resulted from a promotion, then a
3960 			 * reserved PT page could be freed.
3961 			 */
3962 			(void)pmap_remove_pde(pmap, pde, va, &free);
3963 			if ((oldpde & PG_G) == 0)
3964 				pmap_invalidate_pde_page(pmap, va, oldpde);
3965 		} else {
3966 			if (pmap_remove_ptes(pmap, va, va + NBPDR, &free))
3967 		               pmap_invalidate_all_int(pmap);
3968 		}
3969 		if (pmap != kernel_pmap) {
3970 			vm_page_free_pages_toq(&free, true);
3971 			KASSERT(*pde == 0, ("pmap_enter_pde: non-zero pde %p",
3972 			    pde));
3973 		} else {
3974 			KASSERT(SLIST_EMPTY(&free),
3975 			    ("pmap_enter_pde: freed kernel page table page"));
3976 
3977 			/*
3978 			 * Both pmap_remove_pde() and pmap_remove_ptes() will
3979 			 * leave the kernel page table page zero filled.
3980 			 */
3981 			mt = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
3982 			if (pmap_insert_pt_page(pmap, mt, false))
3983 				panic("pmap_enter_pde: trie insert failed");
3984 		}
3985 	}
3986 	if ((newpde & PG_MANAGED) != 0) {
3987 		/*
3988 		 * Abort this mapping if its PV entry could not be created.
3989 		 */
3990 		if (!pmap_pv_insert_pde(pmap, va, newpde, flags)) {
3991 			CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
3992 			    " in pmap %p", va, pmap);
3993 			return (KERN_RESOURCE_SHORTAGE);
3994 		}
3995 		if ((newpde & PG_RW) != 0) {
3996 			for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
3997 				vm_page_aflag_set(mt, PGA_WRITEABLE);
3998 		}
3999 	}
4000 
4001 	/*
4002 	 * Increment counters.
4003 	 */
4004 	if ((newpde & PG_W) != 0)
4005 		pmap->pm_stats.wired_count += NBPDR / PAGE_SIZE;
4006 	pmap->pm_stats.resident_count += NBPDR / PAGE_SIZE;
4007 
4008 	/*
4009 	 * Map the superpage.  (This is not a promoted mapping; there will not
4010 	 * be any lingering 4KB page mappings in the TLB.)
4011 	 */
4012 	pde_store(pde, newpde);
4013 
4014 	pmap_pde_mappings++;
4015 	CTR2(KTR_PMAP, "pmap_enter_pde: success for va %#lx in pmap %p",
4016 	    va, pmap);
4017 	return (KERN_SUCCESS);
4018 }
4019 
4020 /*
4021  * Maps a sequence of resident pages belonging to the same object.
4022  * The sequence begins with the given page m_start.  This page is
4023  * mapped at the given virtual address start.  Each subsequent page is
4024  * mapped at a virtual address that is offset from start by the same
4025  * amount as the page is offset from m_start within the object.  The
4026  * last page in the sequence is the page with the largest offset from
4027  * m_start that can be mapped at a virtual address less than the given
4028  * virtual address end.  Not every virtual page between start and end
4029  * is mapped; only those for which a resident page exists with the
4030  * corresponding offset from m_start are mapped.
4031  */
4032 static void
4033 __CONCAT(PMTYPE, enter_object)(pmap_t pmap, vm_offset_t start, vm_offset_t end,
4034     vm_page_t m_start, vm_prot_t prot)
4035 {
4036 	vm_offset_t va;
4037 	vm_page_t m, mpte;
4038 	vm_pindex_t diff, psize;
4039 
4040 	VM_OBJECT_ASSERT_LOCKED(m_start->object);
4041 
4042 	psize = atop(end - start);
4043 	mpte = NULL;
4044 	m = m_start;
4045 	rw_wlock(&pvh_global_lock);
4046 	PMAP_LOCK(pmap);
4047 	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
4048 		va = start + ptoa(diff);
4049 		if ((va & PDRMASK) == 0 && va + NBPDR <= end &&
4050 		    m->psind == 1 && pg_ps_enabled &&
4051 		    pmap_enter_4mpage(pmap, va, m, prot))
4052 			m = &m[NBPDR / PAGE_SIZE - 1];
4053 		else
4054 			mpte = pmap_enter_quick_locked(pmap, va, m, prot,
4055 			    mpte);
4056 		m = TAILQ_NEXT(m, listq);
4057 	}
4058 	rw_wunlock(&pvh_global_lock);
4059 	PMAP_UNLOCK(pmap);
4060 }
4061 
4062 /*
4063  * this code makes some *MAJOR* assumptions:
4064  * 1. Current pmap & pmap exists.
4065  * 2. Not wired.
4066  * 3. Read access.
4067  * 4. No page table pages.
4068  * but is *MUCH* faster than pmap_enter...
4069  */
4070 
4071 static void
4072 __CONCAT(PMTYPE, enter_quick)(pmap_t pmap, vm_offset_t va, vm_page_t m,
4073     vm_prot_t prot)
4074 {
4075 
4076 	rw_wlock(&pvh_global_lock);
4077 	PMAP_LOCK(pmap);
4078 	(void)pmap_enter_quick_locked(pmap, va, m, prot, NULL);
4079 	rw_wunlock(&pvh_global_lock);
4080 	PMAP_UNLOCK(pmap);
4081 }
4082 
4083 static vm_page_t
4084 pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
4085     vm_prot_t prot, vm_page_t mpte)
4086 {
4087 	pt_entry_t newpte, *pte;
4088 
4089 	KASSERT(pmap != kernel_pmap || !VA_IS_CLEANMAP(va) ||
4090 	    (m->oflags & VPO_UNMANAGED) != 0,
4091 	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
4092 	rw_assert(&pvh_global_lock, RA_WLOCKED);
4093 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4094 
4095 	/*
4096 	 * In the case that a page table page is not
4097 	 * resident, we are creating it here.
4098 	 */
4099 	if (pmap != kernel_pmap) {
4100 		u_int ptepindex;
4101 		pd_entry_t ptepa;
4102 
4103 		/*
4104 		 * Calculate pagetable page index
4105 		 */
4106 		ptepindex = va >> PDRSHIFT;
4107 		if (mpte && (mpte->pindex == ptepindex)) {
4108 			mpte->ref_count++;
4109 		} else {
4110 			/*
4111 			 * Get the page directory entry
4112 			 */
4113 			ptepa = pmap->pm_pdir[ptepindex];
4114 
4115 			/*
4116 			 * If the page table page is mapped, we just increment
4117 			 * the hold count, and activate it.
4118 			 */
4119 			if (ptepa) {
4120 				if (ptepa & PG_PS)
4121 					return (NULL);
4122 				mpte = PHYS_TO_VM_PAGE(ptepa & PG_FRAME);
4123 				mpte->ref_count++;
4124 			} else {
4125 				mpte = _pmap_allocpte(pmap, ptepindex,
4126 				    PMAP_ENTER_NOSLEEP);
4127 				if (mpte == NULL)
4128 					return (mpte);
4129 			}
4130 		}
4131 	} else {
4132 		mpte = NULL;
4133 	}
4134 
4135 	sched_pin();
4136 	pte = pmap_pte_quick(pmap, va);
4137 	if (*pte) {
4138 		if (mpte != NULL)
4139 			mpte->ref_count--;
4140 		sched_unpin();
4141 		return (NULL);
4142 	}
4143 
4144 	/*
4145 	 * Enter on the PV list if part of our managed memory.
4146 	 */
4147 	if ((m->oflags & VPO_UNMANAGED) == 0 &&
4148 	    !pmap_try_insert_pv_entry(pmap, va, m)) {
4149 		if (mpte != NULL)
4150 			pmap_abort_ptp(pmap, va, mpte);
4151 		sched_unpin();
4152 		return (NULL);
4153 	}
4154 
4155 	/*
4156 	 * Increment counters
4157 	 */
4158 	pmap->pm_stats.resident_count++;
4159 
4160 	newpte = VM_PAGE_TO_PHYS(m) | PG_V |
4161 	    pmap_cache_bits(pmap, m->md.pat_mode, 0);
4162 	if ((m->oflags & VPO_UNMANAGED) == 0)
4163 		newpte |= PG_MANAGED;
4164 #ifdef PMAP_PAE_COMP
4165 	if ((prot & VM_PROT_EXECUTE) == 0 && !i386_read_exec)
4166 		newpte |= pg_nx;
4167 #endif
4168 	if (pmap != kernel_pmap)
4169 		newpte |= PG_U;
4170 	pte_store_zero(pte, newpte);
4171 	sched_unpin();
4172 	return (mpte);
4173 }
4174 
4175 /*
4176  * Make a temporary mapping for a physical address.  This is only intended
4177  * to be used for panic dumps.
4178  */
4179 static void *
4180 __CONCAT(PMTYPE, kenter_temporary)(vm_paddr_t pa, int i)
4181 {
4182 	vm_offset_t va;
4183 
4184 	va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE);
4185 	pmap_kenter(va, pa);
4186 	invlpg(va);
4187 	return ((void *)crashdumpmap);
4188 }
4189 
4190 /*
4191  * This code maps large physical mmap regions into the
4192  * processor address space.  Note that some shortcuts
4193  * are taken, but the code works.
4194  */
4195 static void
4196 __CONCAT(PMTYPE, object_init_pt)(pmap_t pmap, vm_offset_t addr,
4197     vm_object_t object, vm_pindex_t pindex, vm_size_t size)
4198 {
4199 	pd_entry_t *pde;
4200 	vm_paddr_t pa, ptepa;
4201 	vm_page_t p;
4202 	int pat_mode;
4203 
4204 	VM_OBJECT_ASSERT_WLOCKED(object);
4205 	KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
4206 	    ("pmap_object_init_pt: non-device object"));
4207 	if (pg_ps_enabled &&
4208 	    (addr & (NBPDR - 1)) == 0 && (size & (NBPDR - 1)) == 0) {
4209 		if (!vm_object_populate(object, pindex, pindex + atop(size)))
4210 			return;
4211 		p = vm_page_lookup(object, pindex);
4212 		KASSERT(vm_page_all_valid(p),
4213 		    ("pmap_object_init_pt: invalid page %p", p));
4214 		pat_mode = p->md.pat_mode;
4215 
4216 		/*
4217 		 * Abort the mapping if the first page is not physically
4218 		 * aligned to a 2/4MB page boundary.
4219 		 */
4220 		ptepa = VM_PAGE_TO_PHYS(p);
4221 		if (ptepa & (NBPDR - 1))
4222 			return;
4223 
4224 		/*
4225 		 * Skip the first page.  Abort the mapping if the rest of
4226 		 * the pages are not physically contiguous or have differing
4227 		 * memory attributes.
4228 		 */
4229 		p = TAILQ_NEXT(p, listq);
4230 		for (pa = ptepa + PAGE_SIZE; pa < ptepa + size;
4231 		    pa += PAGE_SIZE) {
4232 			KASSERT(vm_page_all_valid(p),
4233 			    ("pmap_object_init_pt: invalid page %p", p));
4234 			if (pa != VM_PAGE_TO_PHYS(p) ||
4235 			    pat_mode != p->md.pat_mode)
4236 				return;
4237 			p = TAILQ_NEXT(p, listq);
4238 		}
4239 
4240 		/*
4241 		 * Map using 2/4MB pages.  Since "ptepa" is 2/4M aligned and
4242 		 * "size" is a multiple of 2/4M, adding the PAT setting to
4243 		 * "pa" will not affect the termination of this loop.
4244 		 */
4245 		PMAP_LOCK(pmap);
4246 		for (pa = ptepa | pmap_cache_bits(pmap, pat_mode, 1);
4247 		    pa < ptepa + size; pa += NBPDR) {
4248 			pde = pmap_pde(pmap, addr);
4249 			if (*pde == 0) {
4250 				pde_store(pde, pa | PG_PS | PG_M | PG_A |
4251 				    PG_U | PG_RW | PG_V);
4252 				pmap->pm_stats.resident_count += NBPDR /
4253 				    PAGE_SIZE;
4254 				pmap_pde_mappings++;
4255 			}
4256 			/* Else continue on if the PDE is already valid. */
4257 			addr += NBPDR;
4258 		}
4259 		PMAP_UNLOCK(pmap);
4260 	}
4261 }
4262 
4263 /*
4264  *	Clear the wired attribute from the mappings for the specified range of
4265  *	addresses in the given pmap.  Every valid mapping within that range
4266  *	must have the wired attribute set.  In contrast, invalid mappings
4267  *	cannot have the wired attribute set, so they are ignored.
4268  *
4269  *	The wired attribute of the page table entry is not a hardware feature,
4270  *	so there is no need to invalidate any TLB entries.
4271  */
4272 static void
4273 __CONCAT(PMTYPE, unwire)(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
4274 {
4275 	vm_offset_t pdnxt;
4276 	pd_entry_t *pde;
4277 	pt_entry_t *pte;
4278 	boolean_t pv_lists_locked;
4279 
4280 	if (pmap_is_current(pmap))
4281 		pv_lists_locked = FALSE;
4282 	else {
4283 		pv_lists_locked = TRUE;
4284 resume:
4285 		rw_wlock(&pvh_global_lock);
4286 		sched_pin();
4287 	}
4288 	PMAP_LOCK(pmap);
4289 	for (; sva < eva; sva = pdnxt) {
4290 		pdnxt = (sva + NBPDR) & ~PDRMASK;
4291 		if (pdnxt < sva)
4292 			pdnxt = eva;
4293 		pde = pmap_pde(pmap, sva);
4294 		if ((*pde & PG_V) == 0)
4295 			continue;
4296 		if ((*pde & PG_PS) != 0) {
4297 			if ((*pde & PG_W) == 0)
4298 				panic("pmap_unwire: pde %#jx is missing PG_W",
4299 				    (uintmax_t)*pde);
4300 
4301 			/*
4302 			 * Are we unwiring the entire large page?  If not,
4303 			 * demote the mapping and fall through.
4304 			 */
4305 			if (sva + NBPDR == pdnxt && eva >= pdnxt) {
4306 				/*
4307 				 * Regardless of whether a pde (or pte) is 32
4308 				 * or 64 bits in size, PG_W is among the least
4309 				 * significant 32 bits.
4310 				 */
4311 				atomic_clear_int((u_int *)pde, PG_W);
4312 				pmap->pm_stats.wired_count -= NBPDR /
4313 				    PAGE_SIZE;
4314 				continue;
4315 			} else {
4316 				if (!pv_lists_locked) {
4317 					pv_lists_locked = TRUE;
4318 					if (!rw_try_wlock(&pvh_global_lock)) {
4319 						PMAP_UNLOCK(pmap);
4320 						/* Repeat sva. */
4321 						goto resume;
4322 					}
4323 					sched_pin();
4324 				}
4325 				if (!pmap_demote_pde(pmap, pde, sva))
4326 					panic("pmap_unwire: demotion failed");
4327 			}
4328 		}
4329 		if (pdnxt > eva)
4330 			pdnxt = eva;
4331 		for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++,
4332 		    sva += PAGE_SIZE) {
4333 			if ((*pte & PG_V) == 0)
4334 				continue;
4335 			if ((*pte & PG_W) == 0)
4336 				panic("pmap_unwire: pte %#jx is missing PG_W",
4337 				    (uintmax_t)*pte);
4338 
4339 			/*
4340 			 * PG_W must be cleared atomically.  Although the pmap
4341 			 * lock synchronizes access to PG_W, another processor
4342 			 * could be setting PG_M and/or PG_A concurrently.
4343 			 *
4344 			 * PG_W is among the least significant 32 bits.
4345 			 */
4346 			atomic_clear_int((u_int *)pte, PG_W);
4347 			pmap->pm_stats.wired_count--;
4348 		}
4349 	}
4350 	if (pv_lists_locked) {
4351 		sched_unpin();
4352 		rw_wunlock(&pvh_global_lock);
4353 	}
4354 	PMAP_UNLOCK(pmap);
4355 }
4356 
4357 /*
4358  *	Copy the range specified by src_addr/len
4359  *	from the source map to the range dst_addr/len
4360  *	in the destination map.
4361  *
4362  *	This routine is only advisory and need not do anything.  Since
4363  *	current pmap is always the kernel pmap when executing in
4364  *	kernel, and we do not copy from the kernel pmap to a user
4365  *	pmap, this optimization is not usable in 4/4G full split i386
4366  *	world.
4367  */
4368 
4369 static void
4370 __CONCAT(PMTYPE, copy)(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
4371     vm_size_t len, vm_offset_t src_addr)
4372 {
4373 	pt_entry_t *src_pte, *dst_pte, ptetemp;
4374 	pd_entry_t srcptepaddr;
4375 	vm_page_t dstmpte, srcmpte;
4376 	vm_offset_t addr, end_addr, pdnxt;
4377 	u_int ptepindex;
4378 
4379 	if (dst_addr != src_addr)
4380 		return;
4381 
4382 	end_addr = src_addr + len;
4383 
4384 	rw_wlock(&pvh_global_lock);
4385 	if (dst_pmap < src_pmap) {
4386 		PMAP_LOCK(dst_pmap);
4387 		PMAP_LOCK(src_pmap);
4388 	} else {
4389 		PMAP_LOCK(src_pmap);
4390 		PMAP_LOCK(dst_pmap);
4391 	}
4392 	sched_pin();
4393 	for (addr = src_addr; addr < end_addr; addr = pdnxt) {
4394 		KASSERT(addr < PMAP_TRM_MIN_ADDRESS,
4395 		    ("pmap_copy: invalid to pmap_copy the trampoline"));
4396 
4397 		pdnxt = (addr + NBPDR) & ~PDRMASK;
4398 		if (pdnxt < addr)
4399 			pdnxt = end_addr;
4400 		ptepindex = addr >> PDRSHIFT;
4401 
4402 		srcptepaddr = src_pmap->pm_pdir[ptepindex];
4403 		if (srcptepaddr == 0)
4404 			continue;
4405 
4406 		if (srcptepaddr & PG_PS) {
4407 			if ((addr & PDRMASK) != 0 || addr + NBPDR > end_addr)
4408 				continue;
4409 			if (dst_pmap->pm_pdir[ptepindex] == 0 &&
4410 			    ((srcptepaddr & PG_MANAGED) == 0 ||
4411 			    pmap_pv_insert_pde(dst_pmap, addr, srcptepaddr,
4412 			    PMAP_ENTER_NORECLAIM))) {
4413 				dst_pmap->pm_pdir[ptepindex] = srcptepaddr &
4414 				    ~PG_W;
4415 				dst_pmap->pm_stats.resident_count +=
4416 				    NBPDR / PAGE_SIZE;
4417 				pmap_pde_mappings++;
4418 			}
4419 			continue;
4420 		}
4421 
4422 		srcmpte = PHYS_TO_VM_PAGE(srcptepaddr & PG_FRAME);
4423 		KASSERT(srcmpte->ref_count > 0,
4424 		    ("pmap_copy: source page table page is unused"));
4425 
4426 		if (pdnxt > end_addr)
4427 			pdnxt = end_addr;
4428 
4429 		src_pte = pmap_pte_quick3(src_pmap, addr);
4430 		while (addr < pdnxt) {
4431 			ptetemp = *src_pte;
4432 			/*
4433 			 * we only virtual copy managed pages
4434 			 */
4435 			if ((ptetemp & PG_MANAGED) != 0) {
4436 				dstmpte = pmap_allocpte(dst_pmap, addr,
4437 				    PMAP_ENTER_NOSLEEP);
4438 				if (dstmpte == NULL)
4439 					goto out;
4440 				dst_pte = pmap_pte_quick(dst_pmap, addr);
4441 				if (*dst_pte == 0 &&
4442 				    pmap_try_insert_pv_entry(dst_pmap, addr,
4443 				    PHYS_TO_VM_PAGE(ptetemp & PG_FRAME))) {
4444 					/*
4445 					 * Clear the wired, modified, and
4446 					 * accessed (referenced) bits
4447 					 * during the copy.
4448 					 */
4449 					*dst_pte = ptetemp & ~(PG_W | PG_M |
4450 					    PG_A);
4451 					dst_pmap->pm_stats.resident_count++;
4452 				} else {
4453 					pmap_abort_ptp(dst_pmap, addr, dstmpte);
4454 					goto out;
4455 				}
4456 				if (dstmpte->ref_count >= srcmpte->ref_count)
4457 					break;
4458 			}
4459 			addr += PAGE_SIZE;
4460 			src_pte++;
4461 		}
4462 	}
4463 out:
4464 	sched_unpin();
4465 	rw_wunlock(&pvh_global_lock);
4466 	PMAP_UNLOCK(src_pmap);
4467 	PMAP_UNLOCK(dst_pmap);
4468 }
4469 
4470 /*
4471  * Zero 1 page of virtual memory mapped from a hardware page by the caller.
4472  */
4473 static __inline void
4474 pagezero(void *page)
4475 {
4476 #if defined(I686_CPU)
4477 	if (cpu_class == CPUCLASS_686) {
4478 		if (cpu_feature & CPUID_SSE2)
4479 			sse2_pagezero(page);
4480 		else
4481 			i686_pagezero(page);
4482 	} else
4483 #endif
4484 		bzero(page, PAGE_SIZE);
4485 }
4486 
4487 /*
4488  * Zero the specified hardware page.
4489  */
4490 static void
4491 __CONCAT(PMTYPE, zero_page)(vm_page_t m)
4492 {
4493 	pt_entry_t *cmap_pte2;
4494 	struct pcpu *pc;
4495 
4496 	sched_pin();
4497 	pc = get_pcpu();
4498 	cmap_pte2 = pc->pc_cmap_pte2;
4499 	mtx_lock(&pc->pc_cmap_lock);
4500 	if (*cmap_pte2)
4501 		panic("pmap_zero_page: CMAP2 busy");
4502 	*cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
4503 	    pmap_cache_bits(kernel_pmap, m->md.pat_mode, 0);
4504 	invlcaddr(pc->pc_cmap_addr2);
4505 	pagezero(pc->pc_cmap_addr2);
4506 	*cmap_pte2 = 0;
4507 
4508 	/*
4509 	 * Unpin the thread before releasing the lock.  Otherwise the thread
4510 	 * could be rescheduled while still bound to the current CPU, only
4511 	 * to unpin itself immediately upon resuming execution.
4512 	 */
4513 	sched_unpin();
4514 	mtx_unlock(&pc->pc_cmap_lock);
4515 }
4516 
4517 /*
4518  * Zero an area within a single hardware page.  off and size must not
4519  * cover an area beyond a single hardware page.
4520  */
4521 static void
4522 __CONCAT(PMTYPE, zero_page_area)(vm_page_t m, int off, int size)
4523 {
4524 	pt_entry_t *cmap_pte2;
4525 	struct pcpu *pc;
4526 
4527 	sched_pin();
4528 	pc = get_pcpu();
4529 	cmap_pte2 = pc->pc_cmap_pte2;
4530 	mtx_lock(&pc->pc_cmap_lock);
4531 	if (*cmap_pte2)
4532 		panic("pmap_zero_page_area: CMAP2 busy");
4533 	*cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
4534 	    pmap_cache_bits(kernel_pmap, m->md.pat_mode, 0);
4535 	invlcaddr(pc->pc_cmap_addr2);
4536 	if (off == 0 && size == PAGE_SIZE)
4537 		pagezero(pc->pc_cmap_addr2);
4538 	else
4539 		bzero(pc->pc_cmap_addr2 + off, size);
4540 	*cmap_pte2 = 0;
4541 	sched_unpin();
4542 	mtx_unlock(&pc->pc_cmap_lock);
4543 }
4544 
4545 /*
4546  * Copy 1 specified hardware page to another.
4547  */
4548 static void
4549 __CONCAT(PMTYPE, copy_page)(vm_page_t src, vm_page_t dst)
4550 {
4551 	pt_entry_t *cmap_pte1, *cmap_pte2;
4552 	struct pcpu *pc;
4553 
4554 	sched_pin();
4555 	pc = get_pcpu();
4556 	cmap_pte1 = pc->pc_cmap_pte1;
4557 	cmap_pte2 = pc->pc_cmap_pte2;
4558 	mtx_lock(&pc->pc_cmap_lock);
4559 	if (*cmap_pte1)
4560 		panic("pmap_copy_page: CMAP1 busy");
4561 	if (*cmap_pte2)
4562 		panic("pmap_copy_page: CMAP2 busy");
4563 	*cmap_pte1 = PG_V | VM_PAGE_TO_PHYS(src) | PG_A |
4564 	    pmap_cache_bits(kernel_pmap, src->md.pat_mode, 0);
4565 	invlcaddr(pc->pc_cmap_addr1);
4566 	*cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(dst) | PG_A | PG_M |
4567 	    pmap_cache_bits(kernel_pmap, dst->md.pat_mode, 0);
4568 	invlcaddr(pc->pc_cmap_addr2);
4569 	bcopy(pc->pc_cmap_addr1, pc->pc_cmap_addr2, PAGE_SIZE);
4570 	*cmap_pte1 = 0;
4571 	*cmap_pte2 = 0;
4572 	sched_unpin();
4573 	mtx_unlock(&pc->pc_cmap_lock);
4574 }
4575 
4576 static void
4577 __CONCAT(PMTYPE, copy_pages)(vm_page_t ma[], vm_offset_t a_offset,
4578     vm_page_t mb[], vm_offset_t b_offset, int xfersize)
4579 {
4580 	vm_page_t a_pg, b_pg;
4581 	char *a_cp, *b_cp;
4582 	vm_offset_t a_pg_offset, b_pg_offset;
4583 	pt_entry_t *cmap_pte1, *cmap_pte2;
4584 	struct pcpu *pc;
4585 	int cnt;
4586 
4587 	sched_pin();
4588 	pc = get_pcpu();
4589 	cmap_pte1 = pc->pc_cmap_pte1;
4590 	cmap_pte2 = pc->pc_cmap_pte2;
4591 	mtx_lock(&pc->pc_cmap_lock);
4592 	if (*cmap_pte1 != 0)
4593 		panic("pmap_copy_pages: CMAP1 busy");
4594 	if (*cmap_pte2 != 0)
4595 		panic("pmap_copy_pages: CMAP2 busy");
4596 	while (xfersize > 0) {
4597 		a_pg = ma[a_offset >> PAGE_SHIFT];
4598 		a_pg_offset = a_offset & PAGE_MASK;
4599 		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
4600 		b_pg = mb[b_offset >> PAGE_SHIFT];
4601 		b_pg_offset = b_offset & PAGE_MASK;
4602 		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
4603 		*cmap_pte1 = PG_V | VM_PAGE_TO_PHYS(a_pg) | PG_A |
4604 		    pmap_cache_bits(kernel_pmap, a_pg->md.pat_mode, 0);
4605 		invlcaddr(pc->pc_cmap_addr1);
4606 		*cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(b_pg) | PG_A |
4607 		    PG_M | pmap_cache_bits(kernel_pmap, b_pg->md.pat_mode, 0);
4608 		invlcaddr(pc->pc_cmap_addr2);
4609 		a_cp = pc->pc_cmap_addr1 + a_pg_offset;
4610 		b_cp = pc->pc_cmap_addr2 + b_pg_offset;
4611 		bcopy(a_cp, b_cp, cnt);
4612 		a_offset += cnt;
4613 		b_offset += cnt;
4614 		xfersize -= cnt;
4615 	}
4616 	*cmap_pte1 = 0;
4617 	*cmap_pte2 = 0;
4618 	sched_unpin();
4619 	mtx_unlock(&pc->pc_cmap_lock);
4620 }
4621 
4622 /*
4623  * Returns true if the pmap's pv is one of the first
4624  * 16 pvs linked to from this page.  This count may
4625  * be changed upwards or downwards in the future; it
4626  * is only necessary that true be returned for a small
4627  * subset of pmaps for proper page aging.
4628  */
4629 static boolean_t
4630 __CONCAT(PMTYPE, page_exists_quick)(pmap_t pmap, vm_page_t m)
4631 {
4632 	struct md_page *pvh;
4633 	pv_entry_t pv;
4634 	int loops = 0;
4635 	boolean_t rv;
4636 
4637 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4638 	    ("pmap_page_exists_quick: page %p is not managed", m));
4639 	rv = FALSE;
4640 	rw_wlock(&pvh_global_lock);
4641 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
4642 		if (PV_PMAP(pv) == pmap) {
4643 			rv = TRUE;
4644 			break;
4645 		}
4646 		loops++;
4647 		if (loops >= 16)
4648 			break;
4649 	}
4650 	if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) {
4651 		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
4652 		TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
4653 			if (PV_PMAP(pv) == pmap) {
4654 				rv = TRUE;
4655 				break;
4656 			}
4657 			loops++;
4658 			if (loops >= 16)
4659 				break;
4660 		}
4661 	}
4662 	rw_wunlock(&pvh_global_lock);
4663 	return (rv);
4664 }
4665 
4666 /*
4667  *	pmap_page_wired_mappings:
4668  *
4669  *	Return the number of managed mappings to the given physical page
4670  *	that are wired.
4671  */
4672 static int
4673 __CONCAT(PMTYPE, page_wired_mappings)(vm_page_t m)
4674 {
4675 	int count;
4676 
4677 	count = 0;
4678 	if ((m->oflags & VPO_UNMANAGED) != 0)
4679 		return (count);
4680 	rw_wlock(&pvh_global_lock);
4681 	count = pmap_pvh_wired_mappings(&m->md, count);
4682 	if ((m->flags & PG_FICTITIOUS) == 0) {
4683 	    count = pmap_pvh_wired_mappings(pa_to_pvh(VM_PAGE_TO_PHYS(m)),
4684 	        count);
4685 	}
4686 	rw_wunlock(&pvh_global_lock);
4687 	return (count);
4688 }
4689 
4690 /*
4691  *	pmap_pvh_wired_mappings:
4692  *
4693  *	Return the updated number "count" of managed mappings that are wired.
4694  */
4695 static int
4696 pmap_pvh_wired_mappings(struct md_page *pvh, int count)
4697 {
4698 	pmap_t pmap;
4699 	pt_entry_t *pte;
4700 	pv_entry_t pv;
4701 
4702 	rw_assert(&pvh_global_lock, RA_WLOCKED);
4703 	sched_pin();
4704 	TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
4705 		pmap = PV_PMAP(pv);
4706 		PMAP_LOCK(pmap);
4707 		pte = pmap_pte_quick(pmap, pv->pv_va);
4708 		if ((*pte & PG_W) != 0)
4709 			count++;
4710 		PMAP_UNLOCK(pmap);
4711 	}
4712 	sched_unpin();
4713 	return (count);
4714 }
4715 
4716 /*
4717  * Returns TRUE if the given page is mapped individually or as part of
4718  * a 4mpage.  Otherwise, returns FALSE.
4719  */
4720 static boolean_t
4721 __CONCAT(PMTYPE, page_is_mapped)(vm_page_t m)
4722 {
4723 	boolean_t rv;
4724 
4725 	if ((m->oflags & VPO_UNMANAGED) != 0)
4726 		return (FALSE);
4727 	rw_wlock(&pvh_global_lock);
4728 	rv = !TAILQ_EMPTY(&m->md.pv_list) ||
4729 	    ((m->flags & PG_FICTITIOUS) == 0 &&
4730 	    !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list));
4731 	rw_wunlock(&pvh_global_lock);
4732 	return (rv);
4733 }
4734 
4735 /*
4736  * Remove all pages from specified address space
4737  * this aids process exit speeds.  Also, this code
4738  * is special cased for current process only, but
4739  * can have the more generic (and slightly slower)
4740  * mode enabled.  This is much faster than pmap_remove
4741  * in the case of running down an entire address space.
4742  */
4743 static void
4744 __CONCAT(PMTYPE, remove_pages)(pmap_t pmap)
4745 {
4746 	pt_entry_t *pte, tpte;
4747 	vm_page_t m, mpte, mt;
4748 	pv_entry_t pv;
4749 	struct md_page *pvh;
4750 	struct pv_chunk *pc, *npc;
4751 	struct spglist free;
4752 	int field, idx;
4753 	int32_t bit;
4754 	uint32_t inuse, bitmask;
4755 	int allfree;
4756 
4757 	if (pmap != PCPU_GET(curpmap)) {
4758 		printf("warning: pmap_remove_pages called with non-current pmap\n");
4759 		return;
4760 	}
4761 	SLIST_INIT(&free);
4762 	rw_wlock(&pvh_global_lock);
4763 	PMAP_LOCK(pmap);
4764 	sched_pin();
4765 	TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
4766 		KASSERT(pc->pc_pmap == pmap, ("Wrong pmap %p %p", pmap,
4767 		    pc->pc_pmap));
4768 		allfree = 1;
4769 		for (field = 0; field < _NPCM; field++) {
4770 			inuse = ~pc->pc_map[field] & pc_freemask[field];
4771 			while (inuse != 0) {
4772 				bit = bsfl(inuse);
4773 				bitmask = 1UL << bit;
4774 				idx = field * 32 + bit;
4775 				pv = &pc->pc_pventry[idx];
4776 				inuse &= ~bitmask;
4777 
4778 				pte = pmap_pde(pmap, pv->pv_va);
4779 				tpte = *pte;
4780 				if ((tpte & PG_PS) == 0) {
4781 					pte = pmap_pte_quick(pmap, pv->pv_va);
4782 					tpte = *pte & ~PG_PTE_PAT;
4783 				}
4784 
4785 				if (tpte == 0) {
4786 					printf(
4787 					    "TPTE at %p  IS ZERO @ VA %08x\n",
4788 					    pte, pv->pv_va);
4789 					panic("bad pte");
4790 				}
4791 
4792 /*
4793  * We cannot remove wired pages from a process' mapping at this time
4794  */
4795 				if (tpte & PG_W) {
4796 					allfree = 0;
4797 					continue;
4798 				}
4799 
4800 				m = PHYS_TO_VM_PAGE(tpte & PG_FRAME);
4801 				KASSERT(m->phys_addr == (tpte & PG_FRAME),
4802 				    ("vm_page_t %p phys_addr mismatch %016jx %016jx",
4803 				    m, (uintmax_t)m->phys_addr,
4804 				    (uintmax_t)tpte));
4805 
4806 				KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
4807 				    m < &vm_page_array[vm_page_array_size],
4808 				    ("pmap_remove_pages: bad tpte %#jx",
4809 				    (uintmax_t)tpte));
4810 
4811 				pte_clear(pte);
4812 
4813 				/*
4814 				 * Update the vm_page_t clean/reference bits.
4815 				 */
4816 				if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
4817 					if ((tpte & PG_PS) != 0) {
4818 						for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
4819 							vm_page_dirty(mt);
4820 					} else
4821 						vm_page_dirty(m);
4822 				}
4823 
4824 				/* Mark free */
4825 				PV_STAT(pv_entry_frees++);
4826 				PV_STAT(pv_entry_spare++);
4827 				pv_entry_count--;
4828 				pc->pc_map[field] |= bitmask;
4829 				if ((tpte & PG_PS) != 0) {
4830 					pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
4831 					pvh = pa_to_pvh(tpte & PG_PS_FRAME);
4832 					TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
4833 					if (TAILQ_EMPTY(&pvh->pv_list)) {
4834 						for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
4835 							if (TAILQ_EMPTY(&mt->md.pv_list))
4836 								vm_page_aflag_clear(mt, PGA_WRITEABLE);
4837 					}
4838 					mpte = pmap_remove_pt_page(pmap, pv->pv_va);
4839 					if (mpte != NULL) {
4840 						KASSERT(vm_page_all_valid(mpte),
4841 						    ("pmap_remove_pages: pte page not promoted"));
4842 						pmap->pm_stats.resident_count--;
4843 						KASSERT(mpte->ref_count == NPTEPG,
4844 						    ("pmap_remove_pages: pte page ref count error"));
4845 						mpte->ref_count = 0;
4846 						pmap_add_delayed_free_list(mpte, &free, FALSE);
4847 					}
4848 				} else {
4849 					pmap->pm_stats.resident_count--;
4850 					TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
4851 					if (TAILQ_EMPTY(&m->md.pv_list) &&
4852 					    (m->flags & PG_FICTITIOUS) == 0) {
4853 						pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
4854 						if (TAILQ_EMPTY(&pvh->pv_list))
4855 							vm_page_aflag_clear(m, PGA_WRITEABLE);
4856 					}
4857 					pmap_unuse_pt(pmap, pv->pv_va, &free);
4858 				}
4859 			}
4860 		}
4861 		if (allfree) {
4862 			TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
4863 			free_pv_chunk(pc);
4864 		}
4865 	}
4866 	sched_unpin();
4867 	pmap_invalidate_all_int(pmap);
4868 	rw_wunlock(&pvh_global_lock);
4869 	PMAP_UNLOCK(pmap);
4870 	vm_page_free_pages_toq(&free, true);
4871 }
4872 
4873 /*
4874  *	pmap_is_modified:
4875  *
4876  *	Return whether or not the specified physical page was modified
4877  *	in any physical maps.
4878  */
4879 static boolean_t
4880 __CONCAT(PMTYPE, is_modified)(vm_page_t m)
4881 {
4882 	boolean_t rv;
4883 
4884 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4885 	    ("pmap_is_modified: page %p is not managed", m));
4886 
4887 	/*
4888 	 * If the page is not busied then this check is racy.
4889 	 */
4890 	if (!pmap_page_is_write_mapped(m))
4891 		return (FALSE);
4892 	rw_wlock(&pvh_global_lock);
4893 	rv = pmap_is_modified_pvh(&m->md) ||
4894 	    ((m->flags & PG_FICTITIOUS) == 0 &&
4895 	    pmap_is_modified_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
4896 	rw_wunlock(&pvh_global_lock);
4897 	return (rv);
4898 }
4899 
4900 /*
4901  * Returns TRUE if any of the given mappings were used to modify
4902  * physical memory.  Otherwise, returns FALSE.  Both page and 2mpage
4903  * mappings are supported.
4904  */
4905 static boolean_t
4906 pmap_is_modified_pvh(struct md_page *pvh)
4907 {
4908 	pv_entry_t pv;
4909 	pt_entry_t *pte;
4910 	pmap_t pmap;
4911 	boolean_t rv;
4912 
4913 	rw_assert(&pvh_global_lock, RA_WLOCKED);
4914 	rv = FALSE;
4915 	sched_pin();
4916 	TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
4917 		pmap = PV_PMAP(pv);
4918 		PMAP_LOCK(pmap);
4919 		pte = pmap_pte_quick(pmap, pv->pv_va);
4920 		rv = (*pte & (PG_M | PG_RW)) == (PG_M | PG_RW);
4921 		PMAP_UNLOCK(pmap);
4922 		if (rv)
4923 			break;
4924 	}
4925 	sched_unpin();
4926 	return (rv);
4927 }
4928 
4929 /*
4930  *	pmap_is_prefaultable:
4931  *
4932  *	Return whether or not the specified virtual address is elgible
4933  *	for prefault.
4934  */
4935 static boolean_t
4936 __CONCAT(PMTYPE, is_prefaultable)(pmap_t pmap, vm_offset_t addr)
4937 {
4938 	pd_entry_t pde;
4939 	boolean_t rv;
4940 
4941 	rv = FALSE;
4942 	PMAP_LOCK(pmap);
4943 	pde = *pmap_pde(pmap, addr);
4944 	if (pde != 0 && (pde & PG_PS) == 0)
4945 		rv = pmap_pte_ufast(pmap, addr, pde) == 0;
4946 	PMAP_UNLOCK(pmap);
4947 	return (rv);
4948 }
4949 
4950 /*
4951  *	pmap_is_referenced:
4952  *
4953  *	Return whether or not the specified physical page was referenced
4954  *	in any physical maps.
4955  */
4956 static boolean_t
4957 __CONCAT(PMTYPE, is_referenced)(vm_page_t m)
4958 {
4959 	boolean_t rv;
4960 
4961 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4962 	    ("pmap_is_referenced: page %p is not managed", m));
4963 	rw_wlock(&pvh_global_lock);
4964 	rv = pmap_is_referenced_pvh(&m->md) ||
4965 	    ((m->flags & PG_FICTITIOUS) == 0 &&
4966 	    pmap_is_referenced_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
4967 	rw_wunlock(&pvh_global_lock);
4968 	return (rv);
4969 }
4970 
4971 /*
4972  * Returns TRUE if any of the given mappings were referenced and FALSE
4973  * otherwise.  Both page and 4mpage mappings are supported.
4974  */
4975 static boolean_t
4976 pmap_is_referenced_pvh(struct md_page *pvh)
4977 {
4978 	pv_entry_t pv;
4979 	pt_entry_t *pte;
4980 	pmap_t pmap;
4981 	boolean_t rv;
4982 
4983 	rw_assert(&pvh_global_lock, RA_WLOCKED);
4984 	rv = FALSE;
4985 	sched_pin();
4986 	TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
4987 		pmap = PV_PMAP(pv);
4988 		PMAP_LOCK(pmap);
4989 		pte = pmap_pte_quick(pmap, pv->pv_va);
4990 		rv = (*pte & (PG_A | PG_V)) == (PG_A | PG_V);
4991 		PMAP_UNLOCK(pmap);
4992 		if (rv)
4993 			break;
4994 	}
4995 	sched_unpin();
4996 	return (rv);
4997 }
4998 
4999 /*
5000  * Clear the write and modified bits in each of the given page's mappings.
5001  */
5002 static void
5003 __CONCAT(PMTYPE, remove_write)(vm_page_t m)
5004 {
5005 	struct md_page *pvh;
5006 	pv_entry_t next_pv, pv;
5007 	pmap_t pmap;
5008 	pd_entry_t *pde;
5009 	pt_entry_t oldpte, *pte;
5010 	vm_offset_t va;
5011 
5012 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
5013 	    ("pmap_remove_write: page %p is not managed", m));
5014 	vm_page_assert_busied(m);
5015 
5016 	if (!pmap_page_is_write_mapped(m))
5017 		return;
5018 	rw_wlock(&pvh_global_lock);
5019 	sched_pin();
5020 	if ((m->flags & PG_FICTITIOUS) != 0)
5021 		goto small_mappings;
5022 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
5023 	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
5024 		va = pv->pv_va;
5025 		pmap = PV_PMAP(pv);
5026 		PMAP_LOCK(pmap);
5027 		pde = pmap_pde(pmap, va);
5028 		if ((*pde & PG_RW) != 0)
5029 			(void)pmap_demote_pde(pmap, pde, va);
5030 		PMAP_UNLOCK(pmap);
5031 	}
5032 small_mappings:
5033 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
5034 		pmap = PV_PMAP(pv);
5035 		PMAP_LOCK(pmap);
5036 		pde = pmap_pde(pmap, pv->pv_va);
5037 		KASSERT((*pde & PG_PS) == 0, ("pmap_clear_write: found"
5038 		    " a 4mpage in page %p's pv list", m));
5039 		pte = pmap_pte_quick(pmap, pv->pv_va);
5040 retry:
5041 		oldpte = *pte;
5042 		if ((oldpte & PG_RW) != 0) {
5043 			/*
5044 			 * Regardless of whether a pte is 32 or 64 bits
5045 			 * in size, PG_RW and PG_M are among the least
5046 			 * significant 32 bits.
5047 			 */
5048 			if (!atomic_cmpset_int((u_int *)pte, oldpte,
5049 			    oldpte & ~(PG_RW | PG_M)))
5050 				goto retry;
5051 			if ((oldpte & PG_M) != 0)
5052 				vm_page_dirty(m);
5053 			pmap_invalidate_page_int(pmap, pv->pv_va);
5054 		}
5055 		PMAP_UNLOCK(pmap);
5056 	}
5057 	vm_page_aflag_clear(m, PGA_WRITEABLE);
5058 	sched_unpin();
5059 	rw_wunlock(&pvh_global_lock);
5060 }
5061 
5062 /*
5063  *	pmap_ts_referenced:
5064  *
5065  *	Return a count of reference bits for a page, clearing those bits.
5066  *	It is not necessary for every reference bit to be cleared, but it
5067  *	is necessary that 0 only be returned when there are truly no
5068  *	reference bits set.
5069  *
5070  *	As an optimization, update the page's dirty field if a modified bit is
5071  *	found while counting reference bits.  This opportunistic update can be
5072  *	performed at low cost and can eliminate the need for some future calls
5073  *	to pmap_is_modified().  However, since this function stops after
5074  *	finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some
5075  *	dirty pages.  Those dirty pages will only be detected by a future call
5076  *	to pmap_is_modified().
5077  */
5078 static int
5079 __CONCAT(PMTYPE, ts_referenced)(vm_page_t m)
5080 {
5081 	struct md_page *pvh;
5082 	pv_entry_t pv, pvf;
5083 	pmap_t pmap;
5084 	pd_entry_t *pde;
5085 	pt_entry_t *pte;
5086 	vm_paddr_t pa;
5087 	int rtval = 0;
5088 
5089 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
5090 	    ("pmap_ts_referenced: page %p is not managed", m));
5091 	pa = VM_PAGE_TO_PHYS(m);
5092 	pvh = pa_to_pvh(pa);
5093 	rw_wlock(&pvh_global_lock);
5094 	sched_pin();
5095 	if ((m->flags & PG_FICTITIOUS) != 0 ||
5096 	    (pvf = TAILQ_FIRST(&pvh->pv_list)) == NULL)
5097 		goto small_mappings;
5098 	pv = pvf;
5099 	do {
5100 		pmap = PV_PMAP(pv);
5101 		PMAP_LOCK(pmap);
5102 		pde = pmap_pde(pmap, pv->pv_va);
5103 		if ((*pde & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
5104 			/*
5105 			 * Although "*pde" is mapping a 2/4MB page, because
5106 			 * this function is called at a 4KB page granularity,
5107 			 * we only update the 4KB page under test.
5108 			 */
5109 			vm_page_dirty(m);
5110 		}
5111 		if ((*pde & PG_A) != 0) {
5112 			/*
5113 			 * Since this reference bit is shared by either 1024
5114 			 * or 512 4KB pages, it should not be cleared every
5115 			 * time it is tested.  Apply a simple "hash" function
5116 			 * on the physical page number, the virtual superpage
5117 			 * number, and the pmap address to select one 4KB page
5118 			 * out of the 1024 or 512 on which testing the
5119 			 * reference bit will result in clearing that bit.
5120 			 * This function is designed to avoid the selection of
5121 			 * the same 4KB page for every 2- or 4MB page mapping.
5122 			 *
5123 			 * On demotion, a mapping that hasn't been referenced
5124 			 * is simply destroyed.  To avoid the possibility of a
5125 			 * subsequent page fault on a demoted wired mapping,
5126 			 * always leave its reference bit set.  Moreover,
5127 			 * since the superpage is wired, the current state of
5128 			 * its reference bit won't affect page replacement.
5129 			 */
5130 			if ((((pa >> PAGE_SHIFT) ^ (pv->pv_va >> PDRSHIFT) ^
5131 			    (uintptr_t)pmap) & (NPTEPG - 1)) == 0 &&
5132 			    (*pde & PG_W) == 0) {
5133 				atomic_clear_int((u_int *)pde, PG_A);
5134 				pmap_invalidate_page_int(pmap, pv->pv_va);
5135 			}
5136 			rtval++;
5137 		}
5138 		PMAP_UNLOCK(pmap);
5139 		/* Rotate the PV list if it has more than one entry. */
5140 		if (TAILQ_NEXT(pv, pv_next) != NULL) {
5141 			TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
5142 			TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
5143 		}
5144 		if (rtval >= PMAP_TS_REFERENCED_MAX)
5145 			goto out;
5146 	} while ((pv = TAILQ_FIRST(&pvh->pv_list)) != pvf);
5147 small_mappings:
5148 	if ((pvf = TAILQ_FIRST(&m->md.pv_list)) == NULL)
5149 		goto out;
5150 	pv = pvf;
5151 	do {
5152 		pmap = PV_PMAP(pv);
5153 		PMAP_LOCK(pmap);
5154 		pde = pmap_pde(pmap, pv->pv_va);
5155 		KASSERT((*pde & PG_PS) == 0,
5156 		    ("pmap_ts_referenced: found a 4mpage in page %p's pv list",
5157 		    m));
5158 		pte = pmap_pte_quick(pmap, pv->pv_va);
5159 		if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
5160 			vm_page_dirty(m);
5161 		if ((*pte & PG_A) != 0) {
5162 			atomic_clear_int((u_int *)pte, PG_A);
5163 			pmap_invalidate_page_int(pmap, pv->pv_va);
5164 			rtval++;
5165 		}
5166 		PMAP_UNLOCK(pmap);
5167 		/* Rotate the PV list if it has more than one entry. */
5168 		if (TAILQ_NEXT(pv, pv_next) != NULL) {
5169 			TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
5170 			TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
5171 		}
5172 	} while ((pv = TAILQ_FIRST(&m->md.pv_list)) != pvf && rtval <
5173 	    PMAP_TS_REFERENCED_MAX);
5174 out:
5175 	sched_unpin();
5176 	rw_wunlock(&pvh_global_lock);
5177 	return (rtval);
5178 }
5179 
5180 /*
5181  *	Apply the given advice to the specified range of addresses within the
5182  *	given pmap.  Depending on the advice, clear the referenced and/or
5183  *	modified flags in each mapping and set the mapped page's dirty field.
5184  */
5185 static void
5186 __CONCAT(PMTYPE, advise)(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
5187     int advice)
5188 {
5189 	pd_entry_t oldpde, *pde;
5190 	pt_entry_t *pte;
5191 	vm_offset_t va, pdnxt;
5192 	vm_page_t m;
5193 	bool anychanged, pv_lists_locked;
5194 
5195 	if (advice != MADV_DONTNEED && advice != MADV_FREE)
5196 		return;
5197 	if (pmap_is_current(pmap))
5198 		pv_lists_locked = false;
5199 	else {
5200 		pv_lists_locked = true;
5201 resume:
5202 		rw_wlock(&pvh_global_lock);
5203 		sched_pin();
5204 	}
5205 	anychanged = false;
5206 	PMAP_LOCK(pmap);
5207 	for (; sva < eva; sva = pdnxt) {
5208 		pdnxt = (sva + NBPDR) & ~PDRMASK;
5209 		if (pdnxt < sva)
5210 			pdnxt = eva;
5211 		pde = pmap_pde(pmap, sva);
5212 		oldpde = *pde;
5213 		if ((oldpde & PG_V) == 0)
5214 			continue;
5215 		else if ((oldpde & PG_PS) != 0) {
5216 			if ((oldpde & PG_MANAGED) == 0)
5217 				continue;
5218 			if (!pv_lists_locked) {
5219 				pv_lists_locked = true;
5220 				if (!rw_try_wlock(&pvh_global_lock)) {
5221 					if (anychanged)
5222 						pmap_invalidate_all_int(pmap);
5223 					PMAP_UNLOCK(pmap);
5224 					goto resume;
5225 				}
5226 				sched_pin();
5227 			}
5228 			if (!pmap_demote_pde(pmap, pde, sva)) {
5229 				/*
5230 				 * The large page mapping was destroyed.
5231 				 */
5232 				continue;
5233 			}
5234 
5235 			/*
5236 			 * Unless the page mappings are wired, remove the
5237 			 * mapping to a single page so that a subsequent
5238 			 * access may repromote.  Choosing the last page
5239 			 * within the address range [sva, min(pdnxt, eva))
5240 			 * generally results in more repromotions.  Since the
5241 			 * underlying page table page is fully populated, this
5242 			 * removal never frees a page table page.
5243 			 */
5244 			if ((oldpde & PG_W) == 0) {
5245 				va = eva;
5246 				if (va > pdnxt)
5247 					va = pdnxt;
5248 				va -= PAGE_SIZE;
5249 				KASSERT(va >= sva,
5250 				    ("pmap_advise: no address gap"));
5251 				pte = pmap_pte_quick(pmap, va);
5252 				KASSERT((*pte & PG_V) != 0,
5253 				    ("pmap_advise: invalid PTE"));
5254 				pmap_remove_pte(pmap, pte, va, NULL);
5255 				anychanged = true;
5256 			}
5257 		}
5258 		if (pdnxt > eva)
5259 			pdnxt = eva;
5260 		va = pdnxt;
5261 		for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++,
5262 		    sva += PAGE_SIZE) {
5263 			if ((*pte & (PG_MANAGED | PG_V)) != (PG_MANAGED | PG_V))
5264 				goto maybe_invlrng;
5265 			else if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
5266 				if (advice == MADV_DONTNEED) {
5267 					/*
5268 					 * Future calls to pmap_is_modified()
5269 					 * can be avoided by making the page
5270 					 * dirty now.
5271 					 */
5272 					m = PHYS_TO_VM_PAGE(*pte & PG_FRAME);
5273 					vm_page_dirty(m);
5274 				}
5275 				atomic_clear_int((u_int *)pte, PG_M | PG_A);
5276 			} else if ((*pte & PG_A) != 0)
5277 				atomic_clear_int((u_int *)pte, PG_A);
5278 			else
5279 				goto maybe_invlrng;
5280 			if ((*pte & PG_G) != 0) {
5281 				if (va == pdnxt)
5282 					va = sva;
5283 			} else
5284 				anychanged = true;
5285 			continue;
5286 maybe_invlrng:
5287 			if (va != pdnxt) {
5288 				pmap_invalidate_range_int(pmap, va, sva);
5289 				va = pdnxt;
5290 			}
5291 		}
5292 		if (va != pdnxt)
5293 			pmap_invalidate_range_int(pmap, va, sva);
5294 	}
5295 	if (anychanged)
5296 		pmap_invalidate_all_int(pmap);
5297 	if (pv_lists_locked) {
5298 		sched_unpin();
5299 		rw_wunlock(&pvh_global_lock);
5300 	}
5301 	PMAP_UNLOCK(pmap);
5302 }
5303 
5304 /*
5305  *	Clear the modify bits on the specified physical page.
5306  */
5307 static void
5308 __CONCAT(PMTYPE, clear_modify)(vm_page_t m)
5309 {
5310 	struct md_page *pvh;
5311 	pv_entry_t next_pv, pv;
5312 	pmap_t pmap;
5313 	pd_entry_t oldpde, *pde;
5314 	pt_entry_t *pte;
5315 	vm_offset_t va;
5316 
5317 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
5318 	    ("pmap_clear_modify: page %p is not managed", m));
5319 	vm_page_assert_busied(m);
5320 
5321 	if (!pmap_page_is_write_mapped(m))
5322 		return;
5323 	rw_wlock(&pvh_global_lock);
5324 	sched_pin();
5325 	if ((m->flags & PG_FICTITIOUS) != 0)
5326 		goto small_mappings;
5327 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
5328 	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
5329 		va = pv->pv_va;
5330 		pmap = PV_PMAP(pv);
5331 		PMAP_LOCK(pmap);
5332 		pde = pmap_pde(pmap, va);
5333 		oldpde = *pde;
5334 		/* If oldpde has PG_RW set, then it also has PG_M set. */
5335 		if ((oldpde & PG_RW) != 0 &&
5336 		    pmap_demote_pde(pmap, pde, va) &&
5337 		    (oldpde & PG_W) == 0) {
5338 			/*
5339 			 * Write protect the mapping to a single page so that
5340 			 * a subsequent write access may repromote.
5341 			 */
5342 			va += VM_PAGE_TO_PHYS(m) - (oldpde & PG_PS_FRAME);
5343 			pte = pmap_pte_quick(pmap, va);
5344 			/*
5345 			 * Regardless of whether a pte is 32 or 64 bits
5346 			 * in size, PG_RW and PG_M are among the least
5347 			 * significant 32 bits.
5348 			 */
5349 			atomic_clear_int((u_int *)pte, PG_M | PG_RW);
5350 			vm_page_dirty(m);
5351 			pmap_invalidate_page_int(pmap, va);
5352 		}
5353 		PMAP_UNLOCK(pmap);
5354 	}
5355 small_mappings:
5356 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
5357 		pmap = PV_PMAP(pv);
5358 		PMAP_LOCK(pmap);
5359 		pde = pmap_pde(pmap, pv->pv_va);
5360 		KASSERT((*pde & PG_PS) == 0, ("pmap_clear_modify: found"
5361 		    " a 4mpage in page %p's pv list", m));
5362 		pte = pmap_pte_quick(pmap, pv->pv_va);
5363 		if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
5364 			/*
5365 			 * Regardless of whether a pte is 32 or 64 bits
5366 			 * in size, PG_M is among the least significant
5367 			 * 32 bits.
5368 			 */
5369 			atomic_clear_int((u_int *)pte, PG_M);
5370 			pmap_invalidate_page_int(pmap, pv->pv_va);
5371 		}
5372 		PMAP_UNLOCK(pmap);
5373 	}
5374 	sched_unpin();
5375 	rw_wunlock(&pvh_global_lock);
5376 }
5377 
5378 /*
5379  * Miscellaneous support routines follow
5380  */
5381 
5382 /* Adjust the cache mode for a 4KB page mapped via a PTE. */
5383 static __inline void
5384 pmap_pte_attr(pt_entry_t *pte, int cache_bits)
5385 {
5386 	u_int opte, npte;
5387 
5388 	/*
5389 	 * The cache mode bits are all in the low 32-bits of the
5390 	 * PTE, so we can just spin on updating the low 32-bits.
5391 	 */
5392 	do {
5393 		opte = *(u_int *)pte;
5394 		npte = opte & ~PG_PTE_CACHE;
5395 		npte |= cache_bits;
5396 	} while (npte != opte && !atomic_cmpset_int((u_int *)pte, opte, npte));
5397 }
5398 
5399 /* Adjust the cache mode for a 2/4MB page mapped via a PDE. */
5400 static __inline void
5401 pmap_pde_attr(pd_entry_t *pde, int cache_bits)
5402 {
5403 	u_int opde, npde;
5404 
5405 	/*
5406 	 * The cache mode bits are all in the low 32-bits of the
5407 	 * PDE, so we can just spin on updating the low 32-bits.
5408 	 */
5409 	do {
5410 		opde = *(u_int *)pde;
5411 		npde = opde & ~PG_PDE_CACHE;
5412 		npde |= cache_bits;
5413 	} while (npde != opde && !atomic_cmpset_int((u_int *)pde, opde, npde));
5414 }
5415 
5416 /*
5417  * Map a set of physical memory pages into the kernel virtual
5418  * address space. Return a pointer to where it is mapped. This
5419  * routine is intended to be used for mapping device memory,
5420  * NOT real memory.
5421  */
5422 static void *
5423 __CONCAT(PMTYPE, mapdev_attr)(vm_paddr_t pa, vm_size_t size, int mode,
5424     int flags)
5425 {
5426 	struct pmap_preinit_mapping *ppim;
5427 	vm_offset_t va, offset;
5428 	vm_page_t m;
5429 	vm_size_t tmpsize;
5430 	int i;
5431 
5432 	offset = pa & PAGE_MASK;
5433 	size = round_page(offset + size);
5434 	pa = pa & PG_FRAME;
5435 
5436 	if (pa < PMAP_MAP_LOW && pa + size <= PMAP_MAP_LOW) {
5437 		va = pa + PMAP_MAP_LOW;
5438 		if ((flags & MAPDEV_SETATTR) == 0)
5439 			return ((void *)(va + offset));
5440 	} else if (!pmap_initialized) {
5441 		va = 0;
5442 		for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
5443 			ppim = pmap_preinit_mapping + i;
5444 			if (ppim->va == 0) {
5445 				ppim->pa = pa;
5446 				ppim->sz = size;
5447 				ppim->mode = mode;
5448 				ppim->va = virtual_avail;
5449 				virtual_avail += size;
5450 				va = ppim->va;
5451 				break;
5452 			}
5453 		}
5454 		if (va == 0)
5455 			panic("%s: too many preinit mappings", __func__);
5456 	} else {
5457 		/*
5458 		 * If we have a preinit mapping, re-use it.
5459 		 */
5460 		for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
5461 			ppim = pmap_preinit_mapping + i;
5462 			if (ppim->pa == pa && ppim->sz == size &&
5463 			    (ppim->mode == mode ||
5464 			    (flags & MAPDEV_SETATTR) == 0))
5465 				return ((void *)(ppim->va + offset));
5466 		}
5467 		va = kva_alloc(size);
5468 		if (va == 0)
5469 			panic("%s: Couldn't allocate KVA", __func__);
5470 	}
5471 	for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE) {
5472 		if ((flags & MAPDEV_SETATTR) == 0 && pmap_initialized) {
5473 			m = PHYS_TO_VM_PAGE(pa);
5474 			if (m != NULL && VM_PAGE_TO_PHYS(m) == pa) {
5475 				pmap_kenter_attr(va + tmpsize, pa + tmpsize,
5476 				    m->md.pat_mode);
5477 				continue;
5478 			}
5479 		}
5480 		pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode);
5481 	}
5482 	pmap_invalidate_range_int(kernel_pmap, va, va + tmpsize);
5483 	pmap_invalidate_cache_range(va, va + size);
5484 	return ((void *)(va + offset));
5485 }
5486 
5487 static void
5488 __CONCAT(PMTYPE, unmapdev)(void *p, vm_size_t size)
5489 {
5490 	struct pmap_preinit_mapping *ppim;
5491 	vm_offset_t offset, va;
5492 	int i;
5493 
5494 	va = (vm_offset_t)p;
5495 	if (va >= PMAP_MAP_LOW && va <= KERNBASE && va + size <= KERNBASE)
5496 		return;
5497 	offset = va & PAGE_MASK;
5498 	size = round_page(offset + size);
5499 	va = trunc_page(va);
5500 	for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
5501 		ppim = pmap_preinit_mapping + i;
5502 		if (ppim->va == va && ppim->sz == size) {
5503 			if (pmap_initialized)
5504 				return;
5505 			ppim->pa = 0;
5506 			ppim->va = 0;
5507 			ppim->sz = 0;
5508 			ppim->mode = 0;
5509 			if (va + size == virtual_avail)
5510 				virtual_avail = va;
5511 			return;
5512 		}
5513 	}
5514 	if (pmap_initialized) {
5515 		pmap_qremove(va, atop(size));
5516 		kva_free(va, size);
5517 	}
5518 }
5519 
5520 /*
5521  * Sets the memory attribute for the specified page.
5522  */
5523 static void
5524 __CONCAT(PMTYPE, page_set_memattr)(vm_page_t m, vm_memattr_t ma)
5525 {
5526 
5527 	m->md.pat_mode = ma;
5528 	if ((m->flags & PG_FICTITIOUS) != 0)
5529 		return;
5530 
5531 	/*
5532 	 * If "m" is a normal page, flush it from the cache.
5533 	 * See pmap_invalidate_cache_range().
5534 	 *
5535 	 * First, try to find an existing mapping of the page by sf
5536 	 * buffer. sf_buf_invalidate_cache() modifies mapping and
5537 	 * flushes the cache.
5538 	 */
5539 	if (sf_buf_invalidate_cache(m))
5540 		return;
5541 
5542 	/*
5543 	 * If page is not mapped by sf buffer, but CPU does not
5544 	 * support self snoop, map the page transient and do
5545 	 * invalidation. In the worst case, whole cache is flushed by
5546 	 * pmap_invalidate_cache_range().
5547 	 */
5548 	if ((cpu_feature & CPUID_SS) == 0)
5549 		pmap_flush_page(m);
5550 }
5551 
5552 static void
5553 __CONCAT(PMTYPE, flush_page)(vm_page_t m)
5554 {
5555 	pt_entry_t *cmap_pte2;
5556 	struct pcpu *pc;
5557 	vm_offset_t sva, eva;
5558 	bool useclflushopt;
5559 
5560 	useclflushopt = (cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0;
5561 	if (useclflushopt || (cpu_feature & CPUID_CLFSH) != 0) {
5562 		sched_pin();
5563 		pc = get_pcpu();
5564 		cmap_pte2 = pc->pc_cmap_pte2;
5565 		mtx_lock(&pc->pc_cmap_lock);
5566 		if (*cmap_pte2)
5567 			panic("pmap_flush_page: CMAP2 busy");
5568 		*cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) |
5569 		    PG_A | PG_M | pmap_cache_bits(kernel_pmap, m->md.pat_mode,
5570 		    0);
5571 		invlcaddr(pc->pc_cmap_addr2);
5572 		sva = (vm_offset_t)pc->pc_cmap_addr2;
5573 		eva = sva + PAGE_SIZE;
5574 
5575 		/*
5576 		 * Use mfence or sfence despite the ordering implied by
5577 		 * mtx_{un,}lock() because clflush on non-Intel CPUs
5578 		 * and clflushopt are not guaranteed to be ordered by
5579 		 * any other instruction.
5580 		 */
5581 		if (useclflushopt)
5582 			sfence();
5583 		else if (cpu_vendor_id != CPU_VENDOR_INTEL)
5584 			mfence();
5585 		for (; sva < eva; sva += cpu_clflush_line_size) {
5586 			if (useclflushopt)
5587 				clflushopt(sva);
5588 			else
5589 				clflush(sva);
5590 		}
5591 		if (useclflushopt)
5592 			sfence();
5593 		else if (cpu_vendor_id != CPU_VENDOR_INTEL)
5594 			mfence();
5595 		*cmap_pte2 = 0;
5596 		sched_unpin();
5597 		mtx_unlock(&pc->pc_cmap_lock);
5598 	} else
5599 		pmap_invalidate_cache();
5600 }
5601 
5602 /*
5603  * Changes the specified virtual address range's memory type to that given by
5604  * the parameter "mode".  The specified virtual address range must be
5605  * completely contained within either the kernel map.
5606  *
5607  * Returns zero if the change completed successfully, and either EINVAL or
5608  * ENOMEM if the change failed.  Specifically, EINVAL is returned if some part
5609  * of the virtual address range was not mapped, and ENOMEM is returned if
5610  * there was insufficient memory available to complete the change.
5611  */
5612 static int
5613 __CONCAT(PMTYPE, change_attr)(vm_offset_t va, vm_size_t size, int mode)
5614 {
5615 	vm_offset_t base, offset, tmpva;
5616 	pd_entry_t *pde;
5617 	pt_entry_t *pte;
5618 	int cache_bits_pte, cache_bits_pde;
5619 	boolean_t changed;
5620 
5621 	base = trunc_page(va);
5622 	offset = va & PAGE_MASK;
5623 	size = round_page(offset + size);
5624 
5625 	/*
5626 	 * Only supported on kernel virtual addresses above the recursive map.
5627 	 */
5628 	if (base < VM_MIN_KERNEL_ADDRESS)
5629 		return (EINVAL);
5630 
5631 	cache_bits_pde = pmap_cache_bits(kernel_pmap, mode, 1);
5632 	cache_bits_pte = pmap_cache_bits(kernel_pmap, mode, 0);
5633 	changed = FALSE;
5634 
5635 	/*
5636 	 * Pages that aren't mapped aren't supported.  Also break down
5637 	 * 2/4MB pages into 4KB pages if required.
5638 	 */
5639 	PMAP_LOCK(kernel_pmap);
5640 	for (tmpva = base; tmpva < base + size; ) {
5641 		pde = pmap_pde(kernel_pmap, tmpva);
5642 		if (*pde == 0) {
5643 			PMAP_UNLOCK(kernel_pmap);
5644 			return (EINVAL);
5645 		}
5646 		if (*pde & PG_PS) {
5647 			/*
5648 			 * If the current 2/4MB page already has
5649 			 * the required memory type, then we need not
5650 			 * demote this page.  Just increment tmpva to
5651 			 * the next 2/4MB page frame.
5652 			 */
5653 			if ((*pde & PG_PDE_CACHE) == cache_bits_pde) {
5654 				tmpva = trunc_4mpage(tmpva) + NBPDR;
5655 				continue;
5656 			}
5657 
5658 			/*
5659 			 * If the current offset aligns with a 2/4MB
5660 			 * page frame and there is at least 2/4MB left
5661 			 * within the range, then we need not break
5662 			 * down this page into 4KB pages.
5663 			 */
5664 			if ((tmpva & PDRMASK) == 0 &&
5665 			    tmpva + PDRMASK < base + size) {
5666 				tmpva += NBPDR;
5667 				continue;
5668 			}
5669 			if (!pmap_demote_pde(kernel_pmap, pde, tmpva)) {
5670 				PMAP_UNLOCK(kernel_pmap);
5671 				return (ENOMEM);
5672 			}
5673 		}
5674 		pte = vtopte(tmpva);
5675 		if (*pte == 0) {
5676 			PMAP_UNLOCK(kernel_pmap);
5677 			return (EINVAL);
5678 		}
5679 		tmpva += PAGE_SIZE;
5680 	}
5681 	PMAP_UNLOCK(kernel_pmap);
5682 
5683 	/*
5684 	 * Ok, all the pages exist, so run through them updating their
5685 	 * cache mode if required.
5686 	 */
5687 	for (tmpva = base; tmpva < base + size; ) {
5688 		pde = pmap_pde(kernel_pmap, tmpva);
5689 		if (*pde & PG_PS) {
5690 			if ((*pde & PG_PDE_CACHE) != cache_bits_pde) {
5691 				pmap_pde_attr(pde, cache_bits_pde);
5692 				changed = TRUE;
5693 			}
5694 			tmpva = trunc_4mpage(tmpva) + NBPDR;
5695 		} else {
5696 			pte = vtopte(tmpva);
5697 			if ((*pte & PG_PTE_CACHE) != cache_bits_pte) {
5698 				pmap_pte_attr(pte, cache_bits_pte);
5699 				changed = TRUE;
5700 			}
5701 			tmpva += PAGE_SIZE;
5702 		}
5703 	}
5704 
5705 	/*
5706 	 * Flush CPU caches to make sure any data isn't cached that
5707 	 * shouldn't be, etc.
5708 	 */
5709 	if (changed) {
5710 		pmap_invalidate_range_int(kernel_pmap, base, tmpva);
5711 		pmap_invalidate_cache_range(base, tmpva);
5712 	}
5713 	return (0);
5714 }
5715 
5716 /*
5717  * Perform the pmap work for mincore(2).  If the page is not both referenced and
5718  * modified by this pmap, returns its physical address so that the caller can
5719  * find other mappings.
5720  */
5721 static int
5722 __CONCAT(PMTYPE, mincore)(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap)
5723 {
5724 	pd_entry_t pde;
5725 	pt_entry_t pte;
5726 	vm_paddr_t pa;
5727 	int val;
5728 
5729 	PMAP_LOCK(pmap);
5730 	pde = *pmap_pde(pmap, addr);
5731 	if (pde != 0) {
5732 		if ((pde & PG_PS) != 0) {
5733 			pte = pde;
5734 			/* Compute the physical address of the 4KB page. */
5735 			pa = ((pde & PG_PS_FRAME) | (addr & PDRMASK)) &
5736 			    PG_FRAME;
5737 			val = MINCORE_PSIND(1);
5738 		} else {
5739 			pte = pmap_pte_ufast(pmap, addr, pde);
5740 			pa = pte & PG_FRAME;
5741 			val = 0;
5742 		}
5743 	} else {
5744 		pte = 0;
5745 		pa = 0;
5746 		val = 0;
5747 	}
5748 	if ((pte & PG_V) != 0) {
5749 		val |= MINCORE_INCORE;
5750 		if ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
5751 			val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
5752 		if ((pte & PG_A) != 0)
5753 			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
5754 	}
5755 	if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
5756 	    (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) &&
5757 	    (pte & (PG_MANAGED | PG_V)) == (PG_MANAGED | PG_V)) {
5758 		*pap = pa;
5759 	}
5760 	PMAP_UNLOCK(pmap);
5761 	return (val);
5762 }
5763 
5764 static void
5765 __CONCAT(PMTYPE, activate)(struct thread *td)
5766 {
5767 	pmap_t	pmap, oldpmap;
5768 	u_int	cpuid;
5769 	u_int32_t  cr3;
5770 
5771 	critical_enter();
5772 	pmap = vmspace_pmap(td->td_proc->p_vmspace);
5773 	oldpmap = PCPU_GET(curpmap);
5774 	cpuid = PCPU_GET(cpuid);
5775 #if defined(SMP)
5776 	CPU_CLR_ATOMIC(cpuid, &oldpmap->pm_active);
5777 	CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
5778 #else
5779 	CPU_CLR(cpuid, &oldpmap->pm_active);
5780 	CPU_SET(cpuid, &pmap->pm_active);
5781 #endif
5782 #ifdef PMAP_PAE_COMP
5783 	cr3 = vtophys(pmap->pm_pdpt);
5784 #else
5785 	cr3 = vtophys(pmap->pm_pdir);
5786 #endif
5787 	/*
5788 	 * pmap_activate is for the current thread on the current cpu
5789 	 */
5790 	td->td_pcb->pcb_cr3 = cr3;
5791 	PCPU_SET(curpmap, pmap);
5792 	critical_exit();
5793 }
5794 
5795 static void
5796 __CONCAT(PMTYPE, activate_boot)(pmap_t pmap)
5797 {
5798 	u_int cpuid;
5799 
5800 	cpuid = PCPU_GET(cpuid);
5801 #if defined(SMP)
5802 	CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
5803 #else
5804 	CPU_SET(cpuid, &pmap->pm_active);
5805 #endif
5806 	PCPU_SET(curpmap, pmap);
5807 }
5808 
5809 /*
5810  *	Increase the starting virtual address of the given mapping if a
5811  *	different alignment might result in more superpage mappings.
5812  */
5813 static void
5814 __CONCAT(PMTYPE, align_superpage)(vm_object_t object, vm_ooffset_t offset,
5815     vm_offset_t *addr, vm_size_t size)
5816 {
5817 	vm_offset_t superpage_offset;
5818 
5819 	if (size < NBPDR)
5820 		return;
5821 	if (object != NULL && (object->flags & OBJ_COLORED) != 0)
5822 		offset += ptoa(object->pg_color);
5823 	superpage_offset = offset & PDRMASK;
5824 	if (size - ((NBPDR - superpage_offset) & PDRMASK) < NBPDR ||
5825 	    (*addr & PDRMASK) == superpage_offset)
5826 		return;
5827 	if ((*addr & PDRMASK) < superpage_offset)
5828 		*addr = (*addr & ~PDRMASK) + superpage_offset;
5829 	else
5830 		*addr = ((*addr + PDRMASK) & ~PDRMASK) + superpage_offset;
5831 }
5832 
5833 static vm_offset_t
5834 __CONCAT(PMTYPE, quick_enter_page)(vm_page_t m)
5835 {
5836 	vm_offset_t qaddr;
5837 	pt_entry_t *pte;
5838 
5839 	critical_enter();
5840 	qaddr = PCPU_GET(qmap_addr);
5841 	pte = vtopte(qaddr);
5842 
5843 	KASSERT(*pte == 0,
5844 	    ("pmap_quick_enter_page: PTE busy %#jx", (uintmax_t)*pte));
5845 	*pte = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
5846 	    pmap_cache_bits(kernel_pmap, pmap_page_get_memattr(m), 0);
5847 	invlpg(qaddr);
5848 
5849 	return (qaddr);
5850 }
5851 
5852 static void
5853 __CONCAT(PMTYPE, quick_remove_page)(vm_offset_t addr)
5854 {
5855 	vm_offset_t qaddr;
5856 	pt_entry_t *pte;
5857 
5858 	qaddr = PCPU_GET(qmap_addr);
5859 	pte = vtopte(qaddr);
5860 
5861 	KASSERT(*pte != 0, ("pmap_quick_remove_page: PTE not in use"));
5862 	KASSERT(addr == qaddr, ("pmap_quick_remove_page: invalid address"));
5863 
5864 	*pte = 0;
5865 	critical_exit();
5866 }
5867 
5868 static vmem_t *pmap_trm_arena;
5869 static vmem_addr_t pmap_trm_arena_last = PMAP_TRM_MIN_ADDRESS;
5870 static int trm_guard = PAGE_SIZE;
5871 
5872 static int
5873 pmap_trm_import(void *unused __unused, vmem_size_t size, int flags,
5874     vmem_addr_t *addrp)
5875 {
5876 	vm_page_t m;
5877 	vmem_addr_t af, addr, prev_addr;
5878 	pt_entry_t *trm_pte;
5879 
5880 	prev_addr = atomic_load_int(&pmap_trm_arena_last);
5881 	size = round_page(size) + trm_guard;
5882 	for (;;) {
5883 		if (prev_addr + size < prev_addr || prev_addr + size < size ||
5884 		    prev_addr + size > PMAP_TRM_MAX_ADDRESS)
5885 			return (ENOMEM);
5886 		addr = prev_addr + size;
5887 		if (atomic_fcmpset_int(&pmap_trm_arena_last, &prev_addr, addr))
5888 			break;
5889 	}
5890 	prev_addr += trm_guard;
5891 	trm_pte = PTmap + atop(prev_addr);
5892 	for (af = prev_addr; af < addr; af += PAGE_SIZE) {
5893 		m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_WAITOK);
5894 		pte_store(&trm_pte[atop(af - prev_addr)], VM_PAGE_TO_PHYS(m) |
5895 		    PG_M | PG_A | PG_RW | PG_V | pgeflag |
5896 		    pmap_cache_bits(kernel_pmap, VM_MEMATTR_DEFAULT, FALSE));
5897 	}
5898 	*addrp = prev_addr;
5899 	return (0);
5900 }
5901 
5902 void
5903 pmap_init_trm(void)
5904 {
5905 	vm_page_t pd_m;
5906 
5907 	TUNABLE_INT_FETCH("machdep.trm_guard", &trm_guard);
5908 	if ((trm_guard & PAGE_MASK) != 0)
5909 		trm_guard = 0;
5910 	pmap_trm_arena = vmem_create("i386trampoline", 0, 0, 1, 0, M_WAITOK);
5911 	vmem_set_import(pmap_trm_arena, pmap_trm_import, NULL, NULL, PAGE_SIZE);
5912 	pd_m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_WAITOK |
5913 	    VM_ALLOC_ZERO);
5914 	PTD[TRPTDI] = VM_PAGE_TO_PHYS(pd_m) | PG_M | PG_A | PG_RW | PG_V |
5915 	    pmap_cache_bits(kernel_pmap, VM_MEMATTR_DEFAULT, TRUE);
5916 }
5917 
5918 static void *
5919 __CONCAT(PMTYPE, trm_alloc)(size_t size, int flags)
5920 {
5921 	vmem_addr_t res;
5922 	int error;
5923 
5924 	MPASS((flags & ~(M_WAITOK | M_NOWAIT | M_ZERO)) == 0);
5925 	error = vmem_xalloc(pmap_trm_arena, roundup2(size, 4), sizeof(int),
5926 	    0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX, flags | M_FIRSTFIT, &res);
5927 	if (error != 0)
5928 		return (NULL);
5929 	if ((flags & M_ZERO) != 0)
5930 		bzero((void *)res, size);
5931 	return ((void *)res);
5932 }
5933 
5934 static void
5935 __CONCAT(PMTYPE, trm_free)(void *addr, size_t size)
5936 {
5937 
5938 	vmem_free(pmap_trm_arena, (uintptr_t)addr, roundup2(size, 4));
5939 }
5940 
5941 static void
5942 __CONCAT(PMTYPE, ksetrw)(vm_offset_t va)
5943 {
5944 
5945 	*vtopte(va) |= PG_RW;
5946 }
5947 
5948 static void
5949 __CONCAT(PMTYPE, remap_lowptdi)(bool enable)
5950 {
5951 
5952 	PTD[KPTDI] = enable ? PTD[LOWPTDI] : 0;
5953 	invltlb_glob();
5954 }
5955 
5956 static vm_offset_t
5957 __CONCAT(PMTYPE, get_map_low)(void)
5958 {
5959 
5960 	return (PMAP_MAP_LOW);
5961 }
5962 
5963 static vm_offset_t
5964 __CONCAT(PMTYPE, get_vm_maxuser_address)(void)
5965 {
5966 
5967 	return (VM_MAXUSER_ADDRESS);
5968 }
5969 
5970 static vm_paddr_t
5971 __CONCAT(PMTYPE, pg_frame)(vm_paddr_t pa)
5972 {
5973 
5974 	return (pa & PG_FRAME);
5975 }
5976 
5977 static void
5978 __CONCAT(PMTYPE, sf_buf_map)(struct sf_buf *sf)
5979 {
5980 	pt_entry_t opte, *ptep;
5981 
5982 	/*
5983 	 * Update the sf_buf's virtual-to-physical mapping, flushing the
5984 	 * virtual address from the TLB.  Since the reference count for
5985 	 * the sf_buf's old mapping was zero, that mapping is not
5986 	 * currently in use.  Consequently, there is no need to exchange
5987 	 * the old and new PTEs atomically, even under PAE.
5988 	 */
5989 	ptep = vtopte(sf->kva);
5990 	opte = *ptep;
5991 	*ptep = VM_PAGE_TO_PHYS(sf->m) | PG_RW | PG_V |
5992 	    pmap_cache_bits(kernel_pmap, sf->m->md.pat_mode, 0);
5993 
5994 	/*
5995 	 * Avoid unnecessary TLB invalidations: If the sf_buf's old
5996 	 * virtual-to-physical mapping was not used, then any processor
5997 	 * that has invalidated the sf_buf's virtual address from its TLB
5998 	 * since the last used mapping need not invalidate again.
5999 	 */
6000 #ifdef SMP
6001 	if ((opte & (PG_V | PG_A)) ==  (PG_V | PG_A))
6002 		CPU_ZERO(&sf->cpumask);
6003 #else
6004 	if ((opte & (PG_V | PG_A)) ==  (PG_V | PG_A))
6005 		pmap_invalidate_page_int(kernel_pmap, sf->kva);
6006 #endif
6007 }
6008 
6009 static void
6010 __CONCAT(PMTYPE, cp_slow0_map)(vm_offset_t kaddr, int plen, vm_page_t *ma)
6011 {
6012 	pt_entry_t *pte;
6013 	int i;
6014 
6015 	for (i = 0, pte = vtopte(kaddr); i < plen; i++, pte++) {
6016 		*pte = PG_V | PG_RW | PG_A | PG_M | VM_PAGE_TO_PHYS(ma[i]) |
6017 		    pmap_cache_bits(kernel_pmap, pmap_page_get_memattr(ma[i]),
6018 		    FALSE);
6019 		invlpg(kaddr + ptoa(i));
6020 	}
6021 }
6022 
6023 static u_int
6024 __CONCAT(PMTYPE, get_kcr3)(void)
6025 {
6026 
6027 #ifdef PMAP_PAE_COMP
6028 	return ((u_int)IdlePDPT);
6029 #else
6030 	return ((u_int)IdlePTD);
6031 #endif
6032 }
6033 
6034 static u_int
6035 __CONCAT(PMTYPE, get_cr3)(pmap_t pmap)
6036 {
6037 
6038 #ifdef PMAP_PAE_COMP
6039 	return ((u_int)vtophys(pmap->pm_pdpt));
6040 #else
6041 	return ((u_int)vtophys(pmap->pm_pdir));
6042 #endif
6043 }
6044 
6045 static caddr_t
6046 __CONCAT(PMTYPE, cmap3)(vm_paddr_t pa, u_int pte_bits)
6047 {
6048 	pt_entry_t *pte;
6049 
6050 	pte = CMAP3;
6051 	*pte = pa | pte_bits;
6052 	invltlb();
6053 	return (CADDR3);
6054 }
6055 
6056 static void
6057 __CONCAT(PMTYPE, basemem_setup)(u_int basemem)
6058 {
6059 	pt_entry_t *pte;
6060 	int i;
6061 
6062 	/*
6063 	 * Map pages between basemem and ISA_HOLE_START, if any, r/w into
6064 	 * the vm86 page table so that vm86 can scribble on them using
6065 	 * the vm86 map too.  XXX: why 2 ways for this and only 1 way for
6066 	 * page 0, at least as initialized here?
6067 	 */
6068 	pte = (pt_entry_t *)vm86paddr;
6069 	for (i = basemem / 4; i < 160; i++)
6070 		pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U;
6071 }
6072 
6073 struct bios16_pmap_handle {
6074 	pt_entry_t	*pte;
6075 	pd_entry_t	*ptd;
6076 	pt_entry_t	orig_ptd;
6077 };
6078 
6079 static void *
6080 __CONCAT(PMTYPE, bios16_enter)(void)
6081 {
6082 	struct bios16_pmap_handle *h;
6083 
6084 	/*
6085 	 * no page table, so create one and install it.
6086 	 */
6087 	h = malloc(sizeof(struct bios16_pmap_handle), M_TEMP, M_WAITOK);
6088 	h->pte = (pt_entry_t *)malloc(PAGE_SIZE, M_TEMP, M_WAITOK);
6089 	h->ptd = IdlePTD;
6090 	*h->pte = vm86phystk | PG_RW | PG_V;
6091 	h->orig_ptd = *h->ptd;
6092 	*h->ptd = vtophys(h->pte) | PG_RW | PG_V;
6093 	pmap_invalidate_all_int(kernel_pmap);	/* XXX insurance for now */
6094 	return (h);
6095 }
6096 
6097 static void
6098 __CONCAT(PMTYPE, bios16_leave)(void *arg)
6099 {
6100 	struct bios16_pmap_handle *h;
6101 
6102 	h = arg;
6103 	*h->ptd = h->orig_ptd;		/* remove page table */
6104 	/*
6105 	 * XXX only needs to be invlpg(0) but that doesn't work on the 386
6106 	 */
6107 	pmap_invalidate_all_int(kernel_pmap);
6108 	free(h->pte, M_TEMP);		/* ... and free it */
6109 }
6110 
6111 struct pmap_kernel_map_range {
6112 	vm_offset_t sva;
6113 	pt_entry_t attrs;
6114 	int ptes;
6115 	int pdes;
6116 	int pdpes;
6117 };
6118 
6119 static void
6120 sysctl_kmaps_dump(struct sbuf *sb, struct pmap_kernel_map_range *range,
6121     vm_offset_t eva)
6122 {
6123 	const char *mode;
6124 	int i, pat_idx;
6125 
6126 	if (eva <= range->sva)
6127 		return;
6128 
6129 	pat_idx = pmap_pat_index(kernel_pmap, range->attrs, true);
6130 	for (i = 0; i < PAT_INDEX_SIZE; i++)
6131 		if (pat_index[i] == pat_idx)
6132 			break;
6133 
6134 	switch (i) {
6135 	case PAT_WRITE_BACK:
6136 		mode = "WB";
6137 		break;
6138 	case PAT_WRITE_THROUGH:
6139 		mode = "WT";
6140 		break;
6141 	case PAT_UNCACHEABLE:
6142 		mode = "UC";
6143 		break;
6144 	case PAT_UNCACHED:
6145 		mode = "U-";
6146 		break;
6147 	case PAT_WRITE_PROTECTED:
6148 		mode = "WP";
6149 		break;
6150 	case PAT_WRITE_COMBINING:
6151 		mode = "WC";
6152 		break;
6153 	default:
6154 		printf("%s: unknown PAT mode %#x for range 0x%08x-0x%08x\n",
6155 		    __func__, pat_idx, range->sva, eva);
6156 		mode = "??";
6157 		break;
6158 	}
6159 
6160 	sbuf_printf(sb, "0x%08x-0x%08x r%c%c%c%c %s %d %d %d\n",
6161 	    range->sva, eva,
6162 	    (range->attrs & PG_RW) != 0 ? 'w' : '-',
6163 	    (range->attrs & pg_nx) != 0 ? '-' : 'x',
6164 	    (range->attrs & PG_U) != 0 ? 'u' : 's',
6165 	    (range->attrs & PG_G) != 0 ? 'g' : '-',
6166 	    mode, range->pdpes, range->pdes, range->ptes);
6167 
6168 	/* Reset to sentinel value. */
6169 	range->sva = 0xffffffff;
6170 }
6171 
6172 /*
6173  * Determine whether the attributes specified by a page table entry match those
6174  * being tracked by the current range.  This is not quite as simple as a direct
6175  * flag comparison since some PAT modes have multiple representations.
6176  */
6177 static bool
6178 sysctl_kmaps_match(struct pmap_kernel_map_range *range, pt_entry_t attrs)
6179 {
6180 	pt_entry_t diff, mask;
6181 
6182 	mask = pg_nx | PG_G | PG_RW | PG_U | PG_PDE_CACHE;
6183 	diff = (range->attrs ^ attrs) & mask;
6184 	if (diff == 0)
6185 		return (true);
6186 	if ((diff & ~PG_PDE_PAT) == 0 &&
6187 	    pmap_pat_index(kernel_pmap, range->attrs, true) ==
6188 	    pmap_pat_index(kernel_pmap, attrs, true))
6189 		return (true);
6190 	return (false);
6191 }
6192 
6193 static void
6194 sysctl_kmaps_reinit(struct pmap_kernel_map_range *range, vm_offset_t va,
6195     pt_entry_t attrs)
6196 {
6197 
6198 	memset(range, 0, sizeof(*range));
6199 	range->sva = va;
6200 	range->attrs = attrs;
6201 }
6202 
6203 /*
6204  * Given a leaf PTE, derive the mapping's attributes.  If they do not match
6205  * those of the current run, dump the address range and its attributes, and
6206  * begin a new run.
6207  */
6208 static void
6209 sysctl_kmaps_check(struct sbuf *sb, struct pmap_kernel_map_range *range,
6210     vm_offset_t va, pd_entry_t pde, pt_entry_t pte)
6211 {
6212 	pt_entry_t attrs;
6213 
6214 	attrs = pde & (PG_RW | PG_U | pg_nx);
6215 
6216 	if ((pde & PG_PS) != 0) {
6217 		attrs |= pde & (PG_G | PG_PDE_CACHE);
6218 	} else if (pte != 0) {
6219 		attrs |= pte & pg_nx;
6220 		attrs &= pg_nx | (pte & (PG_RW | PG_U));
6221 		attrs |= pte & (PG_G | PG_PTE_CACHE);
6222 
6223 		/* Canonicalize by always using the PDE PAT bit. */
6224 		if ((attrs & PG_PTE_PAT) != 0)
6225 			attrs ^= PG_PDE_PAT | PG_PTE_PAT;
6226 	}
6227 
6228 	if (range->sva > va || !sysctl_kmaps_match(range, attrs)) {
6229 		sysctl_kmaps_dump(sb, range, va);
6230 		sysctl_kmaps_reinit(range, va, attrs);
6231 	}
6232 }
6233 
6234 static int
6235 __CONCAT(PMTYPE, sysctl_kmaps)(SYSCTL_HANDLER_ARGS)
6236 {
6237 	struct pmap_kernel_map_range range;
6238 	struct sbuf sbuf, *sb;
6239 	pd_entry_t pde;
6240 	pt_entry_t *pt, pte;
6241 	vm_offset_t sva;
6242 	int error;
6243 	u_int i, k;
6244 
6245 	error = sysctl_wire_old_buffer(req, 0);
6246 	if (error != 0)
6247 		return (error);
6248 	sb = &sbuf;
6249 	sbuf_new_for_sysctl(sb, NULL, PAGE_SIZE, req);
6250 
6251 	/* Sentinel value. */
6252 	range.sva = 0xffffffff;
6253 
6254 	/*
6255 	 * Iterate over the kernel page tables without holding the
6256 	 * kernel pmap lock.  Kernel page table pages are never freed,
6257 	 * so at worst we will observe inconsistencies in the output.
6258 	 */
6259 	for (sva = 0, i = 0; i < NPTEPG * NPGPTD * NPDEPG ;) {
6260 		if (i == 0)
6261 			sbuf_printf(sb, "\nLow PDE:\n");
6262 		else if (i == LOWPTDI * NPTEPG)
6263 			sbuf_printf(sb, "Low PDE dup:\n");
6264 		else if (i == PTDPTDI * NPTEPG)
6265 			sbuf_printf(sb, "Recursive map:\n");
6266 		else if (i == KERNPTDI * NPTEPG)
6267 			sbuf_printf(sb, "Kernel base:\n");
6268 		else if (i == TRPTDI * NPTEPG)
6269 			sbuf_printf(sb, "Trampoline:\n");
6270 		pde = IdlePTD[sva >> PDRSHIFT];
6271 		if ((pde & PG_V) == 0) {
6272 			sva = rounddown2(sva, NBPDR);
6273 			sysctl_kmaps_dump(sb, &range, sva);
6274 			sva += NBPDR;
6275 			i += NPTEPG;
6276 			continue;
6277 		}
6278 		if ((pde & PG_PS) != 0) {
6279 			sysctl_kmaps_check(sb, &range, sva, pde, 0);
6280 			range.pdes++;
6281 			sva += NBPDR;
6282 			i += NPTEPG;
6283 			continue;
6284 		}
6285 		for (pt = vtopte(sva), k = 0; k < NPTEPG; i++, k++, pt++,
6286 		    sva += PAGE_SIZE) {
6287 			pte = *pt;
6288 			if ((pte & PG_V) == 0) {
6289 				sysctl_kmaps_dump(sb, &range, sva);
6290 				continue;
6291 			}
6292 			sysctl_kmaps_check(sb, &range, sva, pde, pte);
6293 			range.ptes++;
6294 		}
6295 	}
6296 
6297 	error = sbuf_finish(sb);
6298 	sbuf_delete(sb);
6299 	return (error);
6300 }
6301 
6302 #define	PMM(a)					\
6303 	.pm_##a = __CONCAT(PMTYPE, a),
6304 
6305 struct pmap_methods __CONCAT(PMTYPE, methods) = {
6306 	PMM(ksetrw)
6307 	PMM(remap_lower)
6308 	PMM(remap_lowptdi)
6309 	PMM(align_superpage)
6310 	PMM(quick_enter_page)
6311 	PMM(quick_remove_page)
6312 	PMM(trm_alloc)
6313 	PMM(trm_free)
6314 	PMM(get_map_low)
6315 	PMM(get_vm_maxuser_address)
6316 	PMM(kextract)
6317 	PMM(pg_frame)
6318 	PMM(sf_buf_map)
6319 	PMM(cp_slow0_map)
6320 	PMM(get_kcr3)
6321 	PMM(get_cr3)
6322 	PMM(cmap3)
6323 	PMM(basemem_setup)
6324 	PMM(set_nx)
6325 	PMM(bios16_enter)
6326 	PMM(bios16_leave)
6327 	PMM(bootstrap)
6328 	PMM(is_valid_memattr)
6329 	PMM(cache_bits)
6330 	PMM(ps_enabled)
6331 	PMM(pinit0)
6332 	PMM(pinit)
6333 	PMM(activate)
6334 	PMM(activate_boot)
6335 	PMM(advise)
6336 	PMM(clear_modify)
6337 	PMM(change_attr)
6338 	PMM(mincore)
6339 	PMM(copy)
6340 	PMM(copy_page)
6341 	PMM(copy_pages)
6342 	PMM(zero_page)
6343 	PMM(zero_page_area)
6344 	PMM(enter)
6345 	PMM(enter_object)
6346 	PMM(enter_quick)
6347 	PMM(kenter_temporary)
6348 	PMM(object_init_pt)
6349 	PMM(unwire)
6350 	PMM(page_exists_quick)
6351 	PMM(page_wired_mappings)
6352 	PMM(page_is_mapped)
6353 	PMM(remove_pages)
6354 	PMM(is_modified)
6355 	PMM(is_prefaultable)
6356 	PMM(is_referenced)
6357 	PMM(remove_write)
6358 	PMM(ts_referenced)
6359 	PMM(mapdev_attr)
6360 	PMM(unmapdev)
6361 	PMM(page_set_memattr)
6362 	PMM(extract)
6363 	PMM(extract_and_hold)
6364 	PMM(map)
6365 	PMM(qenter)
6366 	PMM(qremove)
6367 	PMM(release)
6368 	PMM(remove)
6369 	PMM(protect)
6370 	PMM(remove_all)
6371 	PMM(init)
6372 	PMM(init_pat)
6373 	PMM(growkernel)
6374 	PMM(invalidate_page)
6375 	PMM(invalidate_range)
6376 	PMM(invalidate_all)
6377 	PMM(invalidate_cache)
6378 	PMM(flush_page)
6379 	PMM(kenter)
6380 	PMM(kremove)
6381 	PMM(sysctl_kmaps)
6382 };
6383