xref: /freebsd/sys/i386/i386/pmap_base.c (revision 42249ef2)
/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 * Copyright (c) 2005-2010 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
 */
/*-
 * Copyright (c) 2003 Networks Associates Technology, Inc.
 * All rights reserved.
 * Copyright (c) 2018 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Jake Burkholder,
 * Safeport Network Services, and Network Associates Laboratories, the
 * Security Research Division of Network Associates, Inc. under
 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
 * CHATS research program.
 *
 * Portions of this software were developed by
 * Konstantin Belousov <kib@FreeBSD.org> under sponsorship from
 * the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_apic.h"
#include "opt_cpu.h"
#include "opt_pmap.h"
#include "opt_smp.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <machine/bootinfo.h>
#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#ifdef DEV_APIC
#include <sys/bus.h>
#include <machine/intr_machdep.h>
#include <x86/apicvar.h>
#endif
#include <x86/ifunc.h>

static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");

#include <machine/vmparam.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>
#include <machine/pmap_base.h>

vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */

int unmapped_buf_allowed = 1;

int pti;

u_long physfree;	/* phys addr of next free page */
u_long vm86phystk;	/* PA of vm86/bios stack */
u_long vm86paddr;	/* address of vm86 region */
int vm86pa;		/* phys addr of vm86 region */
u_long KERNend;		/* phys addr end of kernel (just after bss) */
u_long KPTphys;		/* phys addr of kernel page tables */
caddr_t ptvmmap = 0;
vm_offset_t kernel_vm_end;

int i386_pmap_VM_NFREEORDER;
int i386_pmap_VM_LEVEL_0_ORDER;
int i386_pmap_PDRSHIFT;

int pat_works = 1;
SYSCTL_INT(_vm_pmap, OID_AUTO, pat_works, CTLFLAG_RD,
    &pat_works, 0,
    "Is page attribute table fully functional?");

int pg_ps_enabled = 1;
SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &pg_ps_enabled, 0,
    "Are large page mappings enabled?");

int pv_entry_max = 0;
SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_max, CTLFLAG_RD,
    &pv_entry_max, 0,
    "Max number of PV entries");

int pv_entry_count = 0;
SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD,
    &pv_entry_count, 0,
    "Current number of pv entries");

#ifndef PMAP_SHPGPERPROC
#define PMAP_SHPGPERPROC 200
#endif

int shpgperproc = PMAP_SHPGPERPROC;
SYSCTL_INT(_vm_pmap, OID_AUTO, shpgperproc, CTLFLAG_RD,
    &shpgperproc, 0,
    "Page share factor per proc");

static SYSCTL_NODE(_vm_pmap, OID_AUTO, pde, CTLFLAG_RD, 0,
    "2/4MB page mapping counters");

u_long pmap_pde_demotions;
SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, demotions, CTLFLAG_RD,
    &pmap_pde_demotions, 0,
    "2/4MB page demotions");

u_long pmap_pde_mappings;
SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, mappings, CTLFLAG_RD,
    &pmap_pde_mappings, 0,
    "2/4MB page mappings");

u_long pmap_pde_p_failures;
SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, p_failures, CTLFLAG_RD,
    &pmap_pde_p_failures, 0,
    "2/4MB page promotion failures");

u_long pmap_pde_promotions;
SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, promotions, CTLFLAG_RD,
    &pmap_pde_promotions, 0,
    "2/4MB page promotions");

#ifdef SMP
int PMAP1changedcpu;
SYSCTL_INT(_debug, OID_AUTO, PMAP1changedcpu, CTLFLAG_RD,
    &PMAP1changedcpu, 0,
    "Number of times pmap_pte_quick changed CPU with same PMAP1");
#endif

int PMAP1changed;
SYSCTL_INT(_debug, OID_AUTO, PMAP1changed, CTLFLAG_RD,
    &PMAP1changed, 0,
    "Number of times pmap_pte_quick changed PMAP1");
int PMAP1unchanged;
SYSCTL_INT(_debug, OID_AUTO, PMAP1unchanged, CTLFLAG_RD,
    &PMAP1unchanged, 0,
    "Number of times pmap_pte_quick didn't change PMAP1");

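/*
 * Read-only sysctls reporting the total size of the kernel virtual
 * address space and the amount not yet consumed by pmap_growkernel().
 */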
static int
kvm_size(SYSCTL_HANDLER_ARGS)
{
	unsigned long ksize;

	ksize = VM_MAX_KERNEL_ADDRESS - KERNBASE;
	return (sysctl_handle_long(oidp, &ksize, 0, req));
}
SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG | CTLFLAG_RD | CTLFLAG_MPSAFE,
    0, 0, kvm_size, "LU",
    "Size of KVM");

static int
kvm_free(SYSCTL_HANDLER_ARGS)
{
	unsigned long kfree;

	kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end;
	return (sysctl_handle_long(oidp, &kfree, 0, req));
}
SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG | CTLFLAG_RD | CTLFLAG_MPSAFE,
    0, 0, kvm_free, "LU",
    "Amount of KVM free");

#ifdef PV_STATS
int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail;
long pv_entry_frees, pv_entry_allocs;
int pv_entry_spare;

SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD,
    &pc_chunk_count, 0,
    "Current number of pv entry chunks");
SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD,
    &pc_chunk_allocs, 0,
    "Total number of pv entry chunks allocated");
SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD,
    &pc_chunk_frees, 0,
    "Total number of pv entry chunk frees");
SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD,
    &pc_chunk_tryfail, 0,
    "Number of failed attempts to allocate a pv entry chunk page");
SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD,
    &pv_entry_frees, 0,
    "Total number of pv entry frees");
SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD,
    &pv_entry_allocs, 0,
    "Total number of pv entry allocs");
SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD,
    &pv_entry_spare, 0,
    "Current number of spare pv entries");
#endif

struct pmap kernel_pmap_store;
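/*
 * Method table for the active pmap implementation (PAE or non-PAE),
 * selected once at early boot by pmap_cold().
 */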
static struct pmap_methods *pmap_methods_ptr;

/*
 * Initialize a vm_page's machine-dependent fields.
 */
void
pmap_page_init(vm_page_t m)
{

	TAILQ_INIT(&m->md.pv_list);
	m->md.pat_mode = PAT_WRITE_BACK;
}

void
invltlb_glob(void)
{

	invltlb();
}

static void pmap_invalidate_cache_range_selfsnoop(vm_offset_t sva,
    vm_offset_t eva);
static void pmap_invalidate_cache_range_all(vm_offset_t sva,
    vm_offset_t eva);

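/*
 * Most of the pmap entry points below are thin wrappers: they simply
 * dispatch through pmap_methods_ptr to the implementation chosen in
 * pmap_cold().
 */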
void
pmap_flush_page(vm_page_t m)
{

	pmap_methods_ptr->pm_flush_page(m);
}

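/*
 * The ifunc resolver picks the cheapest correct cache-invalidation
 * routine at boot: self-snooping CPUs (CPUID_SS) can skip the flush
 * here entirely, CPUs with CLFLUSH (CPUID_CLFSH) can flush by cache
 * line, and anything else must invalidate the entire cache.
 */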
DEFINE_IFUNC(, void, pmap_invalidate_cache_range, (vm_offset_t, vm_offset_t))
{

	if ((cpu_feature & CPUID_SS) != 0)
		return (pmap_invalidate_cache_range_selfsnoop);
	if ((cpu_feature & CPUID_CLFSH) != 0)
		return (pmap_force_invalidate_cache_range);
	return (pmap_invalidate_cache_range_all);
}

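/*
 * For ranges of at least this size, a full cache invalidation is
 * assumed to be cheaper than flushing each line with CLFLUSH.
 */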
#define	PMAP_CLFLUSH_THRESHOLD	(2 * 1024 * 1024)

static void
pmap_invalidate_cache_range_check_align(vm_offset_t sva, vm_offset_t eva)
{

	KASSERT((sva & PAGE_MASK) == 0,
	    ("pmap_invalidate_cache_range: sva not page-aligned"));
	KASSERT((eva & PAGE_MASK) == 0,
	    ("pmap_invalidate_cache_range: eva not page-aligned"));
}

static void
pmap_invalidate_cache_range_selfsnoop(vm_offset_t sva, vm_offset_t eva)
{

	pmap_invalidate_cache_range_check_align(sva, eva);
}

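/*
 * Flush the cache lines covering [sva, eva) unconditionally, i.e.
 * even on CPUs whose self-snooping would otherwise make the flush
 * unnecessary.
 */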
void
pmap_force_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
{

	sva &= ~(vm_offset_t)(cpu_clflush_line_size - 1);
	if (eva - sva >= PMAP_CLFLUSH_THRESHOLD) {
		/*
		 * The supplied range is at least 2MB.  Globally
		 * invalidate the cache instead of flushing each
		 * line individually.
		 */
		pmap_invalidate_cache();
		return;
	}

#ifdef DEV_APIC
	/*
	 * XXX: Some CPUs fault, hang, or trash the local APIC
	 * registers if we use CLFLUSH on the local APIC
	 * range.  The local APIC is always uncached, so we
	 * don't need to flush for that range anyway.
	 */
	if (pmap_kextract(sva) == lapic_paddr)
		return;
#endif

	if ((cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0) {
		/*
		 * Do per-cache line flush.  Use the sfence
		 * instruction to ensure that previous stores are
		 * included in the write-back.  The processor
		 * propagates the flush to other processors in the
		 * cache coherence domain.
		 */
		sfence();
		for (; sva < eva; sva += cpu_clflush_line_size)
			clflushopt(sva);
		sfence();
	} else {
		/*
		 * Writes are ordered by CLFLUSH on Intel CPUs.
		 */
		if (cpu_vendor_id != CPU_VENDOR_INTEL)
			mfence();
		for (; sva < eva; sva += cpu_clflush_line_size)
			clflush(sva);
		if (cpu_vendor_id != CPU_VENDOR_INTEL)
			mfence();
	}
}

static void
pmap_invalidate_cache_range_all(vm_offset_t sva, vm_offset_t eva)
{

	pmap_invalidate_cache_range_check_align(sva, eva);
	pmap_invalidate_cache();
}

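/*
 * Flush the cache for an array of pages, falling back to a full
 * cache invalidation when the set is large or CLFLUSH is not
 * available.
 */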
void
pmap_invalidate_cache_pages(vm_page_t *pages, int count)
{
	int i;

	if (count >= PMAP_CLFLUSH_THRESHOLD / PAGE_SIZE ||
	    (cpu_feature & CPUID_CLFSH) == 0) {
		pmap_invalidate_cache();
	} else {
		for (i = 0; i < count; i++)
			pmap_flush_page(pages[i]);
	}
}

void
pmap_ksetrw(vm_offset_t va)
{

	pmap_methods_ptr->pm_ksetrw(va);
}

void
pmap_remap_lower(bool enable)
{

	pmap_methods_ptr->pm_remap_lower(enable);
}

void
pmap_remap_lowptdi(bool enable)
{

	pmap_methods_ptr->pm_remap_lowptdi(enable);
}

void
pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr, vm_size_t size)
{

	pmap_methods_ptr->pm_align_superpage(object, offset, addr, size);
}

vm_offset_t
pmap_quick_enter_page(vm_page_t m)
{

	return (pmap_methods_ptr->pm_quick_enter_page(m));
}

void
pmap_quick_remove_page(vm_offset_t addr)
{

	pmap_methods_ptr->pm_quick_remove_page(addr);
}

void *
pmap_trm_alloc(size_t size, int flags)
{

	return (pmap_methods_ptr->pm_trm_alloc(size, flags));
}

void
pmap_trm_free(void *addr, size_t size)
{

	pmap_methods_ptr->pm_trm_free(addr, size);
}

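/*
 * No-op: x86 keeps the instruction cache coherent with stores, so
 * there is nothing for the pmap layer to synchronize here.
 */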
void
pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
{
}

vm_offset_t
pmap_get_map_low(void)
{

	return (pmap_methods_ptr->pm_get_map_low());
}

vm_offset_t
pmap_get_vm_maxuser_address(void)
{

	return (pmap_methods_ptr->pm_get_vm_maxuser_address());
}

vm_paddr_t
pmap_kextract(vm_offset_t va)
{

	return (pmap_methods_ptr->pm_kextract(va));
}

vm_paddr_t
pmap_pg_frame(vm_paddr_t pa)
{

	return (pmap_methods_ptr->pm_pg_frame(pa));
}

void
pmap_sf_buf_map(struct sf_buf *sf)
{

	pmap_methods_ptr->pm_sf_buf_map(sf);
}

void
pmap_cp_slow0_map(vm_offset_t kaddr, int plen, vm_page_t *ma)
{

	pmap_methods_ptr->pm_cp_slow0_map(kaddr, plen, ma);
}

u_int
pmap_get_kcr3(void)
{

	return (pmap_methods_ptr->pm_get_kcr3());
}

u_int
pmap_get_cr3(pmap_t pmap)
{

	return (pmap_methods_ptr->pm_get_cr3(pmap));
}

caddr_t
pmap_cmap3(vm_paddr_t pa, u_int pte_flags)
{

	return (pmap_methods_ptr->pm_cmap3(pa, pte_flags));
}

void
pmap_basemem_setup(u_int basemem)
{

	pmap_methods_ptr->pm_basemem_setup(basemem);
}

void
pmap_set_nx(void)
{

	pmap_methods_ptr->pm_set_nx();
}

void *
pmap_bios16_enter(void)
{

	return (pmap_methods_ptr->pm_bios16_enter());
}

void
pmap_bios16_leave(void *handle)
{

	pmap_methods_ptr->pm_bios16_leave(handle);
}

void
pmap_bootstrap(vm_paddr_t firstaddr)
{

	pmap_methods_ptr->pm_bootstrap(firstaddr);
}

boolean_t
pmap_is_valid_memattr(pmap_t pmap, vm_memattr_t mode)
{

	return (pmap_methods_ptr->pm_is_valid_memattr(pmap, mode));
}

int
pmap_cache_bits(pmap_t pmap, int mode, boolean_t is_pde)
{

	return (pmap_methods_ptr->pm_cache_bits(pmap, mode, is_pde));
}

bool
pmap_ps_enabled(pmap_t pmap)
{

	return (pmap_methods_ptr->pm_ps_enabled(pmap));
}

void
pmap_pinit0(pmap_t pmap)
{

	pmap_methods_ptr->pm_pinit0(pmap);
}

int
pmap_pinit(pmap_t pmap)
{

	return (pmap_methods_ptr->pm_pinit(pmap));
}

void
pmap_activate(struct thread *td)
{

	pmap_methods_ptr->pm_activate(td);
}

void
pmap_activate_boot(pmap_t pmap)
{

	pmap_methods_ptr->pm_activate_boot(pmap);
}

void
pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
{

	pmap_methods_ptr->pm_advise(pmap, sva, eva, advice);
}

void
pmap_clear_modify(vm_page_t m)
{

	pmap_methods_ptr->pm_clear_modify(m);
}

int
pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
{

	return (pmap_methods_ptr->pm_change_attr(va, size, mode));
}

int
pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
{

	return (pmap_methods_ptr->pm_mincore(pmap, addr, locked_pa));
}

void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
    vm_offset_t src_addr)
{

	pmap_methods_ptr->pm_copy(dst_pmap, src_pmap, dst_addr, len, src_addr);
}

void
pmap_copy_page(vm_page_t src, vm_page_t dst)
{

	pmap_methods_ptr->pm_copy_page(src, dst);
}

void
pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
    vm_offset_t b_offset, int xfersize)
{

	pmap_methods_ptr->pm_copy_pages(ma, a_offset, mb, b_offset, xfersize);
}

void
pmap_zero_page(vm_page_t m)
{

	pmap_methods_ptr->pm_zero_page(m);
}

void
pmap_zero_page_area(vm_page_t m, int off, int size)
{

	pmap_methods_ptr->pm_zero_page_area(m, off, size);
}

int
pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
    u_int flags, int8_t psind)
{

	return (pmap_methods_ptr->pm_enter(pmap, va, m, prot, flags, psind));
}

void
pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
    vm_page_t m_start, vm_prot_t prot)
{

	pmap_methods_ptr->pm_enter_object(pmap, start, end, m_start, prot);
}

void
pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
{

	pmap_methods_ptr->pm_enter_quick(pmap, va, m, prot);
}

void *
pmap_kenter_temporary(vm_paddr_t pa, int i)
{

	return (pmap_methods_ptr->pm_kenter_temporary(pa, i));
}

void
pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
    vm_pindex_t pindex, vm_size_t size)
{

	pmap_methods_ptr->pm_object_init_pt(pmap, addr, object, pindex, size);
}

void
pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{

	pmap_methods_ptr->pm_unwire(pmap, sva, eva);
}

boolean_t
pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{

	return (pmap_methods_ptr->pm_page_exists_quick(pmap, m));
}

int
pmap_page_wired_mappings(vm_page_t m)
{

	return (pmap_methods_ptr->pm_page_wired_mappings(m));
}

boolean_t
pmap_page_is_mapped(vm_page_t m)
{

	return (pmap_methods_ptr->pm_page_is_mapped(m));
}

void
pmap_remove_pages(pmap_t pmap)
{

	pmap_methods_ptr->pm_remove_pages(pmap);
}

boolean_t
pmap_is_modified(vm_page_t m)
{

	return (pmap_methods_ptr->pm_is_modified(m));
}

boolean_t
pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
{

	return (pmap_methods_ptr->pm_is_prefaultable(pmap, addr));
}

boolean_t
pmap_is_referenced(vm_page_t m)
{

	return (pmap_methods_ptr->pm_is_referenced(m));
}

void
pmap_remove_write(vm_page_t m)
{

	pmap_methods_ptr->pm_remove_write(m);
}

int
pmap_ts_referenced(vm_page_t m)
{

	return (pmap_methods_ptr->pm_ts_referenced(m));
}

void *
pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
{

	return (pmap_methods_ptr->pm_mapdev_attr(pa, size, mode,
	    MAPDEV_SETATTR));
}

void *
pmap_mapdev(vm_paddr_t pa, vm_size_t size)
{

	return (pmap_methods_ptr->pm_mapdev_attr(pa, size, PAT_UNCACHEABLE,
	    MAPDEV_SETATTR));
}

void *
pmap_mapbios(vm_paddr_t pa, vm_size_t size)
{

	return (pmap_methods_ptr->pm_mapdev_attr(pa, size, PAT_WRITE_BACK, 0));
}

void
pmap_unmapdev(vm_offset_t va, vm_size_t size)
{

	pmap_methods_ptr->pm_unmapdev(va, size);
}

void
pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
{

	pmap_methods_ptr->pm_page_set_memattr(m, ma);
}

vm_paddr_t
pmap_extract(pmap_t pmap, vm_offset_t va)
{

	return (pmap_methods_ptr->pm_extract(pmap, va));
}

vm_page_t
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{

	return (pmap_methods_ptr->pm_extract_and_hold(pmap, va, prot));
}

vm_offset_t
pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
{

	return (pmap_methods_ptr->pm_map(virt, start, end, prot));
}

void
pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
{

	pmap_methods_ptr->pm_qenter(sva, ma, count);
}

void
pmap_qremove(vm_offset_t sva, int count)
{

	pmap_methods_ptr->pm_qremove(sva, count);
}

void
pmap_release(pmap_t pmap)
{

	pmap_methods_ptr->pm_release(pmap);
}

void
pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{

	pmap_methods_ptr->pm_remove(pmap, sva, eva);
}

void
pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
{

	pmap_methods_ptr->pm_protect(pmap, sva, eva, prot);
}

void
pmap_remove_all(vm_page_t m)
{

	pmap_methods_ptr->pm_remove_all(m);
}

void
pmap_init(void)
{

	pmap_methods_ptr->pm_init();
}

void
pmap_init_pat(void)
{

	pmap_methods_ptr->pm_init_pat();
}

void
pmap_growkernel(vm_offset_t addr)
{

	pmap_methods_ptr->pm_growkernel(addr);
}

void
pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
{

	pmap_methods_ptr->pm_invalidate_page(pmap, va);
}

void
pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{

	pmap_methods_ptr->pm_invalidate_range(pmap, sva, eva);
}

void
pmap_invalidate_all(pmap_t pmap)
{

	pmap_methods_ptr->pm_invalidate_all(pmap);
}

void
pmap_invalidate_cache(void)
{

	pmap_methods_ptr->pm_invalidate_cache();
}

void
pmap_kenter(vm_offset_t va, vm_paddr_t pa)
{

	pmap_methods_ptr->pm_kenter(va, pa);
}

void
pmap_kremove(vm_offset_t va)
{

	pmap_methods_ptr->pm_kremove(va);
}

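/*
 * Both page-table flavors are compiled into the kernel; pae_mode
 * records which one pmap_cold() selected.
 */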
extern struct pmap_methods pmap_pae_methods, pmap_nopae_methods;
int pae_mode;
SYSCTL_INT(_vm_pmap, OID_AUTO, pae_mode, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &pae_mode, 0,
    "PAE mode enabled");

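/*
 * Early boot entry point: probe CPUID for PAE support, honor the
 * vm.pmap.pae_mode loader tunable, and install the matching method
 * table before the chosen implementation builds the initial page
 * tables.
 */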
void
pmap_cold(void)
{

	init_static_kenv((char *)bootinfo.bi_envp, 0);
	pae_mode = (cpu_feature & CPUID_PAE) != 0;
	if (pae_mode)
		TUNABLE_INT_FETCH("vm.pmap.pae_mode", &pae_mode);
	if (pae_mode) {
		pmap_methods_ptr = &pmap_pae_methods;
		pmap_pae_cold();
	} else {
		pmap_methods_ptr = &pmap_nopae_methods;
		pmap_nopae_cold();
	}
}