/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 * Copyright (c) 2005-2010 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
 */
/*-
 * Copyright (c) 2003 Networks Associates Technology, Inc.
 * All rights reserved.
 * Copyright (c) 2018 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Jake Burkholder,
 * Safeport Network Services, and Network Associates Laboratories, the
 * Security Research Division of Network Associates, Inc. under
 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
 * CHATS research program.
 *
 * Portions of this software were developed by
 * Konstantin Belousov <kib@FreeBSD.org> under sponsorship from
 * the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_apic.h"
#include "opt_cpu.h"
#include "opt_pmap.h"
#include "opt_smp.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <machine/bootinfo.h>
#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#ifdef DEV_APIC
#include <sys/bus.h>
#include <machine/intr_machdep.h>
#include <x86/apicvar.h>
#endif
#include <x86/ifunc.h>

static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "VM/pmap parameters");

#include <machine/vmparam.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>
#include <machine/pmap_base.h>

vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */

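/*
 * Non-zero indicates that this pmap supports I/O on buffers that have no
 * permanent KVA mapping ("unmapped" buffer I/O).
 */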
int unmapped_buf_allowed = 1;

int pti;

u_long physfree;	/* phys addr of next free page */
u_long vm86phystk;	/* PA of vm86/bios stack */
u_long vm86paddr;	/* address of vm86 region */
int vm86pa;		/* phys addr of vm86 region */
u_long KERNend;		/* phys addr end of kernel (just after bss) */
u_long KPTphys;		/* phys addr of kernel page tables */
caddr_t ptvmmap = 0;
vm_offset_t kernel_vm_end;

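/*
 * Paging geometry that differs between the PAE and non-PAE pmaps
 * (superpage shift and related orders); presumably filled in by the
 * implementation chosen at boot.
 */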
int i386_pmap_VM_NFREEORDER;
int i386_pmap_VM_LEVEL_0_ORDER;
int i386_pmap_PDRSHIFT;

int pat_works = 1;
SYSCTL_INT(_vm_pmap, OID_AUTO, pat_works, CTLFLAG_RD,
    &pat_works, 0,
    "Is page attribute table fully functional?");

int pg_ps_enabled = 1;
SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &pg_ps_enabled, 0,
    "Are large page mappings enabled?");

int pv_entry_max = 0;
SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_max, CTLFLAG_RD,
    &pv_entry_max, 0,
    "Max number of PV entries");

int pv_entry_count = 0;
SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD,
    &pv_entry_count, 0,
    "Current number of pv entries");

#ifndef PMAP_SHPGPERPROC
#define PMAP_SHPGPERPROC 200
#endif

int shpgperproc = PMAP_SHPGPERPROC;
SYSCTL_INT(_vm_pmap, OID_AUTO, shpgperproc, CTLFLAG_RD,
    &shpgperproc, 0,
    "Page share factor per proc");

static SYSCTL_NODE(_vm_pmap, OID_AUTO, pde, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "2/4MB page mapping counters");

u_long pmap_pde_demotions;
SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, demotions, CTLFLAG_RD,
    &pmap_pde_demotions, 0,
    "2/4MB page demotions");

u_long pmap_pde_mappings;
SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, mappings, CTLFLAG_RD,
    &pmap_pde_mappings, 0,
    "2/4MB page mappings");

u_long pmap_pde_p_failures;
SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, p_failures, CTLFLAG_RD,
    &pmap_pde_p_failures, 0,
    "2/4MB page promotion failures");

u_long pmap_pde_promotions;
SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, promotions, CTLFLAG_RD,
    &pmap_pde_promotions, 0,
    "2/4MB page promotions");

#ifdef SMP
int PMAP1changedcpu;
SYSCTL_INT(_debug, OID_AUTO, PMAP1changedcpu, CTLFLAG_RD,
    &PMAP1changedcpu, 0,
    "Number of times pmap_pte_quick changed CPU with same PMAP1");
#endif

int PMAP1changed;
SYSCTL_INT(_debug, OID_AUTO, PMAP1changed, CTLFLAG_RD,
    &PMAP1changed, 0,
    "Number of times pmap_pte_quick changed PMAP1");
int PMAP1unchanged;
SYSCTL_INT(_debug, OID_AUTO, PMAP1unchanged, CTLFLAG_RD,
    &PMAP1unchanged, 0,
    "Number of times pmap_pte_quick didn't change PMAP1");

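/*
 * Sysctls reporting the total size of the kernel virtual address space
 * and how much of it has not yet been grown into by pmap_growkernel().
 */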
static int
kvm_size(SYSCTL_HANDLER_ARGS)
{
	unsigned long ksize;

	ksize = VM_MAX_KERNEL_ADDRESS - KERNBASE;
	return (sysctl_handle_long(oidp, &ksize, 0, req));
}
SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG | CTLFLAG_RD | CTLFLAG_MPSAFE,
    0, 0, kvm_size, "IU",
    "Size of KVM");

static int
kvm_free(SYSCTL_HANDLER_ARGS)
{
	unsigned long kfree;

	kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end;
	return (sysctl_handle_long(oidp, &kfree, 0, req));
}
SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG | CTLFLAG_RD | CTLFLAG_MPSAFE,
    0, 0, kvm_free, "IU",
    "Amount of KVM free");

#ifdef PV_STATS
int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail;
long pv_entry_frees, pv_entry_allocs;
int pv_entry_spare;

SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD,
    &pc_chunk_count, 0,
    "Current number of pv entry chunks");
SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD,
    &pc_chunk_allocs, 0,
    "Number of pv entry chunk allocations");
SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD,
    &pc_chunk_frees, 0,
    "Number of pv entry chunk frees");
SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD,
    &pc_chunk_tryfail, 0,
    "Number of failed attempts to allocate a chunk page");
SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD,
    &pv_entry_frees, 0,
    "Number of pv entry frees");
SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD,
    &pv_entry_allocs, 0,
    "Number of pv entry allocations");
SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD,
    &pv_entry_spare, 0,
    "Current number of spare pv entries");
#endif

struct pmap kernel_pmap_store;
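/*
 * Every pmap entry point below is a thin wrapper that dispatches through
 * this method table.  It is assigned exactly once, in pmap_cold(), before
 * any of the wrappers can run.
 */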
static struct pmap_methods *pmap_methods_ptr;

static int
sysctl_kmaps(SYSCTL_HANDLER_ARGS)
{
	return (pmap_methods_ptr->pm_sysctl_kmaps(oidp, arg1, arg2, req));
}
SYSCTL_OID(_vm_pmap, OID_AUTO, kernel_maps,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE | CTLFLAG_SKIP,
    NULL, 0, sysctl_kmaps, "A",
    "Dump kernel address layout");

/*
 * Initialize a vm_page's machine-dependent fields.
 */
void
pmap_page_init(vm_page_t m)
{

	TAILQ_INIT(&m->md.pv_list);
	m->md.pat_mode = PAT_WRITE_BACK;
}

void
invltlb_glob(void)
{

	invltlb();
}

static void pmap_invalidate_cache_range_selfsnoop(vm_offset_t sva,
    vm_offset_t eva);
static void pmap_invalidate_cache_range_all(vm_offset_t sva,
    vm_offset_t eva);

void
pmap_flush_page(vm_page_t m)
{

	pmap_methods_ptr->pm_flush_page(m);
}

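/*
 * Resolved once via ifunc: CPUs with self-snoop (CPUID SS) keep their
 * caches coherent across memory-type changes and need no flush, CPUs
 * with CLFLUSH can flush one cache line at a time, and anything else
 * must pay for a full cache invalidation.
 */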
DEFINE_IFUNC(, void, pmap_invalidate_cache_range, (vm_offset_t, vm_offset_t))
{

	if ((cpu_feature & CPUID_SS) != 0)
		return (pmap_invalidate_cache_range_selfsnoop);
	if ((cpu_feature & CPUID_CLFSH) != 0)
		return (pmap_force_invalidate_cache_range);
	return (pmap_invalidate_cache_range_all);
}

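/*
 * Ranges at least this large are flushed with a full cache invalidation
 * rather than one CLFLUSH per cache line.
 */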
#define	PMAP_CLFLUSH_THRESHOLD	(2 * 1024 * 1024)

static void
pmap_invalidate_cache_range_check_align(vm_offset_t sva, vm_offset_t eva)
{

	KASSERT((sva & PAGE_MASK) == 0,
	    ("pmap_invalidate_cache_range: sva not page-aligned"));
	KASSERT((eva & PAGE_MASK) == 0,
	    ("pmap_invalidate_cache_range: eva not page-aligned"));
}

static void
pmap_invalidate_cache_range_selfsnoop(vm_offset_t sva, vm_offset_t eva)
{

	pmap_invalidate_cache_range_check_align(sva, eva);
}

void
pmap_force_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
{

	sva &= ~(vm_offset_t)(cpu_clflush_line_size - 1);
	if (eva - sva >= PMAP_CLFLUSH_THRESHOLD) {
		/*
		 * The supplied range is 2MB or larger.
		 * Globally invalidate the cache.
		 */
		pmap_invalidate_cache();
		return;
	}

#ifdef DEV_APIC
	/*
	 * XXX: Some CPUs fault, hang, or trash the local APIC
	 * registers if we use CLFLUSH on the local APIC
	 * range.  The local APIC is always uncached, so we
	 * don't need to flush for that range anyway.
	 */
	if (pmap_kextract(sva) == lapic_paddr)
		return;
#endif

	if ((cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0) {
		/*
		 * Do a per-cache line flush.  Use the sfence
		 * instruction to ensure that previous stores are
		 * included in the write-back.  The processor
		 * propagates the flush to other processors in the
		 * cache coherence domain.
		 */
		sfence();
		for (; sva < eva; sva += cpu_clflush_line_size)
			clflushopt(sva);
		sfence();
	} else {
		/*
		 * Writes are ordered by CLFLUSH on Intel CPUs.
		 */
		if (cpu_vendor_id != CPU_VENDOR_INTEL)
			mfence();
		for (; sva < eva; sva += cpu_clflush_line_size)
			clflush(sva);
		if (cpu_vendor_id != CPU_VENDOR_INTEL)
			mfence();
	}
}

static void
pmap_invalidate_cache_range_all(vm_offset_t sva, vm_offset_t eva)
{

	pmap_invalidate_cache_range_check_align(sva, eva);
	pmap_invalidate_cache();
}

void
pmap_invalidate_cache_pages(vm_page_t *pages, int count)
{
	int i;

	if (count >= PMAP_CLFLUSH_THRESHOLD / PAGE_SIZE ||
	    (cpu_feature & CPUID_CLFSH) == 0) {
		pmap_invalidate_cache();
	} else {
		for (i = 0; i < count; i++)
			pmap_flush_page(pages[i]);
	}
}

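/*
 * The remainder of this file mostly consists of wrappers that forward to
 * the PAE or non-PAE implementation selected in pmap_cold().
 */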
void
pmap_ksetrw(vm_offset_t va)
{

	pmap_methods_ptr->pm_ksetrw(va);
}

void
pmap_remap_lower(bool enable)
{

	pmap_methods_ptr->pm_remap_lower(enable);
}

void
pmap_remap_lowptdi(bool enable)
{

	pmap_methods_ptr->pm_remap_lowptdi(enable);
}

void
pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr, vm_size_t size)
{

	return (pmap_methods_ptr->pm_align_superpage(object, offset,
	    addr, size));
}

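/*
 * Short-lived, single-page KVA mappings, used for things like unmapped
 * buffer I/O.  Each pmap_quick_enter_page() must be paired with
 * pmap_quick_remove_page(), and the caller may not sleep while the
 * mapping is held.
 */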
vm_offset_t
pmap_quick_enter_page(vm_page_t m)
{

	return (pmap_methods_ptr->pm_quick_enter_page(m));
}

void
pmap_quick_remove_page(vm_offset_t addr)
{

	return (pmap_methods_ptr->pm_quick_remove_page(addr));
}

void *
pmap_trm_alloc(size_t size, int flags)
{

	return (pmap_methods_ptr->pm_trm_alloc(size, flags));
}

void
pmap_trm_free(void *addr, size_t size)
{

	pmap_methods_ptr->pm_trm_free(addr, size);
}

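/*
 * No-op: x86 keeps instruction caches coherent with data caches, so
 * there is nothing to synchronize here.
 */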
void
pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
{
}

vm_offset_t
pmap_get_map_low(void)
{

	return (pmap_methods_ptr->pm_get_map_low());
}

vm_offset_t
pmap_get_vm_maxuser_address(void)
{

	return (pmap_methods_ptr->pm_get_vm_maxuser_address());
}

vm_paddr_t
pmap_kextract(vm_offset_t va)
{

	return (pmap_methods_ptr->pm_kextract(va));
}

vm_paddr_t
pmap_pg_frame(vm_paddr_t pa)
{

	return (pmap_methods_ptr->pm_pg_frame(pa));
}

void
pmap_sf_buf_map(struct sf_buf *sf)
{

	pmap_methods_ptr->pm_sf_buf_map(sf);
}

void
pmap_cp_slow0_map(vm_offset_t kaddr, int plen, vm_page_t *ma)
{

	pmap_methods_ptr->pm_cp_slow0_map(kaddr, plen, ma);
}

u_int
pmap_get_kcr3(void)
{

	return (pmap_methods_ptr->pm_get_kcr3());
}

u_int
pmap_get_cr3(pmap_t pmap)
{

	return (pmap_methods_ptr->pm_get_cr3(pmap));
}

caddr_t
pmap_cmap3(vm_paddr_t pa, u_int pte_flags)
{

	return (pmap_methods_ptr->pm_cmap3(pa, pte_flags));
}

void
pmap_basemem_setup(u_int basemem)
{

	pmap_methods_ptr->pm_basemem_setup(basemem);
}

void
pmap_set_nx(void)
{

	pmap_methods_ptr->pm_set_nx();
}

void *
pmap_bios16_enter(void)
{

	return (pmap_methods_ptr->pm_bios16_enter());
}

void
pmap_bios16_leave(void *handle)
{

	pmap_methods_ptr->pm_bios16_leave(handle);
}

void
pmap_bootstrap(vm_paddr_t firstaddr)
{

	pmap_methods_ptr->pm_bootstrap(firstaddr);
}

boolean_t
pmap_is_valid_memattr(pmap_t pmap, vm_memattr_t mode)
{

	return (pmap_methods_ptr->pm_is_valid_memattr(pmap, mode));
}

int
pmap_cache_bits(pmap_t pmap, int mode, boolean_t is_pde)
{

	return (pmap_methods_ptr->pm_cache_bits(pmap, mode, is_pde));
}

bool
pmap_ps_enabled(pmap_t pmap)
{

	return (pmap_methods_ptr->pm_ps_enabled(pmap));
}

void
pmap_pinit0(pmap_t pmap)
{

	pmap_methods_ptr->pm_pinit0(pmap);
}

int
pmap_pinit(pmap_t pmap)
{

	return (pmap_methods_ptr->pm_pinit(pmap));
}

void
pmap_activate(struct thread *td)
{

	pmap_methods_ptr->pm_activate(td);
}

void
pmap_activate_boot(pmap_t pmap)
{

	pmap_methods_ptr->pm_activate_boot(pmap);
}

void
pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
{

	pmap_methods_ptr->pm_advise(pmap, sva, eva, advice);
}

void
pmap_clear_modify(vm_page_t m)
{

	pmap_methods_ptr->pm_clear_modify(m);
}

int
pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
{

	return (pmap_methods_ptr->pm_change_attr(va, size, mode));
}

int
pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap)
{

	return (pmap_methods_ptr->pm_mincore(pmap, addr, pap));
}

void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
    vm_offset_t src_addr)
{

	pmap_methods_ptr->pm_copy(dst_pmap, src_pmap, dst_addr, len, src_addr);
}

void
pmap_copy_page(vm_page_t src, vm_page_t dst)
{

	pmap_methods_ptr->pm_copy_page(src, dst);
}

void
pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
    vm_offset_t b_offset, int xfersize)
{

	pmap_methods_ptr->pm_copy_pages(ma, a_offset, mb, b_offset, xfersize);
}

void
pmap_zero_page(vm_page_t m)
{

	pmap_methods_ptr->pm_zero_page(m);
}

void
pmap_zero_page_area(vm_page_t m, int off, int size)
{

	pmap_methods_ptr->pm_zero_page_area(m, off, size);
}

int
pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
    u_int flags, int8_t psind)
{

	return (pmap_methods_ptr->pm_enter(pmap, va, m, prot, flags, psind));
}

void
pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
    vm_page_t m_start, vm_prot_t prot)
{

	pmap_methods_ptr->pm_enter_object(pmap, start, end, m_start, prot);
}

void
pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
{

	pmap_methods_ptr->pm_enter_quick(pmap, va, m, prot);
}

void *
pmap_kenter_temporary(vm_paddr_t pa, int i)
{

	return (pmap_methods_ptr->pm_kenter_temporary(pa, i));
}

void
pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
    vm_pindex_t pindex, vm_size_t size)
{

	pmap_methods_ptr->pm_object_init_pt(pmap, addr, object, pindex, size);
}

void
pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{

	pmap_methods_ptr->pm_unwire(pmap, sva, eva);
}

boolean_t
pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{

	return (pmap_methods_ptr->pm_page_exists_quick(pmap, m));
}

int
pmap_page_wired_mappings(vm_page_t m)
{

	return (pmap_methods_ptr->pm_page_wired_mappings(m));
}

boolean_t
pmap_page_is_mapped(vm_page_t m)
{

	return (pmap_methods_ptr->pm_page_is_mapped(m));
}

void
pmap_remove_pages(pmap_t pmap)
{

	pmap_methods_ptr->pm_remove_pages(pmap);
}

boolean_t
pmap_is_modified(vm_page_t m)
{

	return (pmap_methods_ptr->pm_is_modified(m));
}

boolean_t
pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
{

	return (pmap_methods_ptr->pm_is_prefaultable(pmap, addr));
}

boolean_t
pmap_is_referenced(vm_page_t m)
{

	return (pmap_methods_ptr->pm_is_referenced(m));
}

void
pmap_remove_write(vm_page_t m)
{

	pmap_methods_ptr->pm_remove_write(m);
}

int
pmap_ts_referenced(vm_page_t m)
{

	return (pmap_methods_ptr->pm_ts_referenced(m));
}

void *
pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
{

	return (pmap_methods_ptr->pm_mapdev_attr(pa, size, mode,
	    MAPDEV_SETATTR));
}

void *
pmap_mapdev(vm_paddr_t pa, vm_size_t size)
{

	return (pmap_methods_ptr->pm_mapdev_attr(pa, size, PAT_UNCACHEABLE,
	    MAPDEV_SETATTR));
}

void *
pmap_mapbios(vm_paddr_t pa, vm_size_t size)
{

	return (pmap_methods_ptr->pm_mapdev_attr(pa, size, PAT_WRITE_BACK, 0));
}

void
pmap_unmapdev(void *p, vm_size_t size)
{

	pmap_methods_ptr->pm_unmapdev(p, size);
}

void
pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
{

	pmap_methods_ptr->pm_page_set_memattr(m, ma);
}

vm_paddr_t
pmap_extract(pmap_t pmap, vm_offset_t va)
{

	return (pmap_methods_ptr->pm_extract(pmap, va));
}

vm_page_t
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{

	return (pmap_methods_ptr->pm_extract_and_hold(pmap, va, prot));
}

vm_offset_t
pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
{

	return (pmap_methods_ptr->pm_map(virt, start, end, prot));
}

void
pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
{

	pmap_methods_ptr->pm_qenter(sva, ma, count);
}

void
pmap_qremove(vm_offset_t sva, int count)
{

	pmap_methods_ptr->pm_qremove(sva, count);
}

void
pmap_release(pmap_t pmap)
{

	pmap_methods_ptr->pm_release(pmap);
}

void
pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{

	pmap_methods_ptr->pm_remove(pmap, sva, eva);
}

void
pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
{

	pmap_methods_ptr->pm_protect(pmap, sva, eva, prot);
}

void
pmap_remove_all(vm_page_t m)
{

	pmap_methods_ptr->pm_remove_all(m);
}

void
pmap_init(void)
{

	pmap_methods_ptr->pm_init();
}

void
pmap_init_pat(void)
{

	pmap_methods_ptr->pm_init_pat();
}

void
pmap_growkernel(vm_offset_t addr)
{

	pmap_methods_ptr->pm_growkernel(addr);
}

void
pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
{

	pmap_methods_ptr->pm_invalidate_page(pmap, va);
}

void
pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{

	pmap_methods_ptr->pm_invalidate_range(pmap, sva, eva);
}

void
pmap_invalidate_all(pmap_t pmap)
{

	pmap_methods_ptr->pm_invalidate_all(pmap);
}

void
pmap_invalidate_cache(void)
{

	pmap_methods_ptr->pm_invalidate_cache();
}

void
pmap_kenter(vm_offset_t va, vm_paddr_t pa)
{

	pmap_methods_ptr->pm_kenter(va, pa);
}

void
pmap_kremove(vm_offset_t va)
{

	pmap_methods_ptr->pm_kremove(va);
}

extern struct pmap_methods pmap_pae_methods, pmap_nopae_methods;
int pae_mode;
SYSCTL_INT(_vm_pmap, OID_AUTO, pae_mode, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &pae_mode, 0,
    "Is the PAE pmap in use?");

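/*
 * Called very early in boot: selects the PAE or non-PAE pmap
 * implementation based on CPUID and the vm.pmap.pae_mode loader tunable,
 * then lets the chosen implementation finish its cold-start setup.
 */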
void
pmap_cold(void)
{

	init_static_kenv((char *)bootinfo.bi_envp, 0);
	pae_mode = (cpu_feature & CPUID_PAE) != 0;
	if (pae_mode)
		TUNABLE_INT_FETCH("vm.pmap.pae_mode", &pae_mode);
	if (pae_mode) {
		pmap_methods_ptr = &pmap_pae_methods;
		pmap_pae_cold();
	} else {
		pmap_methods_ptr = &pmap_nopae_methods;
		pmap_nopae_cold();
	}
}
969