1 /*-
2 * SPDX-License-Identifier: BSD-4-Clause
3 *
4 * Copyright (c) 1991 Regents of the University of California.
5 * All rights reserved.
6 * Copyright (c) 1994 John S. Dyson
7 * All rights reserved.
8 * Copyright (c) 1994 David Greenman
9 * All rights reserved.
10 * Copyright (c) 2003 Peter Wemm
11 * All rights reserved.
12 * Copyright (c) 2005-2010 Alan L. Cox <alc@cs.rice.edu>
13 * All rights reserved.
14 *
15 * This code is derived from software contributed to Berkeley by
16 * the Systems Programming Group of the University of Utah Computer
17 * Science Department and William Jolitz of UUNET Technologies Inc.
18 *
19 * Redistribution and use in source and binary forms, with or without
20 * modification, are permitted provided that the following conditions
21 * are met:
22 * 1. Redistributions of source code must retain the above copyright
23 * notice, this list of conditions and the following disclaimer.
24 * 2. Redistributions in binary form must reproduce the above copyright
25 * notice, this list of conditions and the following disclaimer in the
26 * documentation and/or other materials provided with the distribution.
27 * 3. All advertising materials mentioning features or use of this software
28 * must display the following acknowledgement:
29 * This product includes software developed by the University of
30 * California, Berkeley and its contributors.
31 * 4. Neither the name of the University nor the names of its contributors
32 * may be used to endorse or promote products derived from this software
33 * without specific prior written permission.
34 *
35 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
36 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
37 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
38 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
39 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
40 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
41 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
42 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
43 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
44 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
45 * SUCH DAMAGE.
46 */
47 /*-
48 * Copyright (c) 2003 Networks Associates Technology, Inc.
49 * Copyright (c) 2014-2020 The FreeBSD Foundation
50 * All rights reserved.
51 *
52 * This software was developed for the FreeBSD Project by Jake Burkholder,
53 * Safeport Network Services, and Network Associates Laboratories, the
54 * Security Research Division of Network Associates, Inc. under
55 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
56 * CHATS research program.
57 *
58 * Portions of this software were developed by
59 * Konstantin Belousov <kib@FreeBSD.org> under sponsorship from
60 * the FreeBSD Foundation.
61 *
62 * Redistribution and use in source and binary forms, with or without
63 * modification, are permitted provided that the following conditions
64 * are met:
65 * 1. Redistributions of source code must retain the above copyright
66 * notice, this list of conditions and the following disclaimer.
67 * 2. Redistributions in binary form must reproduce the above copyright
68 * notice, this list of conditions and the following disclaimer in the
69 * documentation and/or other materials provided with the distribution.
70 *
71 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
72 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
73 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
74 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
75 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
76 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
77 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
78 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
79 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
80 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
81 * SUCH DAMAGE.
82 */
83
84 #define AMD64_NPT_AWARE
85
86 #include <sys/cdefs.h>
/*
 * Manages physical address maps.
 *
 * Since the information managed by this module is
 * also stored by the logical address mapping module,
 * this module may throw away valid virtual-to-physical
 * mappings at almost any time.  However, invalidations
 * of virtual-to-physical mappings must be done as
 * requested.
 *
 * In order to cope with hardware architectures that make
 * virtual-to-physical map invalidations expensive, this
 * module may delay invalidation or protection-reduction
 * operations until they are actually necessary.  This
 * module is given full information as to which processors
 * are currently using which maps, and when physical maps
 * must be made correct.
 */
105
106 #include "opt_ddb.h"
107 #include "opt_pmap.h"
108 #include "opt_vm.h"
109
110 #include <sys/param.h>
111 #include <sys/asan.h>
112 #include <sys/bitstring.h>
113 #include <sys/bus.h>
114 #include <sys/systm.h>
115 #include <sys/counter.h>
116 #include <sys/kernel.h>
117 #include <sys/ktr.h>
118 #include <sys/lock.h>
119 #include <sys/malloc.h>
120 #include <sys/mman.h>
121 #include <sys/msan.h>
122 #include <sys/mutex.h>
123 #include <sys/proc.h>
124 #include <sys/rangeset.h>
125 #include <sys/rwlock.h>
126 #include <sys/sbuf.h>
127 #include <sys/smr.h>
128 #include <sys/sx.h>
129 #include <sys/turnstile.h>
130 #include <sys/vmem.h>
131 #include <sys/vmmeter.h>
132 #include <sys/sched.h>
133 #include <sys/sysctl.h>
134 #include <sys/smp.h>
135 #ifdef DDB
136 #include <sys/kdb.h>
137 #include <ddb/ddb.h>
138 #endif
139
140 #include <vm/vm.h>
141 #include <vm/vm_param.h>
142 #include <vm/vm_kern.h>
143 #include <vm/vm_page.h>
144 #include <vm/vm_map.h>
145 #include <vm/vm_object.h>
146 #include <vm/vm_extern.h>
147 #include <vm/vm_pageout.h>
148 #include <vm/vm_pager.h>
149 #include <vm/vm_phys.h>
150 #include <vm/vm_radix.h>
151 #include <vm/vm_reserv.h>
152 #include <vm/vm_dumpset.h>
153 #include <vm/uma.h>
154
155 #include <machine/asan.h>
156 #include <machine/intr_machdep.h>
157 #include <x86/apicvar.h>
158 #include <x86/ifunc.h>
159 #include <machine/cpu.h>
160 #include <machine/cputypes.h>
161 #include <machine/md_var.h>
162 #include <machine/msan.h>
163 #include <machine/pcb.h>
164 #include <machine/specialreg.h>
165 #ifdef SMP
166 #include <machine/smp.h>
167 #endif
168 #include <machine/sysarch.h>
169 #include <machine/tss.h>
170
171 #ifdef NUMA
172 #define PMAP_MEMDOM MAXMEMDOM
173 #else
174 #define PMAP_MEMDOM 1
175 #endif
176
177 static __inline bool
pmap_type_guest(pmap_t pmap)
179 {
180
181 return ((pmap->pm_type == PT_EPT) || (pmap->pm_type == PT_RVI));
182 }
183
184 static __inline bool
pmap_emulate_ad_bits(pmap_t pmap)
186 {
187
188 return ((pmap->pm_flags & PMAP_EMULATE_AD_BITS) != 0);
189 }
190
191 static __inline pt_entry_t
pmap_valid_bit(pmap_t pmap)
193 {
194 pt_entry_t mask;
195
196 switch (pmap->pm_type) {
197 case PT_X86:
198 case PT_RVI:
199 mask = X86_PG_V;
200 break;
201 case PT_EPT:
202 if (pmap_emulate_ad_bits(pmap))
203 mask = EPT_PG_EMUL_V;
204 else
205 mask = EPT_PG_READ;
206 break;
207 default:
208 panic("pmap_valid_bit: invalid pm_type %d", pmap->pm_type);
209 }
210
211 return (mask);
212 }
213
214 static __inline pt_entry_t
pmap_rw_bit(pmap_t pmap)
216 {
217 pt_entry_t mask;
218
219 switch (pmap->pm_type) {
220 case PT_X86:
221 case PT_RVI:
222 mask = X86_PG_RW;
223 break;
224 case PT_EPT:
225 if (pmap_emulate_ad_bits(pmap))
226 mask = EPT_PG_EMUL_RW;
227 else
228 mask = EPT_PG_WRITE;
229 break;
230 default:
231 panic("pmap_rw_bit: invalid pm_type %d", pmap->pm_type);
232 }
233
234 return (mask);
235 }
236
237 static pt_entry_t pg_g;
238
239 static __inline pt_entry_t
pmap_global_bit(pmap_t pmap)
241 {
242 pt_entry_t mask;
243
244 switch (pmap->pm_type) {
245 case PT_X86:
246 mask = pg_g;
247 break;
248 case PT_RVI:
249 case PT_EPT:
250 mask = 0;
251 break;
252 default:
253 panic("pmap_global_bit: invalid pm_type %d", pmap->pm_type);
254 }
255
256 return (mask);
257 }
258
259 static __inline pt_entry_t
pmap_accessed_bit(pmap_t pmap)
261 {
262 pt_entry_t mask;
263
264 switch (pmap->pm_type) {
265 case PT_X86:
266 case PT_RVI:
267 mask = X86_PG_A;
268 break;
269 case PT_EPT:
270 if (pmap_emulate_ad_bits(pmap))
271 mask = EPT_PG_READ;
272 else
273 mask = EPT_PG_A;
274 break;
275 default:
276 panic("pmap_accessed_bit: invalid pm_type %d", pmap->pm_type);
277 }
278
279 return (mask);
280 }
281
282 static __inline pt_entry_t
pmap_modified_bit(pmap_t pmap)
284 {
285 pt_entry_t mask;
286
287 switch (pmap->pm_type) {
288 case PT_X86:
289 case PT_RVI:
290 mask = X86_PG_M;
291 break;
292 case PT_EPT:
293 if (pmap_emulate_ad_bits(pmap))
294 mask = EPT_PG_WRITE;
295 else
296 mask = EPT_PG_M;
297 break;
298 default:
299 panic("pmap_modified_bit: invalid pm_type %d", pmap->pm_type);
300 }
301
302 return (mask);
303 }
304
305 static __inline pt_entry_t
pmap_pku_mask_bit(pmap_t pmap)
307 {
308
309 return (pmap->pm_type == PT_X86 ? X86_PG_PKU_MASK : 0);
310 }
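
/*
 * For reference, the bit selections made by the helpers above can be
 * summarized as follows (an illustrative, added summary; it is not an
 * exhaustive statement of the EPT format).  The global bit (pg_g) is
 * used only by PT_X86 pmaps.
 *
 *                 PT_X86 / PT_RVI    PT_EPT (hw A/D)    PT_EPT (emulated A/D)
 *   valid         X86_PG_V           EPT_PG_READ        EPT_PG_EMUL_V
 *   read/write    X86_PG_RW          EPT_PG_WRITE       EPT_PG_EMUL_RW
 *   accessed      X86_PG_A           EPT_PG_A           EPT_PG_READ
 *   modified      X86_PG_M           EPT_PG_M           EPT_PG_WRITE
 */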
311
312 static __inline bool
safe_to_clear_referenced(pmap_t pmap, pt_entry_t pte)
314 {
315
316 if (!pmap_emulate_ad_bits(pmap))
317 return (true);
318
319 KASSERT(pmap->pm_type == PT_EPT, ("invalid pm_type %d", pmap->pm_type));
320
	/*
	 * XWR = 010 or 110 will cause an unconditional EPT misconfiguration,
	 * so we don't allow the referenced (aka EPT_PG_READ) bit to be
	 * cleared if the EPT_PG_WRITE bit is set.
	 */
326 if ((pte & EPT_PG_WRITE) != 0)
327 return (false);
328
	/*
	 * XWR = 100 is allowed only if PMAP_SUPPORTS_EXEC_ONLY is set.
	 */
332 if ((pte & EPT_PG_EXECUTE) == 0 ||
333 ((pmap->pm_flags & PMAP_SUPPORTS_EXEC_ONLY) != 0))
334 return (true);
335 else
336 return (false);
337 }
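
/*
 * An illustrative decision table for safe_to_clear_referenced() under
 * A/D bit emulation, where EPT_PG_READ doubles as the referenced bit
 * (an added summary, assuming only the cases handled above):
 *
 *   EPT_PG_WRITE set               never clear R: XWR would become
 *                                  010 or 110, a misconfiguration
 *   W clear, EPT_PG_EXECUTE clear  safe to clear R
 *   W clear, EPT_PG_EXECUTE set    safe only with PMAP_SUPPORTS_EXEC_ONLY,
 *                                  since clearing R leaves XWR = 100
 */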
338
339 #ifdef PV_STATS
340 #define PV_STAT(x) do { x ; } while (0)
341 #else
342 #define PV_STAT(x) do { } while (0)
343 #endif
344
345 #undef pa_index
346 #ifdef NUMA
347 #define pa_index(pa) ({ \
348 KASSERT((pa) <= vm_phys_segs[vm_phys_nsegs - 1].end, \
349 ("address %lx beyond the last segment", (pa))); \
350 (pa) >> PDRSHIFT; \
351 })
352 #define pa_to_pmdp(pa) (&pv_table[pa_index(pa)])
353 #define pa_to_pvh(pa) (&(pa_to_pmdp(pa)->pv_page))
354 #define PHYS_TO_PV_LIST_LOCK(pa) ({ \
355 struct rwlock *_lock; \
356 if (__predict_false((pa) > pmap_last_pa)) \
357 _lock = &pv_dummy_large.pv_lock; \
358 else \
359 _lock = &(pa_to_pmdp(pa)->pv_lock); \
360 _lock; \
361 })
362 #else
363 #define pa_index(pa) ((pa) >> PDRSHIFT)
364 #define pa_to_pvh(pa) (&pv_table[pa_index(pa)])
365
366 #define NPV_LIST_LOCKS MAXCPU
367
368 #define PHYS_TO_PV_LIST_LOCK(pa) \
369 (&pv_list_locks[pa_index(pa) % NPV_LIST_LOCKS])
370 #endif
371
372 #define CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa) do { \
373 struct rwlock **_lockp = (lockp); \
374 struct rwlock *_new_lock; \
375 \
376 _new_lock = PHYS_TO_PV_LIST_LOCK(pa); \
377 if (_new_lock != *_lockp) { \
378 if (*_lockp != NULL) \
379 rw_wunlock(*_lockp); \
380 *_lockp = _new_lock; \
381 rw_wlock(*_lockp); \
382 } \
383 } while (0)
384
385 #define CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m) \
386 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, VM_PAGE_TO_PHYS(m))
387
388 #define RELEASE_PV_LIST_LOCK(lockp) do { \
389 struct rwlock **_lockp = (lockp); \
390 \
391 if (*_lockp != NULL) { \
392 rw_wunlock(*_lockp); \
393 *_lockp = NULL; \
394 } \
395 } while (0)
396
397 #define VM_PAGE_TO_PV_LIST_LOCK(m) \
398 PHYS_TO_PV_LIST_LOCK(VM_PAGE_TO_PHYS(m))
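
/*
 * Illustrative sketch (not taken verbatim from any one caller) of how
 * the PV list lock helpers above are typically used: a caller carries a
 * single lock pointer across a loop and lets the macros switch locks as
 * the backing physical address changes.
 *
 *	struct rwlock *lock = NULL;
 *
 *	CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m);
 *	... manipulate m's PV list ...
 *	RELEASE_PV_LIST_LOCK(&lock);
 */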
399
400 /*
401 * Statically allocate kernel pmap memory. However, memory for
402 * pm_pcids is obtained after the dynamic allocator is operational.
403 * Initialize it with a non-canonical pointer to catch early accesses
404 * regardless of the active mapping.
405 */
406 struct pmap kernel_pmap_store = {
407 .pm_pcidp = (void *)0xdeadbeefdeadbeef,
408 };
409
410 vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */
411 vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
412
413 int nkpt;
414 SYSCTL_INT(_machdep, OID_AUTO, nkpt, CTLFLAG_RD, &nkpt, 0,
415 "Number of kernel page table pages allocated on bootup");
416
417 static int ndmpdp;
418 vm_paddr_t dmaplimit;
419 vm_offset_t kernel_vm_end = VM_MIN_KERNEL_ADDRESS;
420 pt_entry_t pg_nx;
421
422 static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
423 "VM/pmap parameters");
424
425 static int __read_frequently pg_ps_enabled = 1;
426 SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
427 &pg_ps_enabled, 0, "Are large page mappings enabled?");
428
429 int __read_frequently la57 = 0;
430 SYSCTL_INT(_vm_pmap, OID_AUTO, la57, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
431 &la57, 0,
432 "5-level paging for host is enabled");
433
434 static bool
pmap_is_la57(pmap_t pmap)
436 {
437 if (pmap->pm_type == PT_X86)
438 return (la57);
439 return (false); /* XXXKIB handle EPT */
440 }
441
442 #define PAT_INDEX_SIZE 8
443 static int pat_index[PAT_INDEX_SIZE]; /* cache mode to PAT index conversion */
444
445 static u_int64_t KPTphys; /* phys addr of kernel level 1 */
446 static u_int64_t KPDphys; /* phys addr of kernel level 2 */
447 static u_int64_t KPDPphys; /* phys addr of kernel level 3 */
448 u_int64_t KPML4phys; /* phys addr of kernel level 4 */
449 u_int64_t KPML5phys; /* phys addr of kernel level 5,
450 if supported */
451
452 #ifdef KASAN
453 static uint64_t KASANPDPphys;
454 #endif
455 #ifdef KMSAN
456 static uint64_t KMSANSHADPDPphys;
457 static uint64_t KMSANORIGPDPphys;
458
459 /*
460 * To support systems with large amounts of memory, it is necessary to extend
461 * the maximum size of the direct map. This could eat into the space reserved
462 * for the shadow map.
463 */
464 _Static_assert(DMPML4I + NDMPML4E <= KMSANSHADPML4I, "direct map overflow");
465 #endif
466
467 static pml4_entry_t *kernel_pml4;
468 static u_int64_t DMPDphys; /* phys addr of direct mapped level 2 */
469 static u_int64_t DMPDPphys; /* phys addr of direct mapped level 3 */
470 static int ndmpdpphys; /* number of DMPDPphys pages */
471
472 vm_paddr_t kernphys; /* phys addr of start of bootstrap data */
473 vm_paddr_t KERNend; /* and the end */
474
/*
 * pmap_mapdev() support prior to pmap initialization (e.g., for the
 * early console).
 */
478 #define PMAP_PREINIT_MAPPING_COUNT 8
479 static struct pmap_preinit_mapping {
480 vm_paddr_t pa;
481 vm_offset_t va;
482 vm_size_t sz;
483 int mode;
484 } pmap_preinit_mapping[PMAP_PREINIT_MAPPING_COUNT];
485 static int pmap_initialized;
486
487 /*
488 * Data for the pv entry allocation mechanism.
489 * Updates to pv_invl_gen are protected by the pv list lock but reads are not.
490 */
491 #ifdef NUMA
492 static __inline int
pc_to_domain(struct pv_chunk *pc)
494 {
495
496 return (vm_phys_domain(DMAP_TO_PHYS((vm_offset_t)pc)));
497 }
498 #else
499 static __inline int
pc_to_domain(struct pv_chunk *pc __unused)
501 {
502
503 return (0);
504 }
505 #endif
506
507 struct pv_chunks_list {
508 struct mtx pvc_lock;
509 TAILQ_HEAD(pch, pv_chunk) pvc_list;
510 int active_reclaims;
511 } __aligned(CACHE_LINE_SIZE);
512
513 struct pv_chunks_list __exclusive_cache_line pv_chunks[PMAP_MEMDOM];
514
515 #ifdef NUMA
516 struct pmap_large_md_page {
517 struct rwlock pv_lock;
518 struct md_page pv_page;
519 u_long pv_invl_gen;
520 };
521 __exclusive_cache_line static struct pmap_large_md_page pv_dummy_large;
522 #define pv_dummy pv_dummy_large.pv_page
523 __read_mostly static struct pmap_large_md_page *pv_table;
524 __read_mostly vm_paddr_t pmap_last_pa;
525 #else
526 static struct rwlock __exclusive_cache_line pv_list_locks[NPV_LIST_LOCKS];
527 static u_long pv_invl_gen[NPV_LIST_LOCKS];
528 static struct md_page *pv_table;
529 static struct md_page pv_dummy;
530 #endif
531
532 /*
533 * All those kernel PT submaps that BSD is so fond of
534 */
535 pt_entry_t *CMAP1 = NULL;
536 caddr_t CADDR1 = 0;
537 static vm_offset_t qframe = 0;
538 static struct mtx qframe_mtx;
539
540 static int pmap_flags = PMAP_PDE_SUPERPAGE; /* flags for x86 pmaps */
541
542 static vmem_t *large_vmem;
543 static u_int lm_ents;
544 #define PMAP_ADDRESS_IN_LARGEMAP(va) ((va) >= LARGEMAP_MIN_ADDRESS && \
545 (va) < LARGEMAP_MIN_ADDRESS + NBPML4 * (u_long)lm_ents)
546
547 int pmap_pcid_enabled = 1;
548 SYSCTL_INT(_vm_pmap, OID_AUTO, pcid_enabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
549 &pmap_pcid_enabled, 0, "Is TLB Context ID enabled ?");
550 int invpcid_works = 0;
551 SYSCTL_INT(_vm_pmap, OID_AUTO, invpcid_works, CTLFLAG_RD, &invpcid_works, 0,
552 "Is the invpcid instruction available ?");
553 int pmap_pcid_invlpg_workaround = 0;
554 SYSCTL_INT(_vm_pmap, OID_AUTO, pcid_invlpg_workaround,
555 CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
556 &pmap_pcid_invlpg_workaround, 0,
557 "Enable small core PCID/INVLPG workaround");
558 int pmap_pcid_invlpg_workaround_uena = 1;
559
560 int __read_frequently pti = 0;
561 SYSCTL_INT(_vm_pmap, OID_AUTO, pti, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
562 &pti, 0,
563 "Page Table Isolation enabled");
564 static vm_object_t pti_obj;
565 static pml4_entry_t *pti_pml4;
566 static vm_pindex_t pti_pg_idx;
567 static bool pti_finalized;
568
569 struct pmap_pkru_range {
570 struct rs_el pkru_rs_el;
571 u_int pkru_keyidx;
572 int pkru_flags;
573 };
574
575 static uma_zone_t pmap_pkru_ranges_zone;
576 static bool pmap_pkru_same(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
577 pt_entry_t *pte);
578 static pt_entry_t pmap_pkru_get(pmap_t pmap, vm_offset_t va);
579 static void pmap_pkru_on_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva);
580 static void *pkru_dup_range(void *ctx, void *data);
581 static void pkru_free_range(void *ctx, void *node);
582 static int pmap_pkru_copy(pmap_t dst_pmap, pmap_t src_pmap);
583 static int pmap_pkru_deassign(pmap_t pmap, vm_offset_t sva, vm_offset_t eva);
584 static void pmap_pkru_deassign_all(pmap_t pmap);
585
586 static COUNTER_U64_DEFINE_EARLY(pcid_save_cnt);
587 SYSCTL_COUNTER_U64(_vm_pmap, OID_AUTO, pcid_save_cnt, CTLFLAG_RD,
588 &pcid_save_cnt, "Count of saved TLB context on switch");
589
590 static LIST_HEAD(, pmap_invl_gen) pmap_invl_gen_tracker =
591 LIST_HEAD_INITIALIZER(&pmap_invl_gen_tracker);
592 static struct mtx invl_gen_mtx;
593 /* Fake lock object to satisfy turnstiles interface. */
594 static struct lock_object invl_gen_ts = {
595 .lo_name = "invlts",
596 };
597 static struct pmap_invl_gen pmap_invl_gen_head = {
598 .gen = 1,
599 .next = NULL,
600 };
601 static u_long pmap_invl_gen = 1;
602 static int pmap_invl_waiters;
603 static struct callout pmap_invl_callout;
604 static bool pmap_invl_callout_inited;
605
606 #define PMAP_ASSERT_NOT_IN_DI() \
607 KASSERT(pmap_not_in_di(), ("DI already started"))
608
609 static bool
pmap_di_locked(void)
611 {
612 int tun;
613
614 if ((cpu_feature2 & CPUID2_CX16) == 0)
615 return (true);
616 tun = 0;
617 TUNABLE_INT_FETCH("vm.pmap.di_locked", &tun);
618 return (tun != 0);
619 }
620
621 static int
sysctl_pmap_di_locked(SYSCTL_HANDLER_ARGS)
623 {
624 int locked;
625
626 locked = pmap_di_locked();
627 return (sysctl_handle_int(oidp, &locked, 0, req));
628 }
629 SYSCTL_PROC(_vm_pmap, OID_AUTO, di_locked, CTLTYPE_INT | CTLFLAG_RDTUN |
630 CTLFLAG_MPSAFE, 0, 0, sysctl_pmap_di_locked, "",
631 "Locked delayed invalidation");
632
633 static bool pmap_not_in_di_l(void);
634 static bool pmap_not_in_di_u(void);
635 DEFINE_IFUNC(, bool, pmap_not_in_di, (void))
636 {
637
638 return (pmap_di_locked() ? pmap_not_in_di_l : pmap_not_in_di_u);
639 }
640
641 static bool
pmap_not_in_di_l(void)
643 {
644 struct pmap_invl_gen *invl_gen;
645
646 invl_gen = &curthread->td_md.md_invl_gen;
647 return (invl_gen->gen == 0);
648 }
649
650 static void
pmap_thread_init_invl_gen_l(struct thread *td)
652 {
653 struct pmap_invl_gen *invl_gen;
654
655 invl_gen = &td->td_md.md_invl_gen;
656 invl_gen->gen = 0;
657 }
658
659 static void
pmap_delayed_invl_wait_block(u_long *m_gen, u_long *invl_gen)
661 {
662 struct turnstile *ts;
663
664 ts = turnstile_trywait(&invl_gen_ts);
665 if (*m_gen > atomic_load_long(invl_gen))
666 turnstile_wait(ts, NULL, TS_SHARED_QUEUE);
667 else
668 turnstile_cancel(ts);
669 }
670
671 static void
pmap_delayed_invl_finish_unblock(u_long new_gen)
673 {
674 struct turnstile *ts;
675
676 turnstile_chain_lock(&invl_gen_ts);
677 ts = turnstile_lookup(&invl_gen_ts);
678 if (new_gen != 0)
679 pmap_invl_gen = new_gen;
680 if (ts != NULL) {
681 turnstile_broadcast(ts, TS_SHARED_QUEUE);
682 turnstile_unpend(ts);
683 }
684 turnstile_chain_unlock(&invl_gen_ts);
685 }
686
687 /*
688 * Start a new Delayed Invalidation (DI) block of code, executed by
689 * the current thread. Within a DI block, the current thread may
690 * destroy both the page table and PV list entries for a mapping and
691 * then release the corresponding PV list lock before ensuring that
692 * the mapping is flushed from the TLBs of any processors with the
693 * pmap active.
694 */
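
/*
 * Illustrative shape of a DI block (a sketch of the pattern described
 * above; the removal and shootdown details are elided):
 *
 *	pmap_delayed_invl_start();
 *	... with m's PV list lock write-locked:
 *		pmap_delayed_invl_page(m);	// tag m with this DI gen
 *		... destroy the PTE and PV entry, drop the PV list lock ...
 *	... issue the TLB invalidations ...
 *	pmap_delayed_invl_finish();
 */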
695 static void
pmap_delayed_invl_start_l(void)
697 {
698 struct pmap_invl_gen *invl_gen;
699 u_long currgen;
700
701 invl_gen = &curthread->td_md.md_invl_gen;
702 PMAP_ASSERT_NOT_IN_DI();
703 mtx_lock(&invl_gen_mtx);
704 if (LIST_EMPTY(&pmap_invl_gen_tracker))
705 currgen = pmap_invl_gen;
706 else
707 currgen = LIST_FIRST(&pmap_invl_gen_tracker)->gen;
708 invl_gen->gen = currgen + 1;
709 LIST_INSERT_HEAD(&pmap_invl_gen_tracker, invl_gen, link);
710 mtx_unlock(&invl_gen_mtx);
711 }
712
713 /*
714 * Finish the DI block, previously started by the current thread. All
715 * required TLB flushes for the pages marked by
716 * pmap_delayed_invl_page() must be finished before this function is
717 * called.
718 *
719 * This function works by bumping the global DI generation number to
720 * the generation number of the current thread's DI, unless there is a
721 * pending DI that started earlier. In the latter case, bumping the
722 * global DI generation number would incorrectly signal that the
723 * earlier DI had finished. Instead, this function bumps the earlier
724 * DI's generation number to match the generation number of the
725 * current thread's DI.
726 */
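
/*
 * Worked example (illustrative): thread A starts a DI block and gets
 * gen 5, then thread B starts one and gets gen 6.  If B finishes
 * first, the global generation cannot be bumped to 6, as that would
 * falsely signal that A's gen-5 block is complete; instead B hands its
 * gen 6 to A, and the global generation advances to 6 only once A
 * finishes.
 */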
727 static void
pmap_delayed_invl_finish_l(void)
729 {
730 struct pmap_invl_gen *invl_gen, *next;
731
732 invl_gen = &curthread->td_md.md_invl_gen;
733 KASSERT(invl_gen->gen != 0, ("missed invl_start"));
734 mtx_lock(&invl_gen_mtx);
735 next = LIST_NEXT(invl_gen, link);
736 if (next == NULL)
737 pmap_delayed_invl_finish_unblock(invl_gen->gen);
738 else
739 next->gen = invl_gen->gen;
740 LIST_REMOVE(invl_gen, link);
741 mtx_unlock(&invl_gen_mtx);
742 invl_gen->gen = 0;
743 }
744
745 static bool
pmap_not_in_di_u(void)
747 {
748 struct pmap_invl_gen *invl_gen;
749
750 invl_gen = &curthread->td_md.md_invl_gen;
751 return (((uintptr_t)invl_gen->next & PMAP_INVL_GEN_NEXT_INVALID) != 0);
752 }
753
754 static void
pmap_thread_init_invl_gen_u(struct thread *td)
756 {
757 struct pmap_invl_gen *invl_gen;
758
759 invl_gen = &td->td_md.md_invl_gen;
760 invl_gen->gen = 0;
761 invl_gen->next = (void *)PMAP_INVL_GEN_NEXT_INVALID;
762 }
763
764 static bool
pmap_di_load_invl(struct pmap_invl_gen *ptr, struct pmap_invl_gen *out)
766 {
767 uint64_t new_high, new_low, old_high, old_low;
768 char res;
769
770 old_low = new_low = 0;
771 old_high = new_high = (uintptr_t)0;
772
773 __asm volatile("lock;cmpxchg16b\t%1"
774 : "=@cce" (res), "+m" (*ptr), "+a" (old_low), "+d" (old_high)
775 : "b"(new_low), "c" (new_high)
776 : "memory", "cc");
777 if (res == 0) {
778 if ((old_high & PMAP_INVL_GEN_NEXT_INVALID) != 0)
779 return (false);
780 out->gen = old_low;
781 out->next = (void *)old_high;
782 } else {
783 out->gen = new_low;
784 out->next = (void *)new_high;
785 }
786 return (true);
787 }
788
789 static bool
pmap_di_store_invl(struct pmap_invl_gen *ptr, struct pmap_invl_gen *old_val,
791 struct pmap_invl_gen *new_val)
792 {
793 uint64_t new_high, new_low, old_high, old_low;
794 char res;
795
796 new_low = new_val->gen;
797 new_high = (uintptr_t)new_val->next;
798 old_low = old_val->gen;
799 old_high = (uintptr_t)old_val->next;
800
801 __asm volatile("lock;cmpxchg16b\t%1"
802 : "=@cce" (res), "+m" (*ptr), "+a" (old_low), "+d" (old_high)
803 : "b"(new_low), "c" (new_high)
804 : "memory", "cc");
805 return (res);
806 }
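
/*
 * Both helpers above treat the (gen, next) pair as one 16-byte datum:
 * pmap_di_store_invl() publishes a new pair only if the current
 * contents still match old_val, while pmap_di_load_invl() issues the
 * same cmpxchg16b with zero expected and new values, so the (normally
 * failing) compare returns the current pair atomically.  Rough
 * equivalent of the store helper (illustrative pseudo-C only):
 *
 *	if (ptr->gen == old_val->gen && ptr->next == old_val->next) {
 *		ptr->gen = new_val->gen;
 *		ptr->next = new_val->next;
 *		return (true);
 *	}
 *	return (false);
 */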
807
808 static COUNTER_U64_DEFINE_EARLY(pv_page_count);
809 SYSCTL_COUNTER_U64(_vm_pmap, OID_AUTO, pv_page_count, CTLFLAG_RD,
810 &pv_page_count, "Current number of allocated pv pages");
811
812 static COUNTER_U64_DEFINE_EARLY(user_pt_page_count);
813 SYSCTL_COUNTER_U64(_vm_pmap, OID_AUTO, user_pt_page_count, CTLFLAG_RD,
814 &user_pt_page_count,
815 "Current number of allocated page table pages for userspace");
816
817 static COUNTER_U64_DEFINE_EARLY(kernel_pt_page_count);
818 SYSCTL_COUNTER_U64(_vm_pmap, OID_AUTO, kernel_pt_page_count, CTLFLAG_RD,
819 &kernel_pt_page_count,
820 "Current number of allocated page table pages for the kernel");
821
822 #ifdef PV_STATS
823
824 static COUNTER_U64_DEFINE_EARLY(invl_start_restart);
825 SYSCTL_COUNTER_U64(_vm_pmap, OID_AUTO, invl_start_restart,
826 CTLFLAG_RD, &invl_start_restart,
827 "Number of delayed TLB invalidation request restarts");
828
829 static COUNTER_U64_DEFINE_EARLY(invl_finish_restart);
830 SYSCTL_COUNTER_U64(_vm_pmap, OID_AUTO, invl_finish_restart, CTLFLAG_RD,
831 &invl_finish_restart,
832 "Number of delayed TLB invalidation completion restarts");
833
834 static int invl_max_qlen;
835 SYSCTL_INT(_vm_pmap, OID_AUTO, invl_max_qlen, CTLFLAG_RD,
836 &invl_max_qlen, 0,
837 "Maximum delayed TLB invalidation request queue length");
838 #endif
839
840 #define di_delay locks_delay
841
842 static void
pmap_delayed_invl_start_u(void)
844 {
845 struct pmap_invl_gen *invl_gen, *p, prev, new_prev;
846 struct thread *td;
847 struct lock_delay_arg lda;
848 uintptr_t prevl;
849 u_char pri;
850 #ifdef PV_STATS
851 int i, ii;
852 #endif
853
854 td = curthread;
855 invl_gen = &td->td_md.md_invl_gen;
856 PMAP_ASSERT_NOT_IN_DI();
857 lock_delay_arg_init(&lda, &di_delay);
858 invl_gen->saved_pri = 0;
859 pri = td->td_base_pri;
860 if (pri > PVM) {
861 thread_lock(td);
862 pri = td->td_base_pri;
863 if (pri > PVM) {
864 invl_gen->saved_pri = pri;
865 sched_prio(td, PVM);
866 }
867 thread_unlock(td);
868 }
869 again:
870 PV_STAT(i = 0);
871 for (p = &pmap_invl_gen_head;; p = prev.next) {
872 PV_STAT(i++);
873 prevl = (uintptr_t)atomic_load_ptr(&p->next);
874 if ((prevl & PMAP_INVL_GEN_NEXT_INVALID) != 0) {
875 PV_STAT(counter_u64_add(invl_start_restart, 1));
876 lock_delay(&lda);
877 goto again;
878 }
879 if (prevl == 0)
880 break;
881 prev.next = (void *)prevl;
882 }
883 #ifdef PV_STATS
884 if ((ii = invl_max_qlen) < i)
885 atomic_cmpset_int(&invl_max_qlen, ii, i);
886 #endif
887
888 if (!pmap_di_load_invl(p, &prev) || prev.next != NULL) {
889 PV_STAT(counter_u64_add(invl_start_restart, 1));
890 lock_delay(&lda);
891 goto again;
892 }
893
894 new_prev.gen = prev.gen;
895 new_prev.next = invl_gen;
896 invl_gen->gen = prev.gen + 1;
897
898 /* Formal fence between store to invl->gen and updating *p. */
899 atomic_thread_fence_rel();
900
	/*
	 * After inserting an invl_gen element with the invalid bit set,
	 * this thread blocks any other thread trying to enter the
	 * delayed invalidation block.  Do not allow this thread to be
	 * preempted off the CPU, because that would starve other threads.
	 */
907 critical_enter();
908
	/*
	 * ABA for *p is not possible here, since p->gen can only
	 * increase.  So if the *p thread finished its DI, then
	 * started a new one and got inserted into the list at the
	 * same place, its gen will appear greater than the previously
	 * read gen.
	 */
916 if (!pmap_di_store_invl(p, &prev, &new_prev)) {
917 critical_exit();
918 PV_STAT(counter_u64_add(invl_start_restart, 1));
919 lock_delay(&lda);
920 goto again;
921 }
922
	/*
	 * Here we clear PMAP_INVL_GEN_NEXT_INVALID in
	 * invl_gen->next, allowing other threads to iterate past us.
	 * pmap_di_store_invl() provides a fence between the generation
	 * write and the update of next.
	 */
929 invl_gen->next = NULL;
930 critical_exit();
931 }
932
933 static bool
pmap_delayed_invl_finish_u_crit(struct pmap_invl_gen *invl_gen,
935 struct pmap_invl_gen *p)
936 {
937 struct pmap_invl_gen prev, new_prev;
938 u_long mygen;
939
	/*
	 * Load invl_gen->gen after setting PMAP_INVL_GEN_NEXT_INVALID
	 * in invl_gen->next.  This prevents larger generations from
	 * propagating to our invl_gen->gen.  The lock prefix in
	 * atomic_set_ptr() works as a seq_cst fence.
	 */
946 mygen = atomic_load_long(&invl_gen->gen);
947
948 if (!pmap_di_load_invl(p, &prev) || prev.next != invl_gen)
949 return (false);
950
951 KASSERT(prev.gen < mygen,
952 ("invalid di gen sequence %lu %lu", prev.gen, mygen));
953 new_prev.gen = mygen;
954 new_prev.next = (void *)((uintptr_t)invl_gen->next &
955 ~PMAP_INVL_GEN_NEXT_INVALID);
956
957 /* Formal fence between load of prev and storing update to it. */
958 atomic_thread_fence_rel();
959
960 return (pmap_di_store_invl(p, &prev, &new_prev));
961 }
962
963 static void
pmap_delayed_invl_finish_u(void)
965 {
966 struct pmap_invl_gen *invl_gen, *p;
967 struct thread *td;
968 struct lock_delay_arg lda;
969 uintptr_t prevl;
970
971 td = curthread;
972 invl_gen = &td->td_md.md_invl_gen;
973 KASSERT(invl_gen->gen != 0, ("missed invl_start: gen 0"));
974 KASSERT(((uintptr_t)invl_gen->next & PMAP_INVL_GEN_NEXT_INVALID) == 0,
975 ("missed invl_start: INVALID"));
976 lock_delay_arg_init(&lda, &di_delay);
977
978 again:
979 for (p = &pmap_invl_gen_head; p != NULL; p = (void *)prevl) {
980 prevl = (uintptr_t)atomic_load_ptr(&p->next);
981 if ((prevl & PMAP_INVL_GEN_NEXT_INVALID) != 0) {
982 PV_STAT(counter_u64_add(invl_finish_restart, 1));
983 lock_delay(&lda);
984 goto again;
985 }
986 if ((void *)prevl == invl_gen)
987 break;
988 }
989
	/*
	 * It is legitimate not to find ourselves on the list if a
	 * thread before us finished its DI and started it again.
	 */
994 if (__predict_false(p == NULL)) {
995 PV_STAT(counter_u64_add(invl_finish_restart, 1));
996 lock_delay(&lda);
997 goto again;
998 }
999
1000 critical_enter();
1001 atomic_set_ptr((uintptr_t *)&invl_gen->next,
1002 PMAP_INVL_GEN_NEXT_INVALID);
1003 if (!pmap_delayed_invl_finish_u_crit(invl_gen, p)) {
1004 atomic_clear_ptr((uintptr_t *)&invl_gen->next,
1005 PMAP_INVL_GEN_NEXT_INVALID);
1006 critical_exit();
1007 PV_STAT(counter_u64_add(invl_finish_restart, 1));
1008 lock_delay(&lda);
1009 goto again;
1010 }
1011 critical_exit();
1012 if (atomic_load_int(&pmap_invl_waiters) > 0)
1013 pmap_delayed_invl_finish_unblock(0);
1014 if (invl_gen->saved_pri != 0) {
1015 thread_lock(td);
1016 sched_prio(td, invl_gen->saved_pri);
1017 thread_unlock(td);
1018 }
1019 }
1020
1021 #ifdef DDB
DB_SHOW_COMMAND(di_queue, pmap_di_queue)
1023 {
1024 struct pmap_invl_gen *p, *pn;
1025 struct thread *td;
1026 uintptr_t nextl;
1027 bool first;
1028
1029 for (p = &pmap_invl_gen_head, first = true; p != NULL; p = pn,
1030 first = false) {
1031 nextl = (uintptr_t)atomic_load_ptr(&p->next);
1032 pn = (void *)(nextl & ~PMAP_INVL_GEN_NEXT_INVALID);
1033 td = first ? NULL : __containerof(p, struct thread,
1034 td_md.md_invl_gen);
1035 db_printf("gen %lu inv %d td %p tid %d\n", p->gen,
1036 (nextl & PMAP_INVL_GEN_NEXT_INVALID) != 0, td,
1037 td != NULL ? td->td_tid : -1);
1038 }
1039 }
1040 #endif
1041
1042 #ifdef PV_STATS
1043 static COUNTER_U64_DEFINE_EARLY(invl_wait);
1044 SYSCTL_COUNTER_U64(_vm_pmap, OID_AUTO, invl_wait,
1045 CTLFLAG_RD, &invl_wait,
1046 "Number of times DI invalidation blocked pmap_remove_all/write");
1047
1048 static COUNTER_U64_DEFINE_EARLY(invl_wait_slow);
1049 SYSCTL_COUNTER_U64(_vm_pmap, OID_AUTO, invl_wait_slow, CTLFLAG_RD,
1050 &invl_wait_slow, "Number of slow invalidation waits for lockless DI");
1051
1052 #endif
1053
1054 #ifdef NUMA
1055 static u_long *
pmap_delayed_invl_genp(vm_page_t m)
1057 {
1058 vm_paddr_t pa;
1059 u_long *gen;
1060
1061 pa = VM_PAGE_TO_PHYS(m);
1062 if (__predict_false((pa) > pmap_last_pa))
1063 gen = &pv_dummy_large.pv_invl_gen;
1064 else
1065 gen = &(pa_to_pmdp(pa)->pv_invl_gen);
1066
1067 return (gen);
1068 }
1069 #else
1070 static u_long *
pmap_delayed_invl_genp(vm_page_t m)
1072 {
1073
1074 return (&pv_invl_gen[pa_index(VM_PAGE_TO_PHYS(m)) % NPV_LIST_LOCKS]);
1075 }
1076 #endif
1077
1078 static void
pmap_delayed_invl_callout_func(void *arg __unused)
1080 {
1081
1082 if (atomic_load_int(&pmap_invl_waiters) == 0)
1083 return;
1084 pmap_delayed_invl_finish_unblock(0);
1085 }
1086
1087 static void
pmap_delayed_invl_callout_init(void *arg __unused)
1089 {
1090
1091 if (pmap_di_locked())
1092 return;
1093 callout_init(&pmap_invl_callout, 1);
1094 pmap_invl_callout_inited = true;
1095 }
1096 SYSINIT(pmap_di_callout, SI_SUB_CPU + 1, SI_ORDER_ANY,
1097 pmap_delayed_invl_callout_init, NULL);
1098
/*
 * Ensure that all currently executing DI blocks that need to flush the
 * TLB for the given page m have actually done so by the time this
 * function returns.  If the page m has an empty PV list and we call
 * pmap_delayed_invl_wait(), upon its return we know that no CPU has a
 * valid mapping for the page m in either its page table or TLB.
 *
 * This function works by blocking until the global DI generation
 * number catches up with the generation number associated with the
 * given page m and its PV list.  Since this function's callers
 * typically own an object lock and sometimes own a page lock, it
 * cannot sleep.  Instead, it blocks on a turnstile to relinquish the
 * processor.
 */
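
/*
 * Illustrative caller-side sketch: a function such as pmap_remove_all()
 * first unlinks every mapping of m while holding the PV list lock and
 * then, after dropping it, calls
 *
 *	pmap_delayed_invl_wait(m);
 *
 * so that on return no concurrent DI block still leaves a stale TLB
 * entry for m on any CPU.
 */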
1113 static void
pmap_delayed_invl_wait_l(vm_page_t m)
1115 {
1116 u_long *m_gen;
1117 #ifdef PV_STATS
1118 bool accounted = false;
1119 #endif
1120
1121 m_gen = pmap_delayed_invl_genp(m);
1122 while (*m_gen > pmap_invl_gen) {
1123 #ifdef PV_STATS
1124 if (!accounted) {
1125 counter_u64_add(invl_wait, 1);
1126 accounted = true;
1127 }
1128 #endif
1129 pmap_delayed_invl_wait_block(m_gen, &pmap_invl_gen);
1130 }
1131 }
1132
1133 static void
pmap_delayed_invl_wait_u(vm_page_t m)
1135 {
1136 u_long *m_gen;
1137 struct lock_delay_arg lda;
1138 bool fast;
1139
1140 fast = true;
1141 m_gen = pmap_delayed_invl_genp(m);
1142 lock_delay_arg_init(&lda, &di_delay);
1143 while (*m_gen > atomic_load_long(&pmap_invl_gen_head.gen)) {
1144 if (fast || !pmap_invl_callout_inited) {
1145 PV_STAT(counter_u64_add(invl_wait, 1));
1146 lock_delay(&lda);
1147 fast = false;
1148 } else {
1149 /*
1150 * The page's invalidation generation number
1151 * is still below the current thread's number.
1152 * Prepare to block so that we do not waste
1153 * CPU cycles or worse, suffer livelock.
1154 *
1155 * Since it is impossible to block without
1156 * racing with pmap_delayed_invl_finish_u(),
1157 * prepare for the race by incrementing
1158 * pmap_invl_waiters and arming a 1-tick
1159 * callout which will unblock us if we lose
1160 * the race.
1161 */
1162 atomic_add_int(&pmap_invl_waiters, 1);
1163
1164 /*
1165 * Re-check the current thread's invalidation
1166 * generation after incrementing
1167 * pmap_invl_waiters, so that there is no race
1168 * with pmap_delayed_invl_finish_u() setting
1169 * the page generation and checking
1170 * pmap_invl_waiters. The only race allowed
1171 * is for a missed unblock, which is handled
1172 * by the callout.
1173 */
1174 if (*m_gen >
1175 atomic_load_long(&pmap_invl_gen_head.gen)) {
1176 callout_reset(&pmap_invl_callout, 1,
1177 pmap_delayed_invl_callout_func, NULL);
1178 PV_STAT(counter_u64_add(invl_wait_slow, 1));
1179 pmap_delayed_invl_wait_block(m_gen,
1180 &pmap_invl_gen_head.gen);
1181 }
1182 atomic_add_int(&pmap_invl_waiters, -1);
1183 }
1184 }
1185 }
1186
1187 DEFINE_IFUNC(, void, pmap_thread_init_invl_gen, (struct thread *))
1188 {
1189
1190 return (pmap_di_locked() ? pmap_thread_init_invl_gen_l :
1191 pmap_thread_init_invl_gen_u);
1192 }
1193
1194 DEFINE_IFUNC(static, void, pmap_delayed_invl_start, (void))
1195 {
1196
1197 return (pmap_di_locked() ? pmap_delayed_invl_start_l :
1198 pmap_delayed_invl_start_u);
1199 }
1200
1201 DEFINE_IFUNC(static, void, pmap_delayed_invl_finish, (void))
1202 {
1203
1204 return (pmap_di_locked() ? pmap_delayed_invl_finish_l :
1205 pmap_delayed_invl_finish_u);
1206 }
1207
1208 DEFINE_IFUNC(static, void, pmap_delayed_invl_wait, (vm_page_t))
1209 {
1210
1211 return (pmap_di_locked() ? pmap_delayed_invl_wait_l :
1212 pmap_delayed_invl_wait_u);
1213 }
1214
/*
 * Mark the page m's PV list as participating in the current thread's
 * DI block.  Any threads concurrently using m's PV list to remove or
 * restrict all mappings to m will wait for the current thread's DI
 * block to complete before proceeding.
 *
 * The function works by setting the DI generation number for m's PV
 * list to at least the DI generation number of the current thread.
 * This forces a caller of pmap_delayed_invl_wait() to block until the
 * current thread calls pmap_delayed_invl_finish().
 */
1226 static void
pmap_delayed_invl_page(vm_page_t m)
1228 {
1229 u_long gen, *m_gen;
1230
1231 rw_assert(VM_PAGE_TO_PV_LIST_LOCK(m), RA_WLOCKED);
1232 gen = curthread->td_md.md_invl_gen.gen;
1233 if (gen == 0)
1234 return;
1235 m_gen = pmap_delayed_invl_genp(m);
1236 if (*m_gen < gen)
1237 *m_gen = gen;
1238 }
1239
1240 /*
1241 * Crashdump maps.
1242 */
1243 static caddr_t crashdumpmap;
1244
1245 /*
1246 * Internal flags for pmap_enter()'s helper functions.
1247 */
1248 #define PMAP_ENTER_NORECLAIM 0x1000000 /* Don't reclaim PV entries. */
1249 #define PMAP_ENTER_NOREPLACE 0x2000000 /* Don't replace mappings. */
1250
1251 /*
1252 * Internal flags for pmap_mapdev_internal() and
1253 * pmap_change_props_locked().
1254 */
1255 #define MAPDEV_FLUSHCACHE 0x00000001 /* Flush cache after mapping. */
1256 #define MAPDEV_SETATTR 0x00000002 /* Modify existing attrs. */
1257 #define MAPDEV_ASSERTVALID 0x00000004 /* Assert mapping validity. */
1258
1259 TAILQ_HEAD(pv_chunklist, pv_chunk);
1260
1261 static void free_pv_chunk(struct pv_chunk *pc);
1262 static void free_pv_chunk_batch(struct pv_chunklist *batch);
1263 static void free_pv_entry(pmap_t pmap, pv_entry_t pv);
1264 static pv_entry_t get_pv_entry(pmap_t pmap, struct rwlock **lockp);
1265 static int popcnt_pc_map_pq(uint64_t *map);
1266 static vm_page_t reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp);
1267 static void reserve_pv_entries(pmap_t pmap, int needed,
1268 struct rwlock **lockp);
1269 static void pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
1270 struct rwlock **lockp);
1271 static bool pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, pd_entry_t pde,
1272 u_int flags, struct rwlock **lockp);
1273 #if VM_NRESERVLEVEL > 0
1274 static void pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
1275 struct rwlock **lockp);
1276 #endif
1277 static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
1278 static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
1279 vm_offset_t va);
1280
1281 static void pmap_abort_ptp(pmap_t pmap, vm_offset_t va, vm_page_t mpte);
1282 static int pmap_change_props_locked(vm_offset_t va, vm_size_t size,
1283 vm_prot_t prot, int mode, int flags);
1284 static bool pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va);
1285 static bool pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde,
1286 vm_offset_t va, struct rwlock **lockp);
1287 static bool pmap_demote_pdpe(pmap_t pmap, pdp_entry_t *pdpe,
1288 vm_offset_t va);
1289 static int pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m,
1290 vm_prot_t prot, struct rwlock **lockp);
1291 static int pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde,
1292 u_int flags, vm_page_t m, struct rwlock **lockp);
1293 static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
1294 vm_page_t m, vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp);
1295 static void pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte);
1296 static int pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte, bool promoted,
1297 bool allpte_PG_A_set);
1298 static void pmap_invalidate_cache_range_selfsnoop(vm_offset_t sva,
1299 vm_offset_t eva);
1300 static void pmap_invalidate_cache_range_all(vm_offset_t sva,
1301 vm_offset_t eva);
1302 static void pmap_invalidate_pde_page(pmap_t pmap, vm_offset_t va,
1303 pd_entry_t pde);
1304 static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);
1305 static vm_page_t pmap_large_map_getptp_unlocked(void);
1306 static vm_paddr_t pmap_large_map_kextract(vm_offset_t va);
1307 #if VM_NRESERVLEVEL > 0
1308 static bool pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
1309 vm_page_t mpte, struct rwlock **lockp);
1310 #endif
1311 static bool pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva,
1312 vm_prot_t prot);
1313 static void pmap_pte_props(pt_entry_t *pte, u_long bits, u_long mask);
1314 static void pmap_pti_add_kva_locked(vm_offset_t sva, vm_offset_t eva,
1315 bool exec);
1316 static pdp_entry_t *pmap_pti_pdpe(vm_offset_t va);
1317 static pd_entry_t *pmap_pti_pde(vm_offset_t va);
1318 static void pmap_pti_wire_pte(void *pte);
1319 static int pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
1320 struct spglist *free, struct rwlock **lockp);
1321 static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva,
1322 pd_entry_t ptepde, struct spglist *free, struct rwlock **lockp);
1323 static vm_page_t pmap_remove_pt_page(pmap_t pmap, vm_offset_t va);
1324 static void pmap_remove_page(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
1325 struct spglist *free);
1326 static bool pmap_remove_ptes(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
1327 pd_entry_t *pde, struct spglist *free,
1328 struct rwlock **lockp);
1329 static bool pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
1330 vm_page_t m, struct rwlock **lockp);
1331 static void pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
1332 pd_entry_t newpde);
1333 static void pmap_update_pde_invalidate(pmap_t, vm_offset_t va, pd_entry_t pde);
1334
1335 static pd_entry_t *pmap_alloc_pde(pmap_t pmap, vm_offset_t va, vm_page_t *pdpgp,
1336 struct rwlock **lockp);
1337 static vm_page_t pmap_allocpte_alloc(pmap_t pmap, vm_pindex_t ptepindex,
1338 struct rwlock **lockp, vm_offset_t va);
1339 static vm_page_t pmap_allocpte_nosleep(pmap_t pmap, vm_pindex_t ptepindex,
1340 struct rwlock **lockp, vm_offset_t va);
1341 static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va,
1342 struct rwlock **lockp);
1343
1344 static void _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m,
1345 struct spglist *free);
1346 static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t, struct spglist *);
1347
1348 static vm_page_t pmap_alloc_pt_page(pmap_t, vm_pindex_t, int);
1349 static void pmap_free_pt_page(pmap_t, vm_page_t, bool);
1350
1351 /********************/
1352 /* Inline functions */
1353 /********************/
1354
/*
 * Return non-clipped indexes for a given VA, i.e., the indexes of the
 * page table pages at the corresponding levels.
 */
1359 static __inline vm_pindex_t
pmap_pde_pindex(vm_offset_t va)
1361 {
1362 return (va >> PDRSHIFT);
1363 }
1364
1365 static __inline vm_pindex_t
pmap_pdpe_pindex(vm_offset_t va)
1367 {
1368 return (NUPDE + (va >> PDPSHIFT));
1369 }
1370
1371 static __inline vm_pindex_t
pmap_pml4e_pindex(vm_offset_t va)
1373 {
1374 return (NUPDE + NUPDPE + (va >> PML4SHIFT));
1375 }
1376
1377 static __inline vm_pindex_t
pmap_pml5e_pindex(vm_offset_t va)
1379 {
1380 return (NUPDE + NUPDPE + NUPML4E + (va >> PML5SHIFT));
1381 }
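
/*
 * Illustrative pindex layout implied by the helpers above; each pindex
 * names the page table page, at the next lower level, that maps va:
 *
 *	[0, NUPDE)					page table (PT) pages
 *	[NUPDE, NUPDE + NUPDPE)				page directory (PD) pages
 *	[NUPDE + NUPDPE, NUPDE + NUPDPE + NUPML4E)	PDP pages
 *	beyond that					PML4 pages (LA57 only)
 */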
1382
1383 static __inline pml4_entry_t *
pmap_pml5e(pmap_t pmap, vm_offset_t va)
1385 {
1386
1387 MPASS(pmap_is_la57(pmap));
1388 return (&pmap->pm_pmltop[pmap_pml5e_index(va)]);
1389 }
1390
1391 static __inline pml4_entry_t *
pmap_pml5e_u(pmap_t pmap, vm_offset_t va)
1393 {
1394
1395 MPASS(pmap_is_la57(pmap));
1396 return (&pmap->pm_pmltopu[pmap_pml5e_index(va)]);
1397 }
1398
1399 static __inline pml4_entry_t *
pmap_pml5e_to_pml4e(pml5_entry_t *pml5e, vm_offset_t va)
1401 {
1402 pml4_entry_t *pml4e;
1403
	/* XXX MPASS(pmap_is_la57(pmap)); */
1405 pml4e = (pml4_entry_t *)PHYS_TO_DMAP(*pml5e & PG_FRAME);
1406 return (&pml4e[pmap_pml4e_index(va)]);
1407 }
1408
1409 /* Return a pointer to the PML4 slot that corresponds to a VA */
1410 static __inline pml4_entry_t *
pmap_pml4e(pmap_t pmap, vm_offset_t va)
1412 {
1413 pml5_entry_t *pml5e;
1414 pml4_entry_t *pml4e;
1415 pt_entry_t PG_V;
1416
1417 if (pmap_is_la57(pmap)) {
1418 pml5e = pmap_pml5e(pmap, va);
1419 PG_V = pmap_valid_bit(pmap);
1420 if ((*pml5e & PG_V) == 0)
1421 return (NULL);
1422 pml4e = (pml4_entry_t *)PHYS_TO_DMAP(*pml5e & PG_FRAME);
1423 } else {
1424 pml4e = pmap->pm_pmltop;
1425 }
1426 return (&pml4e[pmap_pml4e_index(va)]);
1427 }
1428
1429 static __inline pml4_entry_t *
pmap_pml4e_u(pmap_t pmap, vm_offset_t va)
1431 {
1432 MPASS(!pmap_is_la57(pmap));
1433 return (&pmap->pm_pmltopu[pmap_pml4e_index(va)]);
1434 }
1435
1436 /* Return a pointer to the PDP slot that corresponds to a VA */
1437 static __inline pdp_entry_t *
pmap_pml4e_to_pdpe(pml4_entry_t *pml4e, vm_offset_t va)
1439 {
1440 pdp_entry_t *pdpe;
1441
1442 pdpe = (pdp_entry_t *)PHYS_TO_DMAP(*pml4e & PG_FRAME);
1443 return (&pdpe[pmap_pdpe_index(va)]);
1444 }
1445
1446 /* Return a pointer to the PDP slot that corresponds to a VA */
1447 static __inline pdp_entry_t *
pmap_pdpe(pmap_t pmap, vm_offset_t va)
1449 {
1450 pml4_entry_t *pml4e;
1451 pt_entry_t PG_V;
1452
1453 PG_V = pmap_valid_bit(pmap);
1454 pml4e = pmap_pml4e(pmap, va);
1455 if (pml4e == NULL || (*pml4e & PG_V) == 0)
1456 return (NULL);
1457 return (pmap_pml4e_to_pdpe(pml4e, va));
1458 }
1459
1460 /* Return a pointer to the PD slot that corresponds to a VA */
1461 static __inline pd_entry_t *
pmap_pdpe_to_pde(pdp_entry_t *pdpe, vm_offset_t va)
1463 {
1464 pd_entry_t *pde;
1465
1466 KASSERT((*pdpe & PG_PS) == 0,
1467 ("%s: pdpe %#lx is a leaf", __func__, *pdpe));
1468 pde = (pd_entry_t *)PHYS_TO_DMAP(*pdpe & PG_FRAME);
1469 return (&pde[pmap_pde_index(va)]);
1470 }
1471
1472 /* Return a pointer to the PD slot that corresponds to a VA */
1473 static __inline pd_entry_t *
pmap_pde(pmap_t pmap, vm_offset_t va)
1475 {
1476 pdp_entry_t *pdpe;
1477 pt_entry_t PG_V;
1478
1479 PG_V = pmap_valid_bit(pmap);
1480 pdpe = pmap_pdpe(pmap, va);
1481 if (pdpe == NULL || (*pdpe & PG_V) == 0)
1482 return (NULL);
1483 KASSERT((*pdpe & PG_PS) == 0,
1484 ("pmap_pde for 1G page, pmap %p va %#lx", pmap, va));
1485 return (pmap_pdpe_to_pde(pdpe, va));
1486 }
1487
1488 /* Return a pointer to the PT slot that corresponds to a VA */
1489 static __inline pt_entry_t *
pmap_pde_to_pte(pd_entry_t *pde, vm_offset_t va)
1491 {
1492 pt_entry_t *pte;
1493
1494 KASSERT((*pde & PG_PS) == 0,
1495 ("%s: pde %#lx is a leaf", __func__, *pde));
1496 pte = (pt_entry_t *)PHYS_TO_DMAP(*pde & PG_FRAME);
1497 return (&pte[pmap_pte_index(va)]);
1498 }
1499
1500 /* Return a pointer to the PT slot that corresponds to a VA */
1501 static __inline pt_entry_t *
pmap_pte(pmap_t pmap, vm_offset_t va)
1503 {
1504 pd_entry_t *pde;
1505 pt_entry_t PG_V;
1506
1507 PG_V = pmap_valid_bit(pmap);
1508 pde = pmap_pde(pmap, va);
1509 if (pde == NULL || (*pde & PG_V) == 0)
1510 return (NULL);
1511 if ((*pde & PG_PS) != 0) /* compat with i386 pmap_pte() */
1512 return ((pt_entry_t *)pde);
1513 return (pmap_pde_to_pte(pde, va));
1514 }
1515
1516 static __inline void
pmap_resident_count_adj(pmap_t pmap, int count)
1518 {
1519
1520 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1521 KASSERT(pmap->pm_stats.resident_count + count >= 0,
1522 ("pmap %p resident count underflow %ld %d", pmap,
1523 pmap->pm_stats.resident_count, count));
1524 pmap->pm_stats.resident_count += count;
1525 }
1526
1527 static __inline void
pmap_pt_page_count_pinit(pmap_t pmap, int count)
1529 {
1530 KASSERT(pmap->pm_stats.resident_count + count >= 0,
1531 ("pmap %p resident count underflow %ld %d", pmap,
1532 pmap->pm_stats.resident_count, count));
1533 pmap->pm_stats.resident_count += count;
1534 }
1535
1536 static __inline void
pmap_pt_page_count_adj(pmap_t pmap, int count)
1538 {
1539 if (pmap == kernel_pmap)
1540 counter_u64_add(kernel_pt_page_count, count);
1541 else {
1542 if (pmap != NULL)
1543 pmap_resident_count_adj(pmap, count);
1544 counter_u64_add(user_pt_page_count, count);
1545 }
1546 }
1547
1548 pt_entry_t vtoptem __read_mostly = ((1ul << (NPTEPGSHIFT + NPDEPGSHIFT +
1549 NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1) << 3;
1550 vm_offset_t PTmap __read_mostly = (vm_offset_t)P4Tmap;
1551
1552 pt_entry_t *
vtopte(vm_offset_t va)
1554 {
1555 KASSERT(va >= VM_MAXUSER_ADDRESS, ("vtopte on a uva/gpa 0x%0lx", va));
1556
1557 return ((pt_entry_t *)(PTmap + ((va >> (PAGE_SHIFT - 3)) & vtoptem)));
1558 }
1559
1560 pd_entry_t vtopdem __read_mostly = ((1ul << (NPDEPGSHIFT + NPDPEPGSHIFT +
1561 NPML4EPGSHIFT)) - 1) << 3;
1562 vm_offset_t PDmap __read_mostly = (vm_offset_t)P4Dmap;
1563
1564 static __inline pd_entry_t *
vtopde(vm_offset_t va)
1566 {
1567 KASSERT(va >= VM_MAXUSER_ADDRESS, ("vtopde on a uva/gpa 0x%0lx", va));
1568
1569 return ((pt_entry_t *)(PDmap + ((va >> (PDRSHIFT - 3)) & vtopdem)));
1570 }
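
/*
 * Both vtopte() and vtopde() rely on the recursive page table mapping
 * rooted at P4Tmap/P4Dmap (an explanatory note, sketching the
 * arithmetic used above): shifting va right by (PAGE_SHIFT - 3) or
 * (PDRSHIFT - 3) scales the page (or 2M-page) number by the 8-byte
 * size of an entry, and vtoptem/vtopdem mask the result down to the
 * window covered by the recursive slot, yielding the address of the
 * PTE or PDE that maps va.
 */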
1571
1572 static u_int64_t
allocpages(vm_paddr_t *firstaddr, int n)
1574 {
1575 u_int64_t ret;
1576
1577 ret = *firstaddr;
1578 bzero((void *)ret, n * PAGE_SIZE);
1579 *firstaddr += n * PAGE_SIZE;
1580 return (ret);
1581 }
1582
1583 CTASSERT(powerof2(NDMPML4E));
1584
1585 /* number of kernel PDP slots */
1586 #define NKPDPE(ptpgs) howmany(ptpgs, NPDEPG)
1587
1588 static void
nkpt_init(vm_paddr_t addr)
1590 {
1591 int pt_pages;
1592
1593 #ifdef NKPT
1594 pt_pages = NKPT;
1595 #else
1596 pt_pages = howmany(addr - kernphys, NBPDR) + 1; /* +1 for 2M hole @0 */
1597 pt_pages += NKPDPE(pt_pages);
1598
1599 /*
1600 * Add some slop beyond the bare minimum required for bootstrapping
1601 * the kernel.
1602 *
1603 * This is quite important when allocating KVA for kernel modules.
1604 * The modules are required to be linked in the negative 2GB of
1605 * the address space. If we run out of KVA in this region then
1606 * pmap_growkernel() will need to allocate page table pages to map
1607 * the entire 512GB of KVA space which is an unnecessary tax on
1608 * physical memory.
1609 *
1610 * Secondly, device memory mapped as part of setting up the low-
1611 * level console(s) is taken from KVA, starting at virtual_avail.
1612 * This is because cninit() is called after pmap_bootstrap() but
1613 * before vm_mem_init() and pmap_init(). 20MB for a frame buffer
1614 * is not uncommon.
1615 */
1616 pt_pages += 32; /* 64MB additional slop. */
1617 #endif
1618 nkpt = pt_pages;
1619 }
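
/*
 * Worked example (illustrative, with NKPT undefined, NBPDR = 2MB and
 * NPDEPG = 512): if the preloaded kernel data ends 120MB past kernphys,
 * then howmany(120MB, NBPDR) + 1 = 61 PT pages, NKPDPE(61) = 1 adds one
 * page for the level above, and the 32-page slop brings nkpt to 94.
 */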
1620
1621 /*
1622 * Returns the proper write/execute permission for a physical page that is
1623 * part of the initial boot allocations.
1624 *
1625 * If the page has kernel text, it is marked as read-only. If the page has
1626 * kernel read-only data, it is marked as read-only/not-executable. If the
1627 * page has only read-write data, it is marked as read-write/not-executable.
1628 * If the page is below/above the kernel range, it is marked as read-write.
1629 *
1630 * This function operates on 2M pages, since we map the kernel space that
1631 * way.
1632 */
1633 static inline pt_entry_t
bootaddr_rwx(vm_paddr_t pa)
1635 {
1636 /*
1637 * The kernel is loaded at a 2MB-aligned address, and memory below that
1638 * need not be executable. The .bss section is padded to a 2MB
1639 * boundary, so memory following the kernel need not be executable
1640 * either. Preloaded kernel modules have their mapping permissions
1641 * fixed up by the linker.
1642 */
1643 if (pa < trunc_2mpage(kernphys + btext - KERNSTART) ||
1644 pa >= trunc_2mpage(kernphys + _end - KERNSTART))
1645 return (X86_PG_RW | pg_nx);
1646
1647 /*
1648 * The linker should ensure that the read-only and read-write
1649 * portions don't share the same 2M page, so this shouldn't
1650 * impact read-only data. However, in any case, any page with
1651 * read-write data needs to be read-write.
1652 */
1653 if (pa >= trunc_2mpage(kernphys + brwsection - KERNSTART))
1654 return (X86_PG_RW | pg_nx);
1655
1656 /*
1657 * Mark any 2M page containing kernel text as read-only. Mark
1658 * other pages with read-only data as read-only and not executable.
1659 * (It is likely a small portion of the read-only data section will
1660 * be marked as read-only, but executable. This should be acceptable
1661 * since the read-only protection will keep the data from changing.)
1662 * Note that fixups to the .text section will still work until we
1663 * set CR0.WP.
1664 */
1665 if (pa < round_2mpage(kernphys + etext - KERNSTART))
1666 return (0);
1667 return (pg_nx);
1668 }
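
/*
 * Illustrative summary of the protections chosen above for the boot
 * allocations (2M granularity):
 *
 *	below btext or at/above _end		RW, NX
 *	[btext, etext)   kernel text		RO, executable
 *	[etext, brwsection)   read-only data	RO, NX
 *	[brwsection, _end)    read-write data	RW, NX
 */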
1669
1670 static void
create_pagetables(vm_paddr_t *firstaddr)
1672 {
1673 pd_entry_t *pd_p;
1674 pdp_entry_t *pdp_p;
1675 pml4_entry_t *p4_p;
1676 uint64_t DMPDkernphys;
1677 vm_paddr_t pax;
1678 #ifdef KASAN
1679 pt_entry_t *pt_p;
1680 uint64_t KASANPDphys, KASANPTphys, KASANphys;
1681 vm_offset_t kasankernbase;
1682 int kasankpdpi, kasankpdi, nkasanpte;
1683 #endif
1684 int i, j, ndm1g, nkpdpe, nkdmpde;
1685
1686 TSENTER();
1687 /* Allocate page table pages for the direct map */
1688 ndmpdp = howmany(ptoa(Maxmem), NBPDP);
1689 if (ndmpdp < 4) /* Minimum 4GB of dirmap */
1690 ndmpdp = 4;
1691 ndmpdpphys = howmany(ndmpdp, NPDPEPG);
1692 if (ndmpdpphys > NDMPML4E) {
1693 /*
1694 * Each NDMPML4E allows 512 GB, so limit to that,
1695 * and then readjust ndmpdp and ndmpdpphys.
1696 */
1697 printf("NDMPML4E limits system to %d GB\n", NDMPML4E * 512);
1698 Maxmem = atop(NDMPML4E * NBPML4);
1699 ndmpdpphys = NDMPML4E;
1700 ndmpdp = NDMPML4E * NPDEPG;
1701 }
1702 DMPDPphys = allocpages(firstaddr, ndmpdpphys);
1703 ndm1g = 0;
1704 if ((amd_feature & AMDID_PAGE1GB) != 0) {
1705 /*
1706 * Calculate the number of 1G pages that will fully fit in
1707 * Maxmem.
1708 */
1709 ndm1g = ptoa(Maxmem) >> PDPSHIFT;
1710
1711 /*
1712 * Allocate 2M pages for the kernel. These will be used in
1713 * place of one or more of the 1G pages from ndm1g that map
1714 * kernel memory into the DMAP.
1715 */
1716 nkdmpde = howmany((vm_offset_t)brwsection - KERNSTART +
1717 kernphys - rounddown2(kernphys, NBPDP), NBPDP);
1718 DMPDkernphys = allocpages(firstaddr, nkdmpde);
1719 }
1720 if (ndm1g < ndmpdp)
1721 DMPDphys = allocpages(firstaddr, ndmpdp - ndm1g);
1722 dmaplimit = (vm_paddr_t)ndmpdp << PDPSHIFT;
1723
1724 /* Allocate pages. */
1725 KPML4phys = allocpages(firstaddr, 1);
1726 KPDPphys = allocpages(firstaddr, NKPML4E);
1727 #ifdef KASAN
1728 KASANPDPphys = allocpages(firstaddr, NKASANPML4E);
1729 KASANPDphys = allocpages(firstaddr, 1);
1730 #endif
1731 #ifdef KMSAN
1732 /*
1733 * The KMSAN shadow maps are initially left unpopulated, since there is
1734 * no need to shadow memory above KERNBASE.
1735 */
1736 KMSANSHADPDPphys = allocpages(firstaddr, NKMSANSHADPML4E);
1737 KMSANORIGPDPphys = allocpages(firstaddr, NKMSANORIGPML4E);
1738 #endif
1739
1740 /*
1741 * Allocate the initial number of kernel page table pages required to
1742 * bootstrap. We defer this until after all memory-size dependent
1743 * allocations are done (e.g. direct map), so that we don't have to
1744 * build in too much slop in our estimate.
1745 *
1746 * Note that when NKPML4E > 1, we have an empty page underneath
1747 * all but the KPML4I'th one, so we need NKPML4E-1 extra (zeroed)
1748 * pages. (pmap_enter requires a PD page to exist for each KPML4E.)
1749 */
1750 nkpt_init(*firstaddr);
1751 nkpdpe = NKPDPE(nkpt);
1752
1753 KPTphys = allocpages(firstaddr, nkpt);
1754 KPDphys = allocpages(firstaddr, nkpdpe);
1755
1756 #ifdef KASAN
1757 nkasanpte = howmany(nkpt, KASAN_SHADOW_SCALE);
1758 KASANPTphys = allocpages(firstaddr, nkasanpte);
1759 KASANphys = allocpages(firstaddr, nkasanpte * NPTEPG);
1760 #endif
1761
1762 /*
1763 * Connect the zero-filled PT pages to their PD entries. This
1764 * implicitly maps the PT pages at their correct locations within
1765 * the PTmap.
1766 */
1767 pd_p = (pd_entry_t *)KPDphys;
1768 for (i = 0; i < nkpt; i++)
1769 pd_p[i] = (KPTphys + ptoa(i)) | X86_PG_RW | X86_PG_V;
1770
1771 /*
1772 * Map from start of the kernel in physical memory (staging
1773 * area) to the end of loader preallocated memory using 2MB
1774 * pages. This replaces some of the PD entries created above.
1775 * For compatibility, identity map 2M at the start.
1776 */
1777 pd_p[0] = X86_PG_V | PG_PS | pg_g | X86_PG_M | X86_PG_A |
1778 X86_PG_RW | pg_nx;
1779 for (i = 1, pax = kernphys; pax < KERNend; i++, pax += NBPDR) {
1780 /* Preset PG_M and PG_A because demotion expects it. */
1781 pd_p[i] = pax | X86_PG_V | PG_PS | pg_g | X86_PG_M |
1782 X86_PG_A | bootaddr_rwx(pax);
1783 }
1784
1785 /*
1786 * Because we map the physical blocks in 2M pages, adjust firstaddr
1787 * to record the physical blocks we've actually mapped into kernel
1788 * virtual address space.
1789 */
1790 if (*firstaddr < round_2mpage(KERNend))
1791 *firstaddr = round_2mpage(KERNend);
1792
1793 /* And connect up the PD to the PDP (leaving room for L4 pages) */
1794 pdp_p = (pdp_entry_t *)(KPDPphys + ptoa(KPML4I - KPML4BASE));
1795 for (i = 0; i < nkpdpe; i++)
1796 pdp_p[i + KPDPI] = (KPDphys + ptoa(i)) | X86_PG_RW | X86_PG_V;
1797
1798 #ifdef KASAN
1799 kasankernbase = kasan_md_addr_to_shad(KERNBASE);
1800 kasankpdpi = pmap_pdpe_index(kasankernbase);
1801 kasankpdi = pmap_pde_index(kasankernbase);
1802
1803 pdp_p = (pdp_entry_t *)KASANPDPphys;
1804 pdp_p[kasankpdpi] = (KASANPDphys | X86_PG_RW | X86_PG_V | pg_nx);
1805
1806 pd_p = (pd_entry_t *)KASANPDphys;
1807 for (i = 0; i < nkasanpte; i++)
1808 pd_p[i + kasankpdi] = (KASANPTphys + ptoa(i)) | X86_PG_RW |
1809 X86_PG_V | pg_nx;
1810
1811 pt_p = (pt_entry_t *)KASANPTphys;
1812 for (i = 0; i < nkasanpte * NPTEPG; i++)
1813 pt_p[i] = (KASANphys + ptoa(i)) | X86_PG_RW | X86_PG_V |
1814 X86_PG_M | X86_PG_A | pg_nx;
1815 #endif
1816
1817 /*
1818 * Now, set up the direct map region using 2MB and/or 1GB pages. If
1819 * the end of physical memory is not aligned to a 1GB page boundary,
1820 * then the residual physical memory is mapped with 2MB pages. Later,
1821 * if pmap_mapdev{_attr}() uses the direct map for non-write-back
1822 * memory, pmap_change_attr() will demote any 2MB or 1GB page mappings
1823 * that are partially used.
1824 */
1825 pd_p = (pd_entry_t *)DMPDphys;
1826 for (i = NPDEPG * ndm1g, j = 0; i < NPDEPG * ndmpdp; i++, j++) {
1827 pd_p[j] = (vm_paddr_t)i << PDRSHIFT;
1828 /* Preset PG_M and PG_A because demotion expects it. */
1829 pd_p[j] |= X86_PG_RW | X86_PG_V | PG_PS | pg_g |
1830 X86_PG_M | X86_PG_A | pg_nx;
1831 }
1832 pdp_p = (pdp_entry_t *)DMPDPphys;
1833 for (i = 0; i < ndm1g; i++) {
1834 pdp_p[i] = (vm_paddr_t)i << PDPSHIFT;
1835 /* Preset PG_M and PG_A because demotion expects it. */
1836 pdp_p[i] |= X86_PG_RW | X86_PG_V | PG_PS | pg_g |
1837 X86_PG_M | X86_PG_A | pg_nx;
1838 }
1839 for (j = 0; i < ndmpdp; i++, j++) {
1840 pdp_p[i] = DMPDphys + ptoa(j);
1841 pdp_p[i] |= X86_PG_RW | X86_PG_V | pg_nx;
1842 }
1843
1844 /*
1845 * Instead of using a 1G page for the memory containing the kernel,
1846 * use 2M pages with read-only and no-execute permissions. (If using 1G
1847 * pages, this will partially overwrite the PDPEs above.)
1848 */
1849 if (ndm1g > 0) {
1850 pd_p = (pd_entry_t *)DMPDkernphys;
1851 for (i = 0, pax = rounddown2(kernphys, NBPDP);
1852 i < NPDEPG * nkdmpde; i++, pax += NBPDR) {
1853 pd_p[i] = pax | X86_PG_V | PG_PS | pg_g | X86_PG_M |
1854 X86_PG_A | pg_nx | bootaddr_rwx(pax);
1855 }
1856 j = rounddown2(kernphys, NBPDP) >> PDPSHIFT;
1857 for (i = 0; i < nkdmpde; i++) {
1858 pdp_p[i + j] = (DMPDkernphys + ptoa(i)) |
1859 X86_PG_RW | X86_PG_V | pg_nx;
1860 }
1861 }
1862
1863 /* And recursively map PML4 to itself in order to get PTmap */
1864 p4_p = (pml4_entry_t *)KPML4phys;
1865 p4_p[PML4PML4I] = KPML4phys;
1866 p4_p[PML4PML4I] |= X86_PG_RW | X86_PG_V | pg_nx;
1867
1868 #ifdef KASAN
1869 /* Connect the KASAN shadow map slots up to the PML4. */
1870 for (i = 0; i < NKASANPML4E; i++) {
1871 p4_p[KASANPML4I + i] = KASANPDPphys + ptoa(i);
1872 p4_p[KASANPML4I + i] |= X86_PG_RW | X86_PG_V | pg_nx;
1873 }
1874 #endif
1875
1876 #ifdef KMSAN
1877 /* Connect the KMSAN shadow map slots up to the PML4. */
1878 for (i = 0; i < NKMSANSHADPML4E; i++) {
1879 p4_p[KMSANSHADPML4I + i] = KMSANSHADPDPphys + ptoa(i);
1880 p4_p[KMSANSHADPML4I + i] |= X86_PG_RW | X86_PG_V | pg_nx;
1881 }
1882
1883 /* Connect the KMSAN origin map slots up to the PML4. */
1884 for (i = 0; i < NKMSANORIGPML4E; i++) {
1885 p4_p[KMSANORIGPML4I + i] = KMSANORIGPDPphys + ptoa(i);
1886 p4_p[KMSANORIGPML4I + i] |= X86_PG_RW | X86_PG_V | pg_nx;
1887 }
1888 #endif
1889
1890 /* Connect the Direct Map slots up to the PML4. */
1891 for (i = 0; i < ndmpdpphys; i++) {
1892 p4_p[DMPML4I + i] = DMPDPphys + ptoa(i);
1893 p4_p[DMPML4I + i] |= X86_PG_RW | X86_PG_V | pg_nx;
1894 }
1895
1896 /* Connect the KVA slots up to the PML4 */
1897 for (i = 0; i < NKPML4E; i++) {
1898 p4_p[KPML4BASE + i] = KPDPphys + ptoa(i);
1899 p4_p[KPML4BASE + i] |= X86_PG_RW | X86_PG_V;
1900 }
1901
1902 kernel_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(KPML4phys);
1903 TSEXIT();
1904 }
1905
1906 /*
1907 * Bootstrap the system enough to run with virtual memory.
1908 *
1909 * On amd64 this is called after mapping has already been enabled
1910 * and just syncs the pmap module with what has already been done.
1911 * [We can't call it easily with mapping off since the kernel is not
1912 * mapped with PA == VA, hence we would have to relocate every address
1913 * from the linked base (virtual) address "KERNBASE" to the actual
1914 * (physical) address starting relative to 0]
1915 */
1916 void
1917 pmap_bootstrap(vm_paddr_t *firstaddr)
1918 {
1919 vm_offset_t va;
1920 pt_entry_t *pte, *pcpu_pte;
1921 struct region_descriptor r_gdt;
1922 uint64_t cr4, pcpu0_phys;
1923 u_long res;
1924 int i;
1925
1926 TSENTER();
1927 KERNend = *firstaddr;
1928 res = atop(KERNend - (vm_paddr_t)kernphys);
1929
1930 if (!pti)
1931 pg_g = X86_PG_G;
1932
1933 /*
1934 * Create an initial set of page tables to run the kernel in.
1935 */
1936 create_pagetables(firstaddr);
1937
1938 pcpu0_phys = allocpages(firstaddr, 1);
1939
1940 /*
1941 * Add a physical memory segment (vm_phys_seg) corresponding to the
1942 * preallocated kernel page table pages so that vm_page structures
1943 * representing these pages will be created. The vm_page structures
1944 * are required for promotion of the corresponding kernel virtual
1945 * addresses to superpage mappings.
1946 */
1947 vm_phys_early_add_seg(KPTphys, KPTphys + ptoa(nkpt));
1948
1949 /*
1950 * Account for the virtual addresses mapped by create_pagetables().
1951 */
1952 virtual_avail = (vm_offset_t)KERNSTART + round_2mpage(KERNend -
1953 (vm_paddr_t)kernphys);
1954 virtual_end = VM_MAX_KERNEL_ADDRESS;
1955
1956 /*
1957 * Enable PG_G global pages, then switch to the kernel page
1958 * table from the bootstrap page table. After the switch, it
1959 * is possible to enable SMEP and SMAP since PG_U bits are
1960 * correct now.
1961 */
1962 cr4 = rcr4();
1963 cr4 |= CR4_PGE;
1964 load_cr4(cr4);
1965 load_cr3(KPML4phys);
1966 if (cpu_stdext_feature & CPUID_STDEXT_SMEP)
1967 cr4 |= CR4_SMEP;
1968 if (cpu_stdext_feature & CPUID_STDEXT_SMAP)
1969 cr4 |= CR4_SMAP;
1970 load_cr4(cr4);
1971
1972 /*
1973 * Initialize the kernel pmap (which is statically allocated).
1974 * Count bootstrap data as being resident in case any of this data is
1975 * later unmapped (using pmap_remove()) and freed.
1976 */
1977 PMAP_LOCK_INIT(kernel_pmap);
1978 kernel_pmap->pm_pmltop = kernel_pml4;
1979 kernel_pmap->pm_cr3 = KPML4phys;
1980 kernel_pmap->pm_ucr3 = PMAP_NO_CR3;
1981 TAILQ_INIT(&kernel_pmap->pm_pvchunk);
1982 kernel_pmap->pm_stats.resident_count = res;
1983 vm_radix_init(&kernel_pmap->pm_root);
1984 kernel_pmap->pm_flags = pmap_flags;
1985 if ((cpu_stdext_feature2 & CPUID_STDEXT2_PKU) != 0) {
1986 rangeset_init(&kernel_pmap->pm_pkru, pkru_dup_range,
1987 pkru_free_range, kernel_pmap, M_NOWAIT);
1988 }
1989
1990 /*
1991 * The kernel pmap is always active on all CPUs. Once CPUs are
1992 * enumerated, the mask will be set equal to all_cpus.
1993 */
1994 CPU_FILL(&kernel_pmap->pm_active);
1995
1996 /*
1997 * Initialize the TLB invalidations generation number lock.
1998 */
1999 mtx_init(&invl_gen_mtx, "invlgn", NULL, MTX_DEF);
2000
2001 /*
2002 * Reserve some special page table entries/VA space for temporary
2003 * mapping of pages.
2004 */
2005 #define SYSMAP(c, p, v, n) \
2006 v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);
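/*
 * For example, the SYSMAP(caddr_t, CMAP1, crashdumpmap, MAXDUMPPGS) use
 * below expands to roughly:
 *
 *	crashdumpmap = (caddr_t)va;
 *	va += MAXDUMPPGS * PAGE_SIZE;
 *	CMAP1 = pte;
 *	pte += MAXDUMPPGS;
 *
 * i.e., it carves out 'n' pages worth of VA starting at 'va' and records
 * the first PTE that maps the range in 'p'.
 */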
2007
2008 va = virtual_avail;
2009 pte = vtopte(va);
2010
2011 /*
2012 * Crashdump maps. The first page is reused as CMAP1 for the
2013 * memory test.
2014 */
2015 SYSMAP(caddr_t, CMAP1, crashdumpmap, MAXDUMPPGS)
2016 CADDR1 = crashdumpmap;
2017
2018 SYSMAP(struct pcpu *, pcpu_pte, __pcpu, MAXCPU);
2019 virtual_avail = va;
2020
2021 /*
2022 * Map the BSP PCPU now; the rest of the PCPUs are mapped by
2023 * amd64_mp_alloc_pcpu()/start_all_aps() when we know the
2024 * number of CPUs and NUMA affinity.
2025 */
2026 pcpu_pte[0] = pcpu0_phys | X86_PG_V | X86_PG_RW | pg_g | pg_nx |
2027 X86_PG_M | X86_PG_A;
2028 for (i = 1; i < MAXCPU; i++)
2029 pcpu_pte[i] = 0;
2030
2031 /*
2032 * Re-initialize PCPU area for BSP after switching.
2033 * Make hardware use gdt and common_tss from the new PCPU.
2034 */
2035 STAILQ_INIT(&cpuhead);
2036 wrmsr(MSR_GSBASE, (uint64_t)&__pcpu[0]);
2037 pcpu_init(&__pcpu[0], 0, sizeof(struct pcpu));
2038 amd64_bsp_pcpu_init1(&__pcpu[0]);
2039 amd64_bsp_ist_init(&__pcpu[0]);
2040 __pcpu[0].pc_common_tss.tss_iobase = sizeof(struct amd64tss) +
2041 IOPERM_BITMAP_SIZE;
2042 memcpy(__pcpu[0].pc_gdt, temp_bsp_pcpu.pc_gdt, NGDT *
2043 sizeof(struct user_segment_descriptor));
2044 gdt_segs[GPROC0_SEL].ssd_base = (uintptr_t)&__pcpu[0].pc_common_tss;
2045 ssdtosyssd(&gdt_segs[GPROC0_SEL],
2046 (struct system_segment_descriptor *)&__pcpu[0].pc_gdt[GPROC0_SEL]);
2047 r_gdt.rd_limit = NGDT * sizeof(struct user_segment_descriptor) - 1;
2048 r_gdt.rd_base = (long)__pcpu[0].pc_gdt;
2049 lgdt(&r_gdt);
2050 wrmsr(MSR_GSBASE, (uint64_t)&__pcpu[0]);
2051 ltr(GSEL(GPROC0_SEL, SEL_KPL));
2052 __pcpu[0].pc_dynamic = temp_bsp_pcpu.pc_dynamic;
2053 __pcpu[0].pc_acpi_id = temp_bsp_pcpu.pc_acpi_id;
2054
2055 /*
2056 * Initialize the PAT MSR.
2057 * pmap_init_pat() clears and sets CR4_PGE, which, as a
2058 * side-effect, invalidates stale PG_G TLB entries that might
2059 * have been created in our pre-boot environment.
2060 */
2061 pmap_init_pat();
2062
2063 /* Initialize TLB Context Id. */
2064 if (pmap_pcid_enabled) {
2065 kernel_pmap->pm_pcidp = (void *)(uintptr_t)
2066 offsetof(struct pcpu, pc_kpmap_store);
2067
2068 PCPU_SET(kpmap_store.pm_pcid, PMAP_PCID_KERN);
2069 PCPU_SET(kpmap_store.pm_gen, 1);
2070
2071 /*
2072 * PMAP_PCID_KERN + 1 is used for the initialization of
2073 * the proc0 pmap. The pmap's PCID state might be used by
2074 * an EFIRT entry before the first context switch, so it
2075 * needs to be valid.
2076 */
2077 PCPU_SET(pcid_next, PMAP_PCID_KERN + 2);
2078 PCPU_SET(pcid_gen, 1);
2079
2080 /*
2081 * The pcpu area for APs is zeroed during AP startup.
2082 * pc_pcid_next and pc_pcid_gen are initialized by each AP
2083 * during pcpu setup.
2084 */
2085 load_cr4(rcr4() | CR4_PCIDE);
2086 }
2087 TSEXIT();
2088 }
2089
2090 /*
2091 * Setup the PAT MSR.
2092 */
2093 void
2094 pmap_init_pat(void)
2095 {
2096 uint64_t pat_msr;
2097 u_long cr0, cr4;
2098 int i;
2099
2100 /* Bail if this CPU doesn't implement PAT. */
2101 if ((cpu_feature & CPUID_PAT) == 0)
2102 panic("no PAT??");
2103
2104 /* Set default PAT index table. */
2105 for (i = 0; i < PAT_INDEX_SIZE; i++)
2106 pat_index[i] = -1;
2107 pat_index[PAT_WRITE_BACK] = 0;
2108 pat_index[PAT_WRITE_THROUGH] = 1;
2109 pat_index[PAT_UNCACHEABLE] = 3;
2110 pat_index[PAT_WRITE_COMBINING] = 6;
2111 pat_index[PAT_WRITE_PROTECTED] = 5;
2112 pat_index[PAT_UNCACHED] = 2;
2113
2114 /*
2115 * Initialize default PAT entries.
2116 * Leave the indices 0-3 at the default of WB, WT, UC-, and UC.
2117 * Program 5 and 6 as WP and WC.
2118 *
2119 * Leave 4 and 7 as WB and UC. Note that a recursive page table
2120 * mapping for a 2M page uses a PAT value with the bit 3 set due
2121 * to its overload with PG_PS.
2122 */
2123 pat_msr = PAT_VALUE(0, PAT_WRITE_BACK) |
2124 PAT_VALUE(1, PAT_WRITE_THROUGH) |
2125 PAT_VALUE(2, PAT_UNCACHED) |
2126 PAT_VALUE(3, PAT_UNCACHEABLE) |
2127 PAT_VALUE(4, PAT_WRITE_BACK) |
2128 PAT_VALUE(5, PAT_WRITE_PROTECTED) |
2129 PAT_VALUE(6, PAT_WRITE_COMBINING) |
2130 PAT_VALUE(7, PAT_UNCACHEABLE);
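/*
 * The programmed PAT layout is therefore:
 *
 *	index:	0	1	2	3	4	5	6	7
 *	type:	WB	WT	UC-	UC	WB	WP	WC	UC
 */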
2131
2132 /* Disable PGE. */
2133 cr4 = rcr4();
2134 load_cr4(cr4 & ~CR4_PGE);
2135
2136 /* Disable caches (CD = 1, NW = 0). */
2137 cr0 = rcr0();
2138 load_cr0((cr0 & ~CR0_NW) | CR0_CD);
2139
2140 /* Flushes caches and TLBs. */
2141 wbinvd();
2142 invltlb();
2143
2144 /* Update PAT and index table. */
2145 wrmsr(MSR_PAT, pat_msr);
2146
2147 /* Flush caches and TLBs again. */
2148 wbinvd();
2149 invltlb();
2150
2151 /* Restore caches and PGE. */
2152 load_cr0(cr0);
2153 load_cr4(cr4);
2154 }
2155
2156 vm_page_t
2157 pmap_page_alloc_below_4g(bool zeroed)
2158 {
2159 return (vm_page_alloc_noobj_contig((zeroed ? VM_ALLOC_ZERO : 0),
2160 1, 0, (1ULL << 32), PAGE_SIZE, 0, VM_MEMATTR_DEFAULT));
2161 }
2162
2163 extern const char la57_trampoline[], la57_trampoline_gdt_desc[],
2164 la57_trampoline_gdt[], la57_trampoline_end[];
2165
2166 static void
2167 pmap_bootstrap_la57(void *arg __unused)
2168 {
2169 char *v_code;
2170 pml5_entry_t *v_pml5;
2171 pml4_entry_t *v_pml4;
2172 pdp_entry_t *v_pdp;
2173 pd_entry_t *v_pd;
2174 pt_entry_t *v_pt;
2175 vm_page_t m_code, m_pml4, m_pdp, m_pd, m_pt, m_pml5;
2176 void (*la57_tramp)(uint64_t pml5);
2177 struct region_descriptor r_gdt;
2178
2179 if ((cpu_stdext_feature2 & CPUID_STDEXT2_LA57) == 0)
2180 return;
2181 TUNABLE_INT_FETCH("vm.pmap.la57", &la57);
2182 if (!la57)
2183 return;
2184
2185 r_gdt.rd_limit = NGDT * sizeof(struct user_segment_descriptor) - 1;
2186 r_gdt.rd_base = (long)__pcpu[0].pc_gdt;
2187
2188 m_code = pmap_page_alloc_below_4g(true);
2189 v_code = (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_code));
2190 m_pml5 = pmap_page_alloc_below_4g(true);
2191 KPML5phys = VM_PAGE_TO_PHYS(m_pml5);
2192 v_pml5 = (pml5_entry_t *)PHYS_TO_DMAP(KPML5phys);
2193 m_pml4 = pmap_page_alloc_below_4g(true);
2194 v_pml4 = (pdp_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pml4));
2195 m_pdp = pmap_page_alloc_below_4g(true);
2196 v_pdp = (pdp_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pdp));
2197 m_pd = pmap_page_alloc_below_4g(true);
2198 v_pd = (pdp_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pd));
2199 m_pt = pmap_page_alloc_below_4g(true);
2200 v_pt = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pt));
2201
2202 /*
2203 * Map m_code 1:1; it appears below 4G in KVA because its physical
2204 * address is below 4G. Since kernel KVA is in the upper half,
2205 * the pml4e should be zero and free for temporary use.
2206 */
2207 kernel_pmap->pm_pmltop[pmap_pml4e_index(VM_PAGE_TO_PHYS(m_code))] =
2208 VM_PAGE_TO_PHYS(m_pdp) | X86_PG_V | X86_PG_RW | X86_PG_A |
2209 X86_PG_M;
2210 v_pdp[pmap_pdpe_index(VM_PAGE_TO_PHYS(m_code))] =
2211 VM_PAGE_TO_PHYS(m_pd) | X86_PG_V | X86_PG_RW | X86_PG_A |
2212 X86_PG_M;
2213 v_pd[pmap_pde_index(VM_PAGE_TO_PHYS(m_code))] =
2214 VM_PAGE_TO_PHYS(m_pt) | X86_PG_V | X86_PG_RW | X86_PG_A |
2215 X86_PG_M;
2216 v_pt[pmap_pte_index(VM_PAGE_TO_PHYS(m_code))] =
2217 VM_PAGE_TO_PHYS(m_code) | X86_PG_V | X86_PG_RW | X86_PG_A |
2218 X86_PG_M;
2219
2220 /*
2221 * Add a pml5 entry at the top of KVA pointing to the existing pml4
2222 * table, which brings all existing kernel mappings into the level 5 table.
2223 */
2224 v_pml5[pmap_pml5e_index(UPT_MAX_ADDRESS)] = KPML4phys | X86_PG_V |
2225 X86_PG_RW | X86_PG_A | X86_PG_M | pg_g;
2226
2227 /*
2228 * Add pml5 entry for 1:1 trampoline mapping after LA57 is turned on.
2229 */
2230 v_pml5[pmap_pml5e_index(VM_PAGE_TO_PHYS(m_code))] =
2231 VM_PAGE_TO_PHYS(m_pml4) | X86_PG_V | X86_PG_RW | X86_PG_A |
2232 X86_PG_M;
2233 v_pml4[pmap_pml4e_index(VM_PAGE_TO_PHYS(m_code))] =
2234 VM_PAGE_TO_PHYS(m_pdp) | X86_PG_V | X86_PG_RW | X86_PG_A |
2235 X86_PG_M;
2236
2237 /*
2238 * Copy and call the 48->57 trampoline, hoping that we return here alive.
2239 */
2240 bcopy(la57_trampoline, v_code, la57_trampoline_end - la57_trampoline);
2241 *(u_long *)(v_code + 2 + (la57_trampoline_gdt_desc - la57_trampoline)) =
2242 la57_trampoline_gdt - la57_trampoline + VM_PAGE_TO_PHYS(m_code);
2243 la57_tramp = (void (*)(uint64_t))VM_PAGE_TO_PHYS(m_code);
2244 invlpg((vm_offset_t)la57_tramp);
2245 la57_tramp(KPML5phys);
2246
2247 /*
2248 * The GDT was necessarily reset by the trampoline; switch back to our GDT.
2249 */
2250 lgdt(&r_gdt);
2251 wrmsr(MSR_GSBASE, (uint64_t)&__pcpu[0]);
2252 load_ds(_udatasel);
2253 load_es(_udatasel);
2254 load_fs(_ufssel);
2255 ssdtosyssd(&gdt_segs[GPROC0_SEL],
2256 (struct system_segment_descriptor *)&__pcpu[0].pc_gdt[GPROC0_SEL]);
2257 ltr(GSEL(GPROC0_SEL, SEL_KPL));
2258
2259 /*
2260 * Now unmap the trampoline, and free the pages.
2261 * Clear pml5 entry used for 1:1 trampoline mapping.
2262 */
2263 pte_clear(&v_pml5[pmap_pml5e_index(VM_PAGE_TO_PHYS(m_code))]);
2264 invlpg((vm_offset_t)v_code);
2265 vm_page_free(m_code);
2266 vm_page_free(m_pdp);
2267 vm_page_free(m_pd);
2268 vm_page_free(m_pt);
2269
2270 /*
2271 * Recursively map PML5 to itself in order to get PTmap and
2272 * PDmap.
2273 */
2274 v_pml5[PML5PML5I] = KPML5phys | X86_PG_RW | X86_PG_V | pg_nx;
2275
2276 vtoptem = ((1ul << (NPTEPGSHIFT + NPDEPGSHIFT + NPDPEPGSHIFT +
2277 NPML4EPGSHIFT + NPML5EPGSHIFT)) - 1) << 3;
2278 PTmap = (vm_offset_t)P5Tmap;
2279 vtopdem = ((1ul << (NPDEPGSHIFT + NPDPEPGSHIFT +
2280 NPML4EPGSHIFT + NPML5EPGSHIFT)) - 1) << 3;
2281 PDmap = (vm_offset_t)P5Dmap;
2282
2283 kernel_pmap->pm_cr3 = KPML5phys;
2284 kernel_pmap->pm_pmltop = v_pml5;
2285 pmap_pt_page_count_adj(kernel_pmap, 1);
2286 }
2287 SYSINIT(la57, SI_SUB_KMEM, SI_ORDER_ANY, pmap_bootstrap_la57, NULL);
2288
2289 /*
2290 * Initialize a vm_page's machine-dependent fields.
2291 */
2292 void
2293 pmap_page_init(vm_page_t m)
2294 {
2295
2296 TAILQ_INIT(&m->md.pv_list);
2297 m->md.pat_mode = PAT_WRITE_BACK;
2298 }
2299
2300 static int pmap_allow_2m_x_ept;
2301 SYSCTL_INT(_vm_pmap, OID_AUTO, allow_2m_x_ept, CTLFLAG_RWTUN | CTLFLAG_NOFETCH,
2302 &pmap_allow_2m_x_ept, 0,
2303 "Allow executable superpage mappings in EPT");
2304
2305 void
2306 pmap_allow_2m_x_ept_recalculate(void)
2307 {
2308 /*
2309 * SKL002, SKL012S. Since the EPT format is only used by
2310 * Intel CPUs, the vendor check is merely a formality.
2311 */
2312 if (!(cpu_vendor_id != CPU_VENDOR_INTEL ||
2313 (cpu_ia32_arch_caps & IA32_ARCH_CAP_IF_PSCHANGE_MC_NO) != 0 ||
2314 (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
2315 (CPUID_TO_MODEL(cpu_id) == 0x26 || /* Atoms */
2316 CPUID_TO_MODEL(cpu_id) == 0x27 ||
2317 CPUID_TO_MODEL(cpu_id) == 0x35 ||
2318 CPUID_TO_MODEL(cpu_id) == 0x36 ||
2319 CPUID_TO_MODEL(cpu_id) == 0x37 ||
2320 CPUID_TO_MODEL(cpu_id) == 0x86 ||
2321 CPUID_TO_MODEL(cpu_id) == 0x1c ||
2322 CPUID_TO_MODEL(cpu_id) == 0x4a ||
2323 CPUID_TO_MODEL(cpu_id) == 0x4c ||
2324 CPUID_TO_MODEL(cpu_id) == 0x4d ||
2325 CPUID_TO_MODEL(cpu_id) == 0x5a ||
2326 CPUID_TO_MODEL(cpu_id) == 0x5c ||
2327 CPUID_TO_MODEL(cpu_id) == 0x5d ||
2328 CPUID_TO_MODEL(cpu_id) == 0x5f ||
2329 CPUID_TO_MODEL(cpu_id) == 0x6e ||
2330 CPUID_TO_MODEL(cpu_id) == 0x7a ||
2331 CPUID_TO_MODEL(cpu_id) == 0x57 || /* Knights */
2332 CPUID_TO_MODEL(cpu_id) == 0x85))))
2333 pmap_allow_2m_x_ept = 1;
2334 #ifndef BURN_BRIDGES
2335 TUNABLE_INT_FETCH("hw.allow_2m_x_ept", &pmap_allow_2m_x_ept);
2336 #endif
2337 TUNABLE_INT_FETCH("vm.pmap.allow_2m_x_ept", &pmap_allow_2m_x_ept);
2338 }
2339
2340 static bool
2341 pmap_allow_2m_x_page(pmap_t pmap, bool executable)
2342 {
2343
2344 return (pmap->pm_type != PT_EPT || !executable ||
2345 !pmap_allow_2m_x_ept);
2346 }
2347
2348 #ifdef NUMA
2349 static void
2350 pmap_init_pv_table(void)
2351 {
2352 struct pmap_large_md_page *pvd;
2353 vm_size_t s;
2354 long start, end, highest, pv_npg;
2355 int domain, i, j, pages;
2356
2357 /*
2358 * For correctness we depend on the entry size dividing the page size
2359 * evenly. As a tradeoff between performance and total memory use, the
2360 * entry is 64 bytes (i.e., one cacheline) in size. Not being smaller
2361 * avoids false sharing, but not being 128 bytes means that the adjacent-
2362 * cacheline prefetcher may still generate some avoidable traffic.
2363 *
2364 * Assert the size so that accidental changes fail to compile.
2365 */
2366 CTASSERT((sizeof(*pvd) == 64));
2367
2368 /*
2369 * Calculate the size of the array.
2370 */
2371 pmap_last_pa = vm_phys_segs[vm_phys_nsegs - 1].end;
2372 pv_npg = howmany(pmap_last_pa, NBPDR);
2373 s = (vm_size_t)pv_npg * sizeof(struct pmap_large_md_page);
2374 s = round_page(s);
2375 pv_table = (struct pmap_large_md_page *)kva_alloc(s);
2376 if (pv_table == NULL)
2377 panic("%s: kva_alloc failed\n", __func__);
2378
2379 /*
2380 * Iterate physical segments to allocate space for respective pages.
2381 */
2382 highest = -1;
2383 s = 0;
2384 for (i = 0; i < vm_phys_nsegs; i++) {
2385 end = vm_phys_segs[i].end / NBPDR;
2386 domain = vm_phys_segs[i].domain;
2387
2388 if (highest >= end)
2389 continue;
2390
2391 start = highest + 1;
2392 pvd = &pv_table[start];
2393
2394 pages = end - start + 1;
2395 s = round_page(pages * sizeof(*pvd));
2396 highest = start + (s / sizeof(*pvd)) - 1;
2397
2398 for (j = 0; j < s; j += PAGE_SIZE) {
2399 vm_page_t m = vm_page_alloc_noobj_domain(domain, 0);
2400 if (m == NULL)
2401 panic("failed to allocate PV table page");
2402 pmap_qenter((vm_offset_t)pvd + j, &m, 1);
2403 }
2404
2405 for (j = 0; j < s / sizeof(*pvd); j++) {
2406 rw_init_flags(&pvd->pv_lock, "pmap pv list", RW_NEW);
2407 TAILQ_INIT(&pvd->pv_page.pv_list);
2408 pvd->pv_page.pv_gen = 0;
2409 pvd->pv_page.pat_mode = 0;
2410 pvd->pv_invl_gen = 0;
2411 pvd++;
2412 }
2413 }
2414 pvd = &pv_dummy_large;
2415 rw_init_flags(&pvd->pv_lock, "pmap pv list dummy", RW_NEW);
2416 TAILQ_INIT(&pvd->pv_page.pv_list);
2417 pvd->pv_page.pv_gen = 0;
2418 pvd->pv_page.pat_mode = 0;
2419 pvd->pv_invl_gen = 0;
2420 }
2421 #else
2422 static void
2423 pmap_init_pv_table(void)
2424 {
2425 vm_size_t s;
2426 long i, pv_npg;
2427
2428 /*
2429 * Initialize the pool of pv list locks.
2430 */
2431 for (i = 0; i < NPV_LIST_LOCKS; i++)
2432 rw_init(&pv_list_locks[i], "pmap pv list");
2433
2434 /*
2435 * Calculate the size of the pv head table for superpages.
2436 */
2437 pv_npg = howmany(vm_phys_segs[vm_phys_nsegs - 1].end, NBPDR);
2438
2439 /*
2440 * Allocate memory for the pv head table for superpages.
2441 */
2442 s = (vm_size_t)pv_npg * sizeof(struct md_page);
2443 s = round_page(s);
2444 pv_table = kmem_malloc(s, M_WAITOK | M_ZERO);
2445 for (i = 0; i < pv_npg; i++)
2446 TAILQ_INIT(&pv_table[i].pv_list);
2447 TAILQ_INIT(&pv_dummy.pv_list);
2448 }
2449 #endif
2450
2451 /*
2452 * Initialize the pmap module.
2453 *
2454 * Called by vm_mem_init(), to initialize any structures that the pmap
2455 * system needs to map virtual memory.
2456 */
2457 void
2458 pmap_init(void)
2459 {
2460 struct pmap_preinit_mapping *ppim;
2461 vm_page_t m, mpte;
2462 int error, i, ret, skz63;
2463
2464 /* L1TF, reserve page @0 unconditionally */
2465 vm_page_blacklist_add(0, bootverbose);
2466
2467 /* Detect bare-metal Skylake Server and Skylake-X. */
2468 if (vm_guest == VM_GUEST_NO && cpu_vendor_id == CPU_VENDOR_INTEL &&
2469 CPUID_TO_FAMILY(cpu_id) == 0x6 && CPUID_TO_MODEL(cpu_id) == 0x55) {
2470 /*
2471 * Skylake-X errata SKZ63. Processor May Hang When
2472 * Executing Code In an HLE Transaction Region between
2473 * 40000000H and 403FFFFFH.
2474 *
2475 * Mark the pages in the range as preallocated. It
2476 * seems to be impossible to distinguish between
2477 * Skylake Server and Skylake X.
2478 */
2479 skz63 = 1;
2480 TUNABLE_INT_FETCH("hw.skz63_enable", &skz63);
2481 if (skz63 != 0) {
2482 if (bootverbose)
2483 printf("SKZ63: skipping 4M RAM starting "
2484 "at physical 1G\n");
2485 for (i = 0; i < atop(0x400000); i++) {
2486 ret = vm_page_blacklist_add(0x40000000 +
2487 ptoa(i), false);
2488 if (!ret && bootverbose)
2489 printf("page at %#lx already used\n",
2490 0x40000000 + ptoa(i));
2491 }
2492 }
2493 }
2494
2495 /* IFU */
2496 pmap_allow_2m_x_ept_recalculate();
2497
2498 /*
2499 * Initialize the vm page array entries for the kernel pmap's
2500 * page table pages.
2501 */
2502 PMAP_LOCK(kernel_pmap);
2503 for (i = 0; i < nkpt; i++) {
2504 mpte = PHYS_TO_VM_PAGE(KPTphys + (i << PAGE_SHIFT));
2505 KASSERT(mpte >= vm_page_array &&
2506 mpte < &vm_page_array[vm_page_array_size],
2507 ("pmap_init: page table page is out of range"));
2508 mpte->pindex = pmap_pde_pindex(KERNBASE) + i;
2509 mpte->phys_addr = KPTphys + (i << PAGE_SHIFT);
2510 mpte->ref_count = 1;
2511
2512 /*
2513 * Collect the page table pages that were replaced by a 2MB
2514 * page in create_pagetables(). They are zero filled.
2515 */
2516 if ((i == 0 ||
2517 kernphys + ((vm_paddr_t)(i - 1) << PDRSHIFT) < KERNend) &&
2518 pmap_insert_pt_page(kernel_pmap, mpte, false, false))
2519 panic("pmap_init: pmap_insert_pt_page failed");
2520 }
2521 PMAP_UNLOCK(kernel_pmap);
2522 vm_wire_add(nkpt);
2523
2524 /*
2525 * If the kernel is running on a virtual machine, then it must assume
2526 * that MCA is enabled by the hypervisor. Moreover, the kernel must
2527 * be prepared for the hypervisor changing the vendor and family that
2528 * are reported by CPUID. Consequently, the workaround for AMD Family
2529 * 10h Erratum 383 is enabled if the processor's feature set does not
2530 * include at least one feature that is only supported by older Intel
2531 * or newer AMD processors.
2532 */
2533 if (vm_guest != VM_GUEST_NO && (cpu_feature & CPUID_SS) == 0 &&
2534 (cpu_feature2 & (CPUID2_SSSE3 | CPUID2_SSE41 | CPUID2_AESNI |
2535 CPUID2_AVX | CPUID2_XSAVE)) == 0 && (amd_feature2 & (AMDID2_XOP |
2536 AMDID2_FMA4)) == 0)
2537 workaround_erratum383 = 1;
2538
2539 /*
2540 * Are large page mappings enabled?
2541 */
2542 TUNABLE_INT_FETCH("vm.pmap.pg_ps_enabled", &pg_ps_enabled);
2543 if (pg_ps_enabled) {
2544 KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0,
2545 ("pmap_init: can't assign to pagesizes[1]"));
2546 pagesizes[1] = NBPDR;
2547 if ((amd_feature & AMDID_PAGE1GB) != 0) {
2548 KASSERT(MAXPAGESIZES > 2 && pagesizes[2] == 0,
2549 ("pmap_init: can't assign to pagesizes[2]"));
2550 pagesizes[2] = NBPDP;
2551 }
2552 }
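/*
 * With the assignments above, pagesizes[1] advertises 2MB (NBPDR)
 * superpages and, when 1GB pages are supported, pagesizes[2] advertises
 * 1GB (NBPDP) superpages.
 */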
2553
2554 /*
2555 * Initialize pv chunk lists.
2556 */
2557 for (i = 0; i < PMAP_MEMDOM; i++) {
2558 mtx_init(&pv_chunks[i].pvc_lock, "pmap pv chunk list", NULL, MTX_DEF);
2559 TAILQ_INIT(&pv_chunks[i].pvc_list);
2560 }
2561 pmap_init_pv_table();
2562
2563 pmap_initialized = 1;
2564 for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
2565 ppim = pmap_preinit_mapping + i;
2566 if (ppim->va == 0)
2567 continue;
2568 /* Make the direct map consistent */
2569 if (ppim->pa < dmaplimit && ppim->pa + ppim->sz <= dmaplimit) {
2570 (void)pmap_change_attr(PHYS_TO_DMAP(ppim->pa),
2571 ppim->sz, ppim->mode);
2572 }
2573 if (!bootverbose)
2574 continue;
2575 printf("PPIM %u: PA=%#lx, VA=%#lx, size=%#lx, mode=%#x\n", i,
2576 ppim->pa, ppim->va, ppim->sz, ppim->mode);
2577 }
2578
2579 mtx_init(&qframe_mtx, "qfrmlk", NULL, MTX_SPIN);
2580 error = vmem_alloc(kernel_arena, PAGE_SIZE, M_BESTFIT | M_WAITOK,
2581 (vmem_addr_t *)&qframe);
2582 if (error != 0)
2583 panic("qframe allocation failed");
2584
2585 lm_ents = 8;
2586 TUNABLE_INT_FETCH("vm.pmap.large_map_pml4_entries", &lm_ents);
2587 if (lm_ents > LMEPML4I - LMSPML4I + 1)
2588 lm_ents = LMEPML4I - LMSPML4I + 1;
2589 #ifdef KMSAN
2590 if (lm_ents > KMSANORIGPML4I - LMSPML4I) {
2591 printf(
2592 "pmap: shrinking large map for KMSAN (%d slots to %ld slots)\n",
2593 lm_ents, KMSANORIGPML4I - LMSPML4I);
2594 lm_ents = KMSANORIGPML4I - LMSPML4I;
2595 }
2596 #endif
2597 if (bootverbose)
2598 printf("pmap: large map %u PML4 slots (%lu GB)\n",
2599 lm_ents, (u_long)lm_ents * (NBPML4 / 1024 / 1024 / 1024));
2600 if (lm_ents != 0) {
2601 large_vmem = vmem_create("large", LARGEMAP_MIN_ADDRESS,
2602 (vmem_size_t)lm_ents * NBPML4, PAGE_SIZE, 0, M_WAITOK);
2603 if (large_vmem == NULL) {
2604 printf("pmap: cannot create large map\n");
2605 lm_ents = 0;
2606 }
2607 for (i = 0; i < lm_ents; i++) {
2608 m = pmap_large_map_getptp_unlocked();
2609 /* XXXKIB la57 */
2610 kernel_pml4[LMSPML4I + i] = X86_PG_V |
2611 X86_PG_RW | X86_PG_A | X86_PG_M | pg_nx |
2612 VM_PAGE_TO_PHYS(m);
2613 }
2614 }
2615 }
2616
2617 SYSCTL_UINT(_vm_pmap, OID_AUTO, large_map_pml4_entries,
2618 CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &lm_ents, 0,
2619 "Maximum number of PML4 entries for use by large map (tunable). "
2620 "Each entry corresponds to 512GB of address space.");
2621
2622 static SYSCTL_NODE(_vm_pmap, OID_AUTO, pde, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
2623 "2MB page mapping counters");
2624
2625 static COUNTER_U64_DEFINE_EARLY(pmap_pde_demotions);
2626 SYSCTL_COUNTER_U64(_vm_pmap_pde, OID_AUTO, demotions,
2627 CTLFLAG_RD, &pmap_pde_demotions, "2MB page demotions");
2628
2629 static COUNTER_U64_DEFINE_EARLY(pmap_pde_mappings);
2630 SYSCTL_COUNTER_U64(_vm_pmap_pde, OID_AUTO, mappings, CTLFLAG_RD,
2631 &pmap_pde_mappings, "2MB page mappings");
2632
2633 static COUNTER_U64_DEFINE_EARLY(pmap_pde_p_failures);
2634 SYSCTL_COUNTER_U64(_vm_pmap_pde, OID_AUTO, p_failures, CTLFLAG_RD,
2635 &pmap_pde_p_failures, "2MB page promotion failures");
2636
2637 static COUNTER_U64_DEFINE_EARLY(pmap_pde_promotions);
2638 SYSCTL_COUNTER_U64(_vm_pmap_pde, OID_AUTO, promotions, CTLFLAG_RD,
2639 &pmap_pde_promotions, "2MB page promotions");
2640
2641 static SYSCTL_NODE(_vm_pmap, OID_AUTO, pdpe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
2642 "1GB page mapping counters");
2643
2644 static COUNTER_U64_DEFINE_EARLY(pmap_pdpe_demotions);
2645 SYSCTL_COUNTER_U64(_vm_pmap_pdpe, OID_AUTO, demotions, CTLFLAG_RD,
2646 &pmap_pdpe_demotions, "1GB page demotions");
2647
2648 /***************************************************
2649 * Low level helper routines.....
2650 ***************************************************/
2651
2652 static pt_entry_t
2653 pmap_swap_pat(pmap_t pmap, pt_entry_t entry)
2654 {
2655 int x86_pat_bits = X86_PG_PTE_PAT | X86_PG_PDE_PAT;
2656
2657 switch (pmap->pm_type) {
2658 case PT_X86:
2659 case PT_RVI:
2660 /* Verify that both PAT bits are not set at the same time */
2661 KASSERT((entry & x86_pat_bits) != x86_pat_bits,
2662 ("Invalid PAT bits in entry %#lx", entry));
2663
2664 /* Swap the PAT bits if one of them is set */
2665 if ((entry & x86_pat_bits) != 0)
2666 entry ^= x86_pat_bits;
2667 break;
2668 case PT_EPT:
2669 /*
2670 * Nothing to do - the memory attributes are represented
2671 * the same way for regular pages and superpages.
2672 */
2673 break;
2674 default:
2675 panic("pmap_switch_pat_bits: bad pm_type %d", pmap->pm_type);
2676 }
2677
2678 return (entry);
2679 }
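/*
 * For example (sketch): when a 4KB PTE with X86_PG_PTE_PAT set is reused
 * as a 2MB PDE, the XOR above clears X86_PG_PTE_PAT and sets
 * X86_PG_PDE_PAT instead, so the selected PAT index survives the
 * promotion; demotion works symmetrically.
 */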
2680
2681 bool
2682 pmap_is_valid_memattr(pmap_t pmap __unused, vm_memattr_t mode)
2683 {
2684
2685 return (mode >= 0 && mode < PAT_INDEX_SIZE &&
2686 pat_index[(int)mode] >= 0);
2687 }
2688
2689 /*
2690 * Determine the appropriate bits to set in a PTE or PDE for a specified
2691 * caching mode.
2692 */
2693 int
2694 pmap_cache_bits(pmap_t pmap, int mode, bool is_pde)
2695 {
2696 int cache_bits, pat_flag, pat_idx;
2697
2698 if (!pmap_is_valid_memattr(pmap, mode))
2699 panic("Unknown caching mode %d\n", mode);
2700
2701 switch (pmap->pm_type) {
2702 case PT_X86:
2703 case PT_RVI:
2704 /* The PAT bit is different for PTE's and PDE's. */
2705 pat_flag = is_pde ? X86_PG_PDE_PAT : X86_PG_PTE_PAT;
2706
2707 /* Map the caching mode to a PAT index. */
2708 pat_idx = pat_index[mode];
2709
2710 /* Map the 3-bit index value into the PAT, PCD, and PWT bits. */
2711 cache_bits = 0;
2712 if (pat_idx & 0x4)
2713 cache_bits |= pat_flag;
2714 if (pat_idx & 0x2)
2715 cache_bits |= PG_NC_PCD;
2716 if (pat_idx & 0x1)
2717 cache_bits |= PG_NC_PWT;
2718 break;
2719
2720 case PT_EPT:
2721 cache_bits = EPT_PG_IGNORE_PAT | EPT_PG_MEMORY_TYPE(mode);
2722 break;
2723
2724 default:
2725 panic("unsupported pmap type %d", pmap->pm_type);
2726 }
2727
2728 return (cache_bits);
2729 }
2730
2731 static int
2732 pmap_cache_mask(pmap_t pmap, bool is_pde)
2733 {
2734 int mask;
2735
2736 switch (pmap->pm_type) {
2737 case PT_X86:
2738 case PT_RVI:
2739 mask = is_pde ? X86_PG_PDE_CACHE : X86_PG_PTE_CACHE;
2740 break;
2741 case PT_EPT:
2742 mask = EPT_PG_IGNORE_PAT | EPT_PG_MEMORY_TYPE(0x7);
2743 break;
2744 default:
2745 panic("pmap_cache_mask: invalid pm_type %d", pmap->pm_type);
2746 }
2747
2748 return (mask);
2749 }
2750
2751 static int
2752 pmap_pat_index(pmap_t pmap, pt_entry_t pte, bool is_pde)
2753 {
2754 int pat_flag, pat_idx;
2755
2756 pat_idx = 0;
2757 switch (pmap->pm_type) {
2758 case PT_X86:
2759 case PT_RVI:
2760 /* The PAT bit is different for PTE's and PDE's. */
2761 pat_flag = is_pde ? X86_PG_PDE_PAT : X86_PG_PTE_PAT;
2762
2763 if ((pte & pat_flag) != 0)
2764 pat_idx |= 0x4;
2765 if ((pte & PG_NC_PCD) != 0)
2766 pat_idx |= 0x2;
2767 if ((pte & PG_NC_PWT) != 0)
2768 pat_idx |= 0x1;
2769 break;
2770 case PT_EPT:
2771 if ((pte & EPT_PG_IGNORE_PAT) != 0)
2772 panic("EPT PTE %#lx has no PAT memory type", pte);
2773 pat_idx = (pte & EPT_PG_MEMORY_TYPE(0x7)) >> 3;
2774 break;
2775 }
2776
2777 /* See pmap_init_pat(). */
2778 if (pat_idx == 4)
2779 pat_idx = 0;
2780 if (pat_idx == 7)
2781 pat_idx = 3;
2782
2783 return (pat_idx);
2784 }
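/*
 * Note that this is effectively the inverse of pmap_cache_bits().  PAT
 * indices 4 and 7 are folded back onto 0 and 3 because pmap_init_pat()
 * programs them with the same memory types (WB and UC, respectively).
 */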
2785
2786 bool
2787 pmap_ps_enabled(pmap_t pmap)
2788 {
2789
2790 return (pg_ps_enabled && (pmap->pm_flags & PMAP_PDE_SUPERPAGE) != 0);
2791 }
2792
2793 static void
2794 pmap_update_pde_store(pmap_t pmap, pd_entry_t *pde, pd_entry_t newpde)
2795 {
2796
2797 switch (pmap->pm_type) {
2798 case PT_X86:
2799 break;
2800 case PT_RVI:
2801 case PT_EPT:
2802 /*
2803 * XXX
2804 * This is a little bogus since the generation number is
2805 * supposed to be bumped up when a region of the address
2806 * space is invalidated in the page tables.
2807 *
2808 * In this case the old PDE entry is valid but yet we want
2809 * to make sure that any mappings using the old entry are
2810 * invalidated in the TLB.
2811 *
2812 * The reason this works as expected is because we rendezvous
2813 * "all" host cpus and force any vcpu context to exit as a
2814 * side-effect.
2815 */
2816 atomic_add_long(&pmap->pm_eptgen, 1);
2817 break;
2818 default:
2819 panic("pmap_update_pde_store: bad pm_type %d", pmap->pm_type);
2820 }
2821 pde_store(pde, newpde);
2822 }
2823
2824 /*
2825 * After changing the page size for the specified virtual address in the page
2826 * table, flush the corresponding entries from the processor's TLB. Only the
2827 * calling processor's TLB is affected.
2828 *
2829 * The calling thread must be pinned to a processor.
2830 */
2831 static void
2832 pmap_update_pde_invalidate(pmap_t pmap, vm_offset_t va, pd_entry_t newpde)
2833 {
2834 pt_entry_t PG_G;
2835
2836 if (pmap_type_guest(pmap))
2837 return;
2838
2839 KASSERT(pmap->pm_type == PT_X86,
2840 ("pmap_update_pde_invalidate: invalid type %d", pmap->pm_type));
2841
2842 PG_G = pmap_global_bit(pmap);
2843
2844 if ((newpde & PG_PS) == 0)
2845 /* Demotion: flush a specific 2MB page mapping. */
2846 pmap_invlpg(pmap, va);
2847 else if ((newpde & PG_G) == 0)
2848 /*
2849 * Promotion: flush every 4KB page mapping from the TLB
2850 * because there are too many to flush individually.
2851 */
2852 invltlb();
2853 else {
2854 /*
2855 * Promotion: flush every 4KB page mapping from the TLB,
2856 * including any global (PG_G) mappings.
2857 */
2858 invltlb_glob();
2859 }
2860 }
2861
2862 /*
2863 * The amd64 pmap uses different approaches to TLB invalidation
2864 * depending on the kernel configuration, available hardware features,
2865 * and known hardware errata. The kernel configuration option that
2866 * has the greatest operational impact on TLB invalidation is PTI,
2867 * which is enabled automatically on affected Intel CPUs. The most
2868 * impactful hardware features are first PCID, and then INVPCID
2869 * instruction presence. PCID usage is quite different for PTI
2870 * vs. non-PTI.
2871 *
2872 * * Kernel Page Table Isolation (PTI or KPTI) is used to mitigate
2873 * the Meltdown bug in some Intel CPUs. Under PTI, each user address
2874 * space is served by two page tables, user and kernel. The user
2875 * page table only maps user space and a kernel trampoline. The
2876 * kernel trampoline includes the entirety of the kernel text but
2877 * only the kernel data that is needed to switch from user to kernel
2878 * mode. The kernel page table maps the user and kernel address
2879 * spaces in their entirety. It is identical to the per-process
2880 * page table used in non-PTI mode.
2881 *
2882 * User page tables are only used when the CPU is in user mode.
2883 * Consequently, some TLB invalidations can be postponed until the
2884 * switch from kernel to user mode. In contrast, the user
2885 * space part of the kernel page table is used for copyout(9), so
2886 * TLB invalidations on this page table cannot be similarly postponed.
2887 *
2888 * The existence of a user mode page table for the given pmap is
2889 * indicated by a pm_ucr3 value that differs from PMAP_NO_CR3, in
2890 * which case pm_ucr3 contains the %cr3 register value for the user
2891 * mode page table's root.
2892 *
2893 * * The pm_active bitmask indicates which CPUs currently have the
2894 * pmap active. A CPU's bit is set on context switch to the pmap, and
2895 * cleared on switching off this CPU. For the kernel page table,
2896 * the pm_active field is immutable and contains all CPUs. The
2897 * kernel page table is always logically active on every processor,
2898 * but not necessarily in use by the hardware, e.g., in PTI mode.
2899 *
2900 * When requesting invalidation of virtual addresses with
2901 * pmap_invalidate_XXX() functions, the pmap sends shootdown IPIs to
2902 * all CPUs recorded as active in pm_active. Updates to and reads
2903 * from pm_active are not synchronized, and so they may race with
2904 * each other. Shootdown handlers are prepared to handle the race.
2905 *
2906 * * PCID is an optional feature of the long mode x86 MMU where TLB
2907 * entries are tagged with the 'Process ID' of the address space
2908 * they belong to. This feature provides a limited namespace for
2909 * process identifiers, 12 bits, supporting 4095 simultaneous IDs
2910 * total.
2911 *
2912 * Allocation of a PCID to a pmap is done by an algorithm described
2913 * in section 15.12, "Other TLB Consistency Algorithms", of
2914 * Vahalia's book "Unix Internals". A PCID cannot be allocated for
2915 * the whole lifetime of a pmap in pmap_pinit() due to the limited
2916 * namespace. Instead, a per-CPU, per-pmap PCID is assigned when
2917 * the CPU is about to start caching TLB entries from a pmap,
2918 * i.e., on the context switch that activates the pmap on the CPU.
2919 *
2920 * The PCID allocator maintains a per-CPU, per-pmap generation
2921 * count, pm_gen, which is incremented each time a new PCID is
2922 * allocated. On TLB invalidation, the generation counters for the
2923 * pmap are zeroed, which signals the context switch code that the
2924 * previously allocated PCID is no longer valid. Effectively,
2925 * zeroing any of these counters triggers a TLB shootdown for the
2926 * given CPU/address space, due to the allocation of a new PCID.
2927 *
2928 * Zeroing can be performed remotely. Consequently, if a pmap is
2929 * inactive on a CPU, then a TLB shootdown for that pmap and CPU can
2930 * be initiated by an ordinary memory access to reset the target
2931 * CPU's generation count within the pmap. The CPU initiating the
2932 * TLB shootdown does not need to send an IPI to the target CPU.
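 *
 *   For example (an illustrative sketch): to flush an inactive pmap's
 *   entries from a remote CPU, the initiator simply stores zero into that
 *   CPU's per-pmap pm_gen.  The next time the remote CPU switches to the
 *   pmap, the context switch code observes the stale generation and
 *   allocates a fresh PCID, so the previously cached translations are no
 *   longer reachable for this address space.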
2933 *
2934 * * PTI + PCID. The available PCIDs are divided into two sets: PCIDs
2935 * for complete (kernel) page tables, and PCIDs for user mode page
2936 * tables. A user PCID value is obtained from the kernel PCID value
2937 * by setting the highest bit, 11, to 1 (0x800 == PMAP_PCID_USER_PT).
2938 *
2939 * User space page tables are activated on return to user mode, by
2940 * loading pm_ucr3 into %cr3. If the PCPU(ucr3_load_mask) requests
2941 * clearing bit 63 of the loaded ucr3, this effectively causes
2942 * complete invalidation of the user mode TLB entries for the
2943 * current pmap. In that case, local invalidations of individual
2944 * pages in the user page table are skipped.
2945 *
2946 * * Local invalidation, all modes. If the requested invalidation is
2947 * for a specific address or the total invalidation of a currently
2948 * active pmap, then the TLB is flushed using INVLPG for a kernel
2949 * page table, and INVPCID(INVPCID_CTXGLOB)/invltlb_glob() for a
2950 * user space page table(s).
2951 *
2952 * If the INVPCID instruction is available, it is used to flush user
2953 * entries from the kernel page table.
2954 *
2955 * When PCID is enabled, the INVLPG instruction invalidates all TLB
2956 * entries for the given page that either match the current PCID or
2957 * are global. Since TLB entries for the same page under different
2958 * PCIDs are unaffected, kernel pages which reside in all address
2959 * spaces could be problematic. We avoid the problem by creating
2960 * all kernel PTEs with the global flag (PG_G) set, when PTI is
2961 * disabled.
2962 *
2963 * * mode: PTI disabled, PCID present. The kernel reserves PCID 0 for its
2964 * address space, all other 4095 PCIDs are used for user mode spaces
2965 * as described above. A context switch allocates a new PCID if
2966 * the recorded PCID is zero or the recorded generation does not match
2967 * the CPU's generation, effectively flushing the TLB for this address space.
2968 * Total remote invalidation is performed by zeroing pm_gen for all CPUs.
2969 * local user page: INVLPG
2970 * local kernel page: INVLPG
2971 * local user total: INVPCID(CTX)
2972 * local kernel total: INVPCID(CTXGLOB) or invltlb_glob()
2973 * remote user page, inactive pmap: zero pm_gen
2974 * remote user page, active pmap: zero pm_gen + IPI:INVLPG
2975 * (Both actions are required to handle the aforementioned pm_active races.)
2976 * remote kernel page: IPI:INVLPG
2977 * remote user total, inactive pmap: zero pm_gen
2978 * remote user total, active pmap: zero pm_gen + IPI:(INVPCID(CTX) or
2979 * reload %cr3)
2980 * (See note above about pm_active races.)
2981 * remote kernel total: IPI:(INVPCID(CTXGLOB) or invltlb_glob())
2982 *
2983 * PTI enabled, PCID present.
2984 * local user page: INVLPG for kpt, INVPCID(ADDR) or (INVLPG for ucr3)
2985 * for upt
2986 * local kernel page: INVLPG
2987 * local user total: INVPCID(CTX) or reload %cr3 for kpt, clear PCID_SAVE
2988 * on loading UCR3 into %cr3 for upt
2989 * local kernel total: INVPCID(CTXGLOB) or invltlb_glob()
2990 * remote user page, inactive pmap: zero pm_gen
2991 * remote user page, active pmap: zero pm_gen + IPI:(INVLPG for kpt,
2992 * INVPCID(ADDR) for upt)
2993 * remote kernel page: IPI:INVLPG
2994 * remote user total, inactive pmap: zero pm_gen
2995 * remote user total, active pmap: zero pm_gen + IPI:(INVPCID(CTX) for kpt,
2996 * clear PCID_SAVE on loading UCR3 into $cr3 for upt)
2997 * remote kernel total: IPI:(INVPCID(CTXGLOB) or invltlb_glob())
2998 *
2999 * No PCID.
3000 * local user page: INVLPG
3001 * local kernel page: INVLPG
3002 * local user total: reload %cr3
3003 * local kernel total: invltlb_glob()
3004 * remote user page, inactive pmap: -
3005 * remote user page, active pmap: IPI:INVLPG
3006 * remote kernel page: IPI:INVLPG
3007 * remote user total, inactive pmap: -
3008 * remote user total, active pmap: IPI:(reload %cr3)
3009 * remote kernel total: IPI:invltlb_glob()
3010 * Since on return to user mode, the reload of %cr3 with ucr3 causes
3011 * TLB invalidation, no specific action is required for user page table.
3012 *
3013 * EPT. EPT pmaps do not map KVA, all mappings are userspace.
3014 * XXX TODO
3015 */
3016
3017 #ifdef SMP
3018 /*
3019 * Interrupt the cpus that are executing in the guest context.
3020 * This will force the vcpu to exit and the cached EPT mappings
3021 * will be invalidated by the host before the next vmresume.
3022 */
3023 static __inline void
3024 pmap_invalidate_ept(pmap_t pmap)
3025 {
3026 smr_seq_t goal;
3027 int ipinum;
3028
3029 sched_pin();
3030 KASSERT(!CPU_ISSET(curcpu, &pmap->pm_active),
3031 ("pmap_invalidate_ept: absurd pm_active"));
3032
3033 /*
3034 * The TLB mappings associated with a vcpu context are not
3035 * flushed each time a different vcpu is chosen to execute.
3036 *
3037 * This is in contrast with a process's vtop mappings that
3038 * are flushed from the TLB on each context switch.
3039 *
3040 * Therefore we need to do more than just a TLB shootdown on
3041 * the active cpus in 'pmap->pm_active'. To do this we keep
3042 * track of the number of invalidations performed on this pmap.
3043 *
3044 * Each vcpu keeps a cache of this counter and compares it
3045 * just before a vmresume. If the counter is out-of-date an
3046 * invept will be done to flush stale mappings from the TLB.
3047 *
3048 * To ensure that all vCPU threads have observed the new counter
3049 * value before returning, we use SMR. Ordering is important here:
3050 * the VMM enters an SMR read section before loading the counter
3051 * and after updating the pm_active bit set. Thus, pm_active is
3052 * a superset of active readers, and any reader that has observed
3053 * the goal has observed the new counter value.
3054 */
3055 atomic_add_long(&pmap->pm_eptgen, 1);
3056
3057 goal = smr_advance(pmap->pm_eptsmr);
3058
3059 /*
3060 * Force the vcpu to exit and trap back into the hypervisor.
3061 */
3062 ipinum = pmap->pm_flags & PMAP_NESTED_IPIMASK;
3063 ipi_selected(pmap->pm_active, ipinum);
3064 sched_unpin();
3065
3066 /*
3067 * Ensure that all active vCPUs will observe the new generation counter
3068 * value before executing any more guest instructions.
3069 */
3070 smr_wait(pmap->pm_eptsmr, goal);
3071 }
3072
3073 static inline void
3074 pmap_invalidate_preipi_pcid(pmap_t pmap)
3075 {
3076 struct pmap_pcid *pcidp;
3077 u_int cpuid, i;
3078
3079 sched_pin();
3080
3081 cpuid = PCPU_GET(cpuid);
3082 if (pmap != PCPU_GET(curpmap))
3083 cpuid = 0xffffffff; /* An impossible value */
3084
3085 CPU_FOREACH(i) {
3086 if (cpuid != i) {
3087 pcidp = zpcpu_get_cpu(pmap->pm_pcidp, i);
3088 pcidp->pm_gen = 0;
3089 }
3090 }
3091
3092 /*
3093 * The fence is between stores to pm_gen and the read of the
3094 * pm_active mask. We need to ensure that it is impossible
3095 * for us to miss the bit update in pm_active and
3096 * simultaneously observe a non-zero pm_gen in
3097 * pmap_activate_sw(), otherwise TLB update is missed.
3098 * Without the fence, IA32 allows such an outcome. Note that
3099 * pm_active is updated by a locked operation, which provides
3100 * the reciprocal fence.
3101 */
3102 atomic_thread_fence_seq_cst();
3103 }
3104
3105 static void
3106 pmap_invalidate_preipi_nopcid(pmap_t pmap __unused)
3107 {
3108 sched_pin();
3109 }
3110
3111 DEFINE_IFUNC(static, void, pmap_invalidate_preipi, (pmap_t))
3112 {
3113 return (pmap_pcid_enabled ? pmap_invalidate_preipi_pcid :
3114 pmap_invalidate_preipi_nopcid);
3115 }
3116
3117 static inline void
3118 pmap_invalidate_page_pcid_cb(pmap_t pmap, vm_offset_t va,
3119 const bool invpcid_works1)
3120 {
3121 struct invpcid_descr d;
3122 uint64_t kcr3, ucr3;
3123 uint32_t pcid;
3124
3125 /*
3126 * Because pm_pcid is recalculated on a context switch, we
3127 * must ensure there is no preemption, not just pinning.
3128 * Otherwise, we might use a stale value below.
3129 */
3130 CRITICAL_ASSERT(curthread);
3131
3132 /*
3133 * No need to do anything with user page tables invalidation
3134 * if there is no user page table, or invalidation is deferred
3135 * until the return to userspace. ucr3_load_mask is stable
3136 * because we have preemption disabled.
3137 */
3138 if (pmap->pm_ucr3 == PMAP_NO_CR3 ||
3139 PCPU_GET(ucr3_load_mask) != PMAP_UCR3_NOMASK)
3140 return;
3141
3142 pcid = pmap_get_pcid(pmap);
3143 if (invpcid_works1) {
3144 d.pcid = pcid | PMAP_PCID_USER_PT;
3145 d.pad = 0;
3146 d.addr = va;
3147 invpcid(&d, INVPCID_ADDR);
3148 } else {
3149 kcr3 = pmap->pm_cr3 | pcid | CR3_PCID_SAVE;
3150 ucr3 = pmap->pm_ucr3 | pcid | PMAP_PCID_USER_PT | CR3_PCID_SAVE;
3151 pmap_pti_pcid_invlpg(ucr3, kcr3, va);
3152 }
3153 }
3154
3155 static void
3156 pmap_invalidate_page_pcid_invpcid_cb(pmap_t pmap, vm_offset_t va)
3157 {
3158 pmap_invalidate_page_pcid_cb(pmap, va, true);
3159 }
3160
3161 static void
3162 pmap_invalidate_page_pcid_noinvpcid_cb(pmap_t pmap, vm_offset_t va)
3163 {
3164 pmap_invalidate_page_pcid_cb(pmap, va, false);
3165 }
3166
3167 static void
3168 pmap_invalidate_page_nopcid_cb(pmap_t pmap __unused, vm_offset_t va __unused)
3169 {
3170 }
3171
3172 DEFINE_IFUNC(static, void, pmap_invalidate_page_cb, (pmap_t, vm_offset_t))
3173 {
3174 if (pmap_pcid_enabled)
3175 return (invpcid_works ? pmap_invalidate_page_pcid_invpcid_cb :
3176 pmap_invalidate_page_pcid_noinvpcid_cb);
3177 return (pmap_invalidate_page_nopcid_cb);
3178 }
3179
3180 static void
3181 pmap_invalidate_page_curcpu_cb(pmap_t pmap, vm_offset_t va,
3182 vm_offset_t addr2 __unused)
3183 {
3184 if (pmap == kernel_pmap) {
3185 pmap_invlpg(kernel_pmap, va);
3186 } else if (pmap == PCPU_GET(curpmap)) {
3187 invlpg(va);
3188 pmap_invalidate_page_cb(pmap, va);
3189 }
3190 }
3191
3192 void
3193 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
3194 {
3195 if (pmap_type_guest(pmap)) {
3196 pmap_invalidate_ept(pmap);
3197 return;
3198 }
3199
3200 KASSERT(pmap->pm_type == PT_X86,
3201 ("pmap_invalidate_page: invalid type %d", pmap->pm_type));
3202
3203 pmap_invalidate_preipi(pmap);
3204 smp_masked_invlpg(va, pmap, pmap_invalidate_page_curcpu_cb);
3205 }
3206
3207 /* 4k PTEs -- Chosen to exceed the total size of Broadwell L2 TLB */
3208 #define PMAP_INVLPG_THRESHOLD (4 * 1024 * PAGE_SIZE)
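/*
 * I.e., ranges of 16MB (4096 4KB pages) or more are handled by a full
 * TLB invalidation instead of per-page INVLPG (see pmap_invalidate_range()).
 */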
3209
3210 static void
3211 pmap_invalidate_range_pcid_cb(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
3212 const bool invpcid_works1)
3213 {
3214 struct invpcid_descr d;
3215 uint64_t kcr3, ucr3;
3216 uint32_t pcid;
3217
3218 CRITICAL_ASSERT(curthread);
3219
3220 if (pmap != PCPU_GET(curpmap) ||
3221 pmap->pm_ucr3 == PMAP_NO_CR3 ||
3222 PCPU_GET(ucr3_load_mask) != PMAP_UCR3_NOMASK)
3223 return;
3224
3225 pcid = pmap_get_pcid(pmap);
3226 if (invpcid_works1) {
3227 d.pcid = pcid | PMAP_PCID_USER_PT;
3228 d.pad = 0;
3229 for (d.addr = sva; d.addr < eva; d.addr += PAGE_SIZE)
3230 invpcid(&d, INVPCID_ADDR);
3231 } else {
3232 kcr3 = pmap->pm_cr3 | pcid | CR3_PCID_SAVE;
3233 ucr3 = pmap->pm_ucr3 | pcid | PMAP_PCID_USER_PT | CR3_PCID_SAVE;
3234 pmap_pti_pcid_invlrng(ucr3, kcr3, sva, eva);
3235 }
3236 }
3237
3238 static void
3239 pmap_invalidate_range_pcid_invpcid_cb(pmap_t pmap, vm_offset_t sva,
3240 vm_offset_t eva)
3241 {
3242 pmap_invalidate_range_pcid_cb(pmap, sva, eva, true);
3243 }
3244
3245 static void
3246 pmap_invalidate_range_pcid_noinvpcid_cb(pmap_t pmap, vm_offset_t sva,
3247 vm_offset_t eva)
3248 {
3249 pmap_invalidate_range_pcid_cb(pmap, sva, eva, false);
3250 }
3251
3252 static void
3253 pmap_invalidate_range_nopcid_cb(pmap_t pmap __unused, vm_offset_t sva __unused,
3254 vm_offset_t eva __unused)
3255 {
3256 }
3257
3258 DEFINE_IFUNC(static, void, pmap_invalidate_range_cb, (pmap_t, vm_offset_t,
3259 vm_offset_t))
3260 {
3261 if (pmap_pcid_enabled)
3262 return (invpcid_works ? pmap_invalidate_range_pcid_invpcid_cb :
3263 pmap_invalidate_range_pcid_noinvpcid_cb);
3264 return (pmap_invalidate_range_nopcid_cb);
3265 }
3266
3267 static void
3268 pmap_invalidate_range_curcpu_cb(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
3269 {
3270 vm_offset_t addr;
3271
3272 if (pmap == kernel_pmap) {
3273 if (PCPU_GET(pcid_invlpg_workaround)) {
3274 struct invpcid_descr d = { 0 };
3275
3276 invpcid(&d, INVPCID_CTXGLOB);
3277 } else {
3278 for (addr = sva; addr < eva; addr += PAGE_SIZE)
3279 invlpg(addr);
3280 }
3281 } else if (pmap == PCPU_GET(curpmap)) {
3282 for (addr = sva; addr < eva; addr += PAGE_SIZE)
3283 invlpg(addr);
3284 pmap_invalidate_range_cb(pmap, sva, eva);
3285 }
3286 }
3287
3288 void
3289 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
3290 {
3291 if (eva - sva >= PMAP_INVLPG_THRESHOLD) {
3292 pmap_invalidate_all(pmap);
3293 return;
3294 }
3295
3296 if (pmap_type_guest(pmap)) {
3297 pmap_invalidate_ept(pmap);
3298 return;
3299 }
3300
3301 KASSERT(pmap->pm_type == PT_X86,
3302 ("pmap_invalidate_range: invalid type %d", pmap->pm_type));
3303
3304 pmap_invalidate_preipi(pmap);
3305 smp_masked_invlpg_range(sva, eva, pmap,
3306 pmap_invalidate_range_curcpu_cb);
3307 }
3308
3309 static inline void
3310 pmap_invalidate_all_pcid_cb(pmap_t pmap, bool invpcid_works1)
3311 {
3312 struct invpcid_descr d;
3313 uint64_t kcr3;
3314 uint32_t pcid;
3315
3316 if (pmap == kernel_pmap) {
3317 if (invpcid_works1) {
3318 bzero(&d, sizeof(d));
3319 invpcid(&d, INVPCID_CTXGLOB);
3320 } else {
3321 invltlb_glob();
3322 }
3323 } else if (pmap == PCPU_GET(curpmap)) {
3324 CRITICAL_ASSERT(curthread);
3325
3326 pcid = pmap_get_pcid(pmap);
3327 if (invpcid_works1) {
3328 d.pcid = pcid;
3329 d.pad = 0;
3330 d.addr = 0;
3331 invpcid(&d, INVPCID_CTX);
3332 } else {
3333 kcr3 = pmap->pm_cr3 | pcid;
3334 load_cr3(kcr3);
3335 }
3336 if (pmap->pm_ucr3 != PMAP_NO_CR3)
3337 PCPU_SET(ucr3_load_mask, ~CR3_PCID_SAVE);
3338 }
3339 }
3340
3341 static void
3342 pmap_invalidate_all_pcid_invpcid_cb(pmap_t pmap)
3343 {
3344 pmap_invalidate_all_pcid_cb(pmap, true);
3345 }
3346
3347 static void
3348 pmap_invalidate_all_pcid_noinvpcid_cb(pmap_t pmap)
3349 {
3350 pmap_invalidate_all_pcid_cb(pmap, false);
3351 }
3352
3353 static void
3354 pmap_invalidate_all_nopcid_cb(pmap_t pmap)
3355 {
3356 if (pmap == kernel_pmap)
3357 invltlb_glob();
3358 else if (pmap == PCPU_GET(curpmap))
3359 invltlb();
3360 }
3361
3362 DEFINE_IFUNC(static, void, pmap_invalidate_all_cb, (pmap_t))
3363 {
3364 if (pmap_pcid_enabled)
3365 return (invpcid_works ? pmap_invalidate_all_pcid_invpcid_cb :
3366 pmap_invalidate_all_pcid_noinvpcid_cb);
3367 return (pmap_invalidate_all_nopcid_cb);
3368 }
3369
3370 static void
3371 pmap_invalidate_all_curcpu_cb(pmap_t pmap, vm_offset_t addr1 __unused,
3372 vm_offset_t addr2 __unused)
3373 {
3374 pmap_invalidate_all_cb(pmap);
3375 }
3376
3377 void
3378 pmap_invalidate_all(pmap_t pmap)
3379 {
3380 if (pmap_type_guest(pmap)) {
3381 pmap_invalidate_ept(pmap);
3382 return;
3383 }
3384
3385 KASSERT(pmap->pm_type == PT_X86,
3386 ("pmap_invalidate_all: invalid type %d", pmap->pm_type));
3387
3388 pmap_invalidate_preipi(pmap);
3389 smp_masked_invltlb(pmap, pmap_invalidate_all_curcpu_cb);
3390 }
3391
3392 static void
3393 pmap_invalidate_cache_curcpu_cb(pmap_t pmap __unused, vm_offset_t va __unused,
3394 vm_offset_t addr2 __unused)
3395 {
3396 wbinvd();
3397 }
3398
3399 void
3400 pmap_invalidate_cache(void)
3401 {
3402 sched_pin();
3403 smp_cache_flush(pmap_invalidate_cache_curcpu_cb);
3404 }
3405
3406 struct pde_action {
3407 cpuset_t invalidate; /* processors that invalidate their TLB */
3408 pmap_t pmap;
3409 vm_offset_t va;
3410 pd_entry_t *pde;
3411 pd_entry_t newpde;
3412 u_int store; /* processor that updates the PDE */
3413 };
3414
3415 static void
3416 pmap_update_pde_action(void *arg)
3417 {
3418 struct pde_action *act = arg;
3419
3420 if (act->store == PCPU_GET(cpuid))
3421 pmap_update_pde_store(act->pmap, act->pde, act->newpde);
3422 }
3423
3424 static void
3425 pmap_update_pde_teardown(void *arg)
3426 {
3427 struct pde_action *act = arg;
3428
3429 if (CPU_ISSET(PCPU_GET(cpuid), &act->invalidate))
3430 pmap_update_pde_invalidate(act->pmap, act->va, act->newpde);
3431 }
3432
3433 /*
3434 * Change the page size for the specified virtual address in a way that
3435 * prevents any possibility of the TLB ever having two entries that map the
3436 * same virtual address using different page sizes. This is the recommended
3437 * workaround for Erratum 383 on AMD Family 10h processors. It prevents a
3438 * machine check exception for a TLB state that is improperly diagnosed as a
3439 * hardware error.
3440 */
3441 static void
3442 pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
3443 {
3444 struct pde_action act;
3445 cpuset_t active, other_cpus;
3446 u_int cpuid;
3447
3448 sched_pin();
3449 cpuid = PCPU_GET(cpuid);
3450 other_cpus = all_cpus;
3451 CPU_CLR(cpuid, &other_cpus);
3452 if (pmap == kernel_pmap || pmap_type_guest(pmap))
3453 active = all_cpus;
3454 else {
3455 active = pmap->pm_active;
3456 }
3457 if (CPU_OVERLAP(&active, &other_cpus)) {
3458 act.store = cpuid;
3459 act.invalidate = active;
3460 act.va = va;
3461 act.pmap = pmap;
3462 act.pde = pde;
3463 act.newpde = newpde;
3464 CPU_SET(cpuid, &active);
3465 smp_rendezvous_cpus(active,
3466 smp_no_rendezvous_barrier, pmap_update_pde_action,
3467 pmap_update_pde_teardown, &act);
3468 } else {
3469 pmap_update_pde_store(pmap, pde, newpde);
3470 if (CPU_ISSET(cpuid, &active))
3471 pmap_update_pde_invalidate(pmap, va, newpde);
3472 }
3473 sched_unpin();
3474 }
3475 #else /* !SMP */
3476 /*
3477 * Normal, non-SMP, invalidation functions.
3478 */
3479 void
3480 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
3481 {
3482 struct invpcid_descr d;
3483 struct pmap_pcid *pcidp;
3484 uint64_t kcr3, ucr3;
3485 uint32_t pcid;
3486
3487 if (pmap->pm_type == PT_RVI || pmap->pm_type == PT_EPT) {
3488 pmap->pm_eptgen++;
3489 return;
3490 }
3491 KASSERT(pmap->pm_type == PT_X86,
3492 ("pmap_invalidate_range: unknown type %d", pmap->pm_type));
3493
3494 if (pmap == kernel_pmap || pmap == PCPU_GET(curpmap)) {
3495 invlpg(va);
3496 if (pmap == PCPU_GET(curpmap) && pmap_pcid_enabled &&
3497 pmap->pm_ucr3 != PMAP_NO_CR3) {
3498 critical_enter();
3499 pcid = pmap_get_pcid(pmap);
3500 if (invpcid_works) {
3501 d.pcid = pcid | PMAP_PCID_USER_PT;
3502 d.pad = 0;
3503 d.addr = va;
3504 invpcid(&d, INVPCID_ADDR);
3505 } else {
3506 kcr3 = pmap->pm_cr3 | pcid | CR3_PCID_SAVE;
3507 ucr3 = pmap->pm_ucr3 | pcid |
3508 PMAP_PCID_USER_PT | CR3_PCID_SAVE;
3509 pmap_pti_pcid_invlpg(ucr3, kcr3, va);
3510 }
3511 critical_exit();
3512 }
3513 } else if (pmap_pcid_enabled) {
3514 pcidp = zpcpu_get(pmap->pm_pcidp);
3515 pcidp->pm_gen = 0;
3516 }
3517 }
3518
3519 void
3520 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
3521 {
3522 struct invpcid_descr d;
3523 struct pmap_pcid *pcidp;
3524 vm_offset_t addr;
3525 uint64_t kcr3, ucr3;
3526 uint32_t pcid;
3527
3528 if (pmap->pm_type == PT_RVI || pmap->pm_type == PT_EPT) {
3529 pmap->pm_eptgen++;
3530 return;
3531 }
3532 KASSERT(pmap->pm_type == PT_X86,
3533 ("pmap_invalidate_range: unknown type %d", pmap->pm_type));
3534
3535 if (pmap == kernel_pmap || pmap == PCPU_GET(curpmap)) {
3536 for (addr = sva; addr < eva; addr += PAGE_SIZE)
3537 invlpg(addr);
3538 if (pmap == PCPU_GET(curpmap) && pmap_pcid_enabled &&
3539 pmap->pm_ucr3 != PMAP_NO_CR3) {
3540 critical_enter();
3541 pcid = pmap_get_pcid(pmap);
3542 if (invpcid_works) {
3543 d.pcid = pcid | PMAP_PCID_USER_PT;
3544 d.pad = 0;
3545 d.addr = sva;
3546 for (; d.addr < eva; d.addr += PAGE_SIZE)
3547 invpcid(&d, INVPCID_ADDR);
3548 } else {
3549 kcr3 = pmap->pm_cr3 | pcid | CR3_PCID_SAVE;
3550 ucr3 = pmap->pm_ucr3 | pcid |
3551 PMAP_PCID_USER_PT | CR3_PCID_SAVE;
3552 pmap_pti_pcid_invlrng(ucr3, kcr3, sva, eva);
3553 }
3554 critical_exit();
3555 }
3556 } else if (pmap_pcid_enabled) {
3557 pcidp = zpcpu_get(pmap->pm_pcidp);
3558 pcidp->pm_gen = 0;
3559 }
3560 }
3561
3562 void
3563 pmap_invalidate_all(pmap_t pmap)
3564 {
3565 struct invpcid_descr d;
3566 struct pmap_pcid *pcidp;
3567 uint64_t kcr3, ucr3;
3568 uint32_t pcid;
3569
3570 if (pmap->pm_type == PT_RVI || pmap->pm_type == PT_EPT) {
3571 pmap->pm_eptgen++;
3572 return;
3573 }
3574 KASSERT(pmap->pm_type == PT_X86,
3575 ("pmap_invalidate_all: unknown type %d", pmap->pm_type));
3576
3577 if (pmap == kernel_pmap) {
3578 if (pmap_pcid_enabled && invpcid_works) {
3579 bzero(&d, sizeof(d));
3580 invpcid(&d, INVPCID_CTXGLOB);
3581 } else {
3582 invltlb_glob();
3583 }
3584 } else if (pmap == PCPU_GET(curpmap)) {
3585 if (pmap_pcid_enabled) {
3586 critical_enter();
3587 pcid = pmap_get_pcid(pmap);
3588 if (invpcid_works) {
3589 d.pcid = pcid;
3590 d.pad = 0;
3591 d.addr = 0;
3592 invpcid(&d, INVPCID_CTX);
3593 if (pmap->pm_ucr3 != PMAP_NO_CR3) {
3594 d.pcid |= PMAP_PCID_USER_PT;
3595 invpcid(&d, INVPCID_CTX);
3596 }
3597 } else {
3598 kcr3 = pmap->pm_cr3 | pcid;
3599 if (pmap->pm_ucr3 != PMAP_NO_CR3) {
3600 ucr3 = pmap->pm_ucr3 | pcid |
3601 PMAP_PCID_USER_PT;
3602 pmap_pti_pcid_invalidate(ucr3, kcr3);
3603 } else
3604 load_cr3(kcr3);
3605 }
3606 critical_exit();
3607 } else {
3608 invltlb();
3609 }
3610 } else if (pmap_pcid_enabled) {
3611 pcidp = zpcpu_get(pmap->pm_pcidp);
3612 pcidp->pm_gen = 0;
3613 }
3614 }
3615
3616 void
3617 pmap_invalidate_cache(void)
3618 {
3619
3620 wbinvd();
3621 }
3622
3623 static void
3624 pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
3625 {
3626 struct pmap_pcid *pcidp;
3627
3628 pmap_update_pde_store(pmap, pde, newpde);
3629 if (pmap == kernel_pmap || pmap == PCPU_GET(curpmap))
3630 pmap_update_pde_invalidate(pmap, va, newpde);
3631 else {
3632 pcidp = zpcpu_get(pmap->pm_pcidp);
3633 pcidp->pm_gen = 0;
3634 }
3635 }
3636 #endif /* !SMP */
3637
3638 static void
3639 pmap_invalidate_pde_page(pmap_t pmap, vm_offset_t va, pd_entry_t pde)
3640 {
3641
3642 /*
3643 * When the PDE has PG_PROMOTED set, the 2MB page mapping was created
3644 * by a promotion that did not invalidate the 512 4KB page mappings
3645 * that might exist in the TLB. Consequently, at this point, the TLB
3646 * may hold both 4KB and 2MB page mappings for the address range [va,
3647 * va + NBPDR). Therefore, the entire range must be invalidated here.
3648 * In contrast, when PG_PROMOTED is clear, the TLB will not hold any
3649 * 4KB page mappings for the address range [va, va + NBPDR), and so a
3650 * single INVLPG suffices to invalidate the 2MB page mapping from the
3651 * TLB.
3652 */
3653 if ((pde & PG_PROMOTED) != 0)
3654 pmap_invalidate_range(pmap, va, va + NBPDR - 1);
3655 else
3656 pmap_invalidate_page(pmap, va);
3657 }
3658
3659 DEFINE_IFUNC(, void, pmap_invalidate_cache_range,
3660 (vm_offset_t sva, vm_offset_t eva))
3661 {
3662
3663 if ((cpu_feature & CPUID_SS) != 0)
3664 return (pmap_invalidate_cache_range_selfsnoop);
3665 if ((cpu_feature & CPUID_CLFSH) != 0)
3666 return (pmap_force_invalidate_cache_range);
3667 return (pmap_invalidate_cache_range_all);
3668 }
3669
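/*
 * Beyond this size, a full cache flush is presumed cheaper than flushing
 * by individual cache lines with CLFLUSH/CLFLUSHOPT.
 */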
3670 #define PMAP_CLFLUSH_THRESHOLD (2 * 1024 * 1024)
3671
3672 static void
3673 pmap_invalidate_cache_range_check_align(vm_offset_t sva, vm_offset_t eva)
3674 {
3675
3676 KASSERT((sva & PAGE_MASK) == 0,
3677 ("pmap_invalidate_cache_range: sva not page-aligned"));
3678 KASSERT((eva & PAGE_MASK) == 0,
3679 ("pmap_invalidate_cache_range: eva not page-aligned"));
3680 }
3681
3682 static void
3683 pmap_invalidate_cache_range_selfsnoop(vm_offset_t sva, vm_offset_t eva)
3684 {
3685
3686 pmap_invalidate_cache_range_check_align(sva, eva);
3687 }
3688
3689 void
3690 pmap_force_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
3691 {
3692
3693 sva &= ~(vm_offset_t)(cpu_clflush_line_size - 1);
3694
3695 /*
3696 * XXX: Some CPUs fault, hang, or trash the local APIC
3697 * registers if we use CLFLUSH on the local APIC range. The
3698 * local APIC is always uncached, so we don't need to flush
3699 * for that range anyway.
3700 */
3701 if (pmap_kextract(sva) == lapic_paddr)
3702 return;
3703
3704 if ((cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0) {
3705 /*
3706 * Do per-cache line flush. Use a locked
3707 * instruction to ensure that previous stores are
3708 * included in the write-back. The processor
3709 * propagates flush to other processors in the cache
3710 * coherence domain.
3711 */
3712 atomic_thread_fence_seq_cst();
3713 for (; sva < eva; sva += cpu_clflush_line_size)
3714 clflushopt(sva);
3715 atomic_thread_fence_seq_cst();
3716 } else {
3717 /*
3718 * Writes are ordered by CLFLUSH on Intel CPUs.
3719 */
3720 if (cpu_vendor_id != CPU_VENDOR_INTEL)
3721 mfence();
3722 for (; sva < eva; sva += cpu_clflush_line_size)
3723 clflush(sva);
3724 if (cpu_vendor_id != CPU_VENDOR_INTEL)
3725 mfence();
3726 }
3727 }
3728
3729 static void
3730 pmap_invalidate_cache_range_all(vm_offset_t sva, vm_offset_t eva)
3731 {
3732
3733 pmap_invalidate_cache_range_check_align(sva, eva);
3734 pmap_invalidate_cache();
3735 }
3736
3737 /*
3738 * Remove the specified set of pages from the data and instruction caches.
3739 *
3740 * In contrast to pmap_invalidate_cache_range(), this function does not
3741 * rely on the CPU's self-snoop feature, because it is intended for use
3742 * when moving pages into a different cache domain.
3743 */
3744 void
3745 pmap_invalidate_cache_pages(vm_page_t *pages, int count)
3746 {
3747 vm_offset_t daddr, eva;
3748 int i;
3749 bool useclflushopt;
3750
3751 useclflushopt = (cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0;
3752 if (count >= PMAP_CLFLUSH_THRESHOLD / PAGE_SIZE ||
3753 ((cpu_feature & CPUID_CLFSH) == 0 && !useclflushopt))
3754 pmap_invalidate_cache();
3755 else {
3756 if (useclflushopt)
3757 atomic_thread_fence_seq_cst();
3758 else if (cpu_vendor_id != CPU_VENDOR_INTEL)
3759 mfence();
3760 for (i = 0; i < count; i++) {
3761 daddr = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pages[i]));
3762 eva = daddr + PAGE_SIZE;
3763 for (; daddr < eva; daddr += cpu_clflush_line_size) {
3764 if (useclflushopt)
3765 clflushopt(daddr);
3766 else
3767 clflush(daddr);
3768 }
3769 }
3770 if (useclflushopt)
3771 atomic_thread_fence_seq_cst();
3772 else if (cpu_vendor_id != CPU_VENDOR_INTEL)
3773 mfence();
3774 }
3775 }
3776
3777 void
3778 pmap_flush_cache_range(vm_offset_t sva, vm_offset_t eva)
3779 {
3780
3781 pmap_invalidate_cache_range_check_align(sva, eva);
3782
3783 if ((cpu_stdext_feature & CPUID_STDEXT_CLWB) == 0) {
3784 pmap_force_invalidate_cache_range(sva, eva);
3785 return;
3786 }
3787
3788 /* See comment in pmap_force_invalidate_cache_range(). */
3789 if (pmap_kextract(sva) == lapic_paddr)
3790 return;
3791
3792 atomic_thread_fence_seq_cst();
3793 for (; sva < eva; sva += cpu_clflush_line_size)
3794 clwb(sva);
3795 atomic_thread_fence_seq_cst();
3796 }
3797
3798 void
3799 pmap_flush_cache_phys_range(vm_paddr_t spa, vm_paddr_t epa, vm_memattr_t mattr)
3800 {
3801 pt_entry_t *pte;
3802 vm_offset_t vaddr;
3803 int error __diagused;
3804 int pte_bits;
3805
3806 KASSERT((spa & PAGE_MASK) == 0,
3807 ("pmap_flush_cache_phys_range: spa not page-aligned"));
3808 KASSERT((epa & PAGE_MASK) == 0,
3809 ("pmap_flush_cache_phys_range: epa not page-aligned"));
3810
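/*
 * Physical addresses below dmaplimit are flushed through the direct map;
 * any remainder is flushed one page at a time through a temporary kernel
 * mapping established below.
 */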
3811 if (spa < dmaplimit) {
3812 pmap_flush_cache_range(PHYS_TO_DMAP(spa), PHYS_TO_DMAP(MIN(
3813 dmaplimit, epa)));
3814 if (dmaplimit >= epa)
3815 return;
3816 spa = dmaplimit;
3817 }
3818
3819 pte_bits = pmap_cache_bits(kernel_pmap, mattr, false) | X86_PG_RW |
3820 X86_PG_V;
3821 error = vmem_alloc(kernel_arena, PAGE_SIZE, M_BESTFIT | M_WAITOK,
3822 &vaddr);
3823 KASSERT(error == 0, ("vmem_alloc failed: %d", error));
3824 pte = vtopte(vaddr);
3825 for (; spa < epa; spa += PAGE_SIZE) {
3826 sched_pin();
3827 pte_store(pte, spa | pte_bits);
3828 pmap_invlpg(kernel_pmap, vaddr);
3829 /* XXXKIB atomic inside flush_cache_range are excessive */
3830 pmap_flush_cache_range(vaddr, vaddr + PAGE_SIZE);
3831 sched_unpin();
3832 }
3833 vmem_free(kernel_arena, vaddr, PAGE_SIZE);
3834 }
3835
3836 /*
3837 * Routine: pmap_extract
3838 * Function:
3839 * Extract the physical page address associated
3840 * with the given map/virtual_address pair.
3841 */
3842 vm_paddr_t
3843 pmap_extract(pmap_t pmap, vm_offset_t va)
3844 {
3845 pdp_entry_t *pdpe;
3846 pd_entry_t *pde;
3847 pt_entry_t *pte, PG_V;
3848 vm_paddr_t pa;
3849
3850 pa = 0;
3851 PG_V = pmap_valid_bit(pmap);
3852 PMAP_LOCK(pmap);
3853 pdpe = pmap_pdpe(pmap, va);
3854 if (pdpe != NULL && (*pdpe & PG_V) != 0) {
3855 if ((*pdpe & PG_PS) != 0)
3856 pa = (*pdpe & PG_PS_FRAME) | (va & PDPMASK);
3857 else {
3858 pde = pmap_pdpe_to_pde(pdpe, va);
3859 if ((*pde & PG_V) != 0) {
3860 if ((*pde & PG_PS) != 0) {
3861 pa = (*pde & PG_PS_FRAME) |
3862 (va & PDRMASK);
3863 } else {
3864 pte = pmap_pde_to_pte(pde, va);
3865 pa = (*pte & PG_FRAME) |
3866 (va & PAGE_MASK);
3867 }
3868 }
3869 }
3870 }
3871 PMAP_UNLOCK(pmap);
3872 return (pa);
3873 }
3874
3875 /*
3876 * Routine: pmap_extract_and_hold
3877 * Function:
3878 * Atomically extract and hold the physical page
3879 * with the given pmap and virtual address pair
3880 * if that mapping permits the given protection.
3881 */
3882 vm_page_t
3883 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
3884 {
3885 pdp_entry_t pdpe, *pdpep;
3886 pd_entry_t pde, *pdep;
3887 pt_entry_t pte, PG_RW, PG_V;
3888 vm_page_t m;
3889
3890 m = NULL;
3891 PG_RW = pmap_rw_bit(pmap);
3892 PG_V = pmap_valid_bit(pmap);
3893 PMAP_LOCK(pmap);
3894
3895 pdpep = pmap_pdpe(pmap, va);
3896 if (pdpep == NULL || ((pdpe = *pdpep) & PG_V) == 0)
3897 goto out;
3898 if ((pdpe & PG_PS) != 0) {
3899 if ((pdpe & PG_RW) == 0 && (prot & VM_PROT_WRITE) != 0)
3900 goto out;
3901 m = PHYS_TO_VM_PAGE((pdpe & PG_PS_FRAME) | (va & PDPMASK));
3902 goto check_page;
3903 }
3904
3905 pdep = pmap_pdpe_to_pde(pdpep, va);
3906 if (pdep == NULL || ((pde = *pdep) & PG_V) == 0)
3907 goto out;
3908 if ((pde & PG_PS) != 0) {
3909 if ((pde & PG_RW) == 0 && (prot & VM_PROT_WRITE) != 0)
3910 goto out;
3911 m = PHYS_TO_VM_PAGE((pde & PG_PS_FRAME) | (va & PDRMASK));
3912 goto check_page;
3913 }
3914
3915 pte = *pmap_pde_to_pte(pdep, va);
3916 if ((pte & PG_V) == 0 ||
3917 ((pte & PG_RW) == 0 && (prot & VM_PROT_WRITE) != 0))
3918 goto out;
3919 m = PHYS_TO_VM_PAGE(pte & PG_FRAME);
3920
3921 check_page:
3922 if (m != NULL && !vm_page_wire_mapped(m))
3923 m = NULL;
3924 out:
3925 PMAP_UNLOCK(pmap);
3926 return (m);
3927 }
3928
3929 /*
3930 * Routine: pmap_kextract
3931 * Function:
3932 * Extract the physical page address associated with the given kernel
3933 * virtual address.
3934 */
3935 vm_paddr_t
3936 pmap_kextract(vm_offset_t va)
3937 {
3938 pd_entry_t pde;
3939 vm_paddr_t pa;
3940
3941 if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) {
3942 pa = DMAP_TO_PHYS(va);
3943 } else if (PMAP_ADDRESS_IN_LARGEMAP(va)) {
3944 pa = pmap_large_map_kextract(va);
3945 } else {
3946 pde = *vtopde(va);
3947 if (pde & PG_PS) {
3948 pa = (pde & PG_PS_FRAME) | (va & PDRMASK);
3949 } else {
3950 /*
3951 * Beware of a concurrent promotion that changes the
3952 * PDE at this point! For example, vtopte() must not
3953 * be used to access the PTE because it would use the
3954 * new PDE. It is, however, safe to use the old PDE
3955 * because the page table page is preserved by the
3956 * promotion.
3957 */
3958 pa = *pmap_pde_to_pte(&pde, va);
3959 pa = (pa & PG_FRAME) | (va & PAGE_MASK);
3960 }
3961 }
3962 return (pa);
3963 }
3964
3965 /***************************************************
3966 * Low level mapping routines.....
3967 ***************************************************/
3968
3969 /*
3970 * Add a wired page to the kva.
3971 * Note: not SMP coherent.
3972 */
3973 void
3974 pmap_kenter(vm_offset_t va, vm_paddr_t pa)
3975 {
3976 pt_entry_t *pte;
3977
3978 pte = vtopte(va);
3979 pte_store(pte, pa | pg_g | pg_nx | X86_PG_A | X86_PG_M |
3980 X86_PG_RW | X86_PG_V);
3981 }
3982
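/*
 * Like pmap_kenter(), but the mapping's caching mode is taken from "mode".
 */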
3983 static __inline void
3984 pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode)
3985 {
3986 pt_entry_t *pte;
3987 int cache_bits;
3988
3989 pte = vtopte(va);
3990 cache_bits = pmap_cache_bits(kernel_pmap, mode, false);
3991 pte_store(pte, pa | pg_g | pg_nx | X86_PG_A | X86_PG_M |
3992 X86_PG_RW | X86_PG_V | cache_bits);
3993 }
3994
3995 /*
3996 * Remove a page from the kernel pagetables.
3997 * Note: not SMP coherent.
3998 */
3999 void
4000 pmap_kremove(vm_offset_t va)
4001 {
4002 pt_entry_t *pte;
4003
4004 pte = vtopte(va);
4005 pte_clear(pte);
4006 }
4007
4008 /*
4009 * Used to map a range of physical addresses into kernel
4010 * virtual address space.
4011 *
4012 * The value passed in '*virt' is a suggested virtual address for
4013 * the mapping. Architectures which can support a direct-mapped
4014 * physical to virtual region can return the appropriate address
4015 * within that region, leaving '*virt' unchanged. Other
4016 * architectures should map the pages starting at '*virt' and
4017 * update '*virt' with the first usable address after the mapped
4018 * region.
4019 */
4020 vm_offset_t
4021 pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
4022 {
4023 return PHYS_TO_DMAP(start);
4024 }
4025
4026 /*
4027 * Add a list of wired pages to the kva
4028 * this routine is only used for temporary
4029 * kernel mappings that do not need to have
4030 * page modification or references recorded.
4031 * Note that old mappings are simply written
4032 * over. The page *must* be wired.
4033 * Note: SMP coherent. Uses a ranged shootdown IPI.
4034 */
4035 void
4036 pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
4037 {
4038 pt_entry_t *endpte, oldpte, pa, *pte;
4039 vm_page_t m;
4040 int cache_bits;
4041
4042 oldpte = 0;
4043 pte = vtopte(sva);
4044 endpte = pte + count;
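/*
 * Accumulate the prior PTE bits; a single ranged invalidation is issued
 * after the loop only if at least one previously valid mapping was
 * overwritten.
 */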
4045 while (pte < endpte) {
4046 m = *ma++;
4047 cache_bits = pmap_cache_bits(kernel_pmap, m->md.pat_mode, false);
4048 pa = VM_PAGE_TO_PHYS(m) | cache_bits;
4049 if ((*pte & (PG_FRAME | X86_PG_PTE_CACHE)) != pa) {
4050 oldpte |= *pte;
4051 pte_store(pte, pa | pg_g | pg_nx | X86_PG_A |
4052 X86_PG_M | X86_PG_RW | X86_PG_V);
4053 }
4054 pte++;
4055 }
4056 if (__predict_false((oldpte & X86_PG_V) != 0))
4057 pmap_invalidate_range(kernel_pmap, sva, sva + count *
4058 PAGE_SIZE);
4059 }
4060
4061 /*
4062 * This routine tears out page mappings from the
4063 * kernel -- it is meant only for temporary mappings.
4064 * Note: SMP coherent. Uses a ranged shootdown IPI.
4065 */
4066 void
4067 pmap_qremove(vm_offset_t sva, int count)
4068 {
4069 vm_offset_t va;
4070
4071 va = sva;
4072 while (count-- > 0) {
4073 KASSERT(va >= VM_MIN_KERNEL_ADDRESS, ("usermode va %lx", va));
4074 pmap_kremove(va);
4075 va += PAGE_SIZE;
4076 }
4077 pmap_invalidate_range(kernel_pmap, sva, va);
4078 }
4079
4080 /***************************************************
4081 * Page table page management routines.....
4082 ***************************************************/
4083 /*
4084 * Schedule the specified unused page table page to be freed. Specifically,
4085 * add the page to the specified list of pages that will be released to the
4086 * physical memory manager after the TLB has been updated.
4087 */
4088 static __inline void
4089 pmap_add_delayed_free_list(vm_page_t m, struct spglist *free, bool set_PG_ZERO)
4090 {
4091
4092 if (set_PG_ZERO)
4093 m->flags |= PG_ZERO;
4094 else
4095 m->flags &= ~PG_ZERO;
4096 SLIST_INSERT_HEAD(free, m, plinks.s.ss);
4097 }
4098
4099 /*
4100 * Inserts the specified page table page into the specified pmap's collection
4101 * of idle page table pages. Each of a pmap's page table pages is responsible
4102 * for mapping a distinct range of virtual addresses. The pmap's collection is
4103 * ordered by this virtual address range.
4104 *
4105 * If "promoted" is false, then the page table page "mpte" must be zero filled;
4106 * "mpte"'s valid field will be set to 0.
4107 *
4108 * If "promoted" is true and "allpte_PG_A_set" is false, then "mpte" must
4109 * contain valid mappings with identical attributes except for PG_A; "mpte"'s
4110 * valid field will be set to 1.
4111 *
4112 * If "promoted" and "allpte_PG_A_set" are both true, then "mpte" must contain
4113 * valid mappings with identical attributes including PG_A; "mpte"'s valid
4114 * field will be set to VM_PAGE_BITS_ALL.
4115 */
4116 static __inline int
4117 pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte, bool promoted,
4118 bool allpte_PG_A_set)
4119 {
4120
4121 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4122 KASSERT(promoted || !allpte_PG_A_set,
4123 ("a zero-filled PTP can't have PG_A set in every PTE"));
4124 mpte->valid = promoted ? (allpte_PG_A_set ? VM_PAGE_BITS_ALL : 1) : 0;
4125 return (vm_radix_insert(&pmap->pm_root, mpte));
4126 }
4127
4128 /*
4129 * Removes the page table page mapping the specified virtual address from the
4130 * specified pmap's collection of idle page table pages, and returns it.
4131 * Otherwise, returns NULL if there is no page table page corresponding to the
4132 * specified virtual address.
4133 */
4134 static __inline vm_page_t
4135 pmap_remove_pt_page(pmap_t pmap, vm_offset_t va)
4136 {
4137
4138 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4139 return (vm_radix_remove(&pmap->pm_root, pmap_pde_pindex(va)));
4140 }
4141
4142 /*
4143 * Decrements a page table page's reference count, which is used to record the
4144 * number of valid page table entries within the page. If the reference count
4145 * drops to zero, then the page table page is unmapped. Returns true if the
4146 * page table page was unmapped and false otherwise.
4147 */
4148 static inline bool
4149 pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
4150 {
4151
4152 --m->ref_count;
4153 if (m->ref_count == 0) {
4154 _pmap_unwire_ptp(pmap, va, m, free);
4155 return (true);
4156 } else
4157 return (false);
4158 }
4159
4160 static void
4161 _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
4162 {
4163 pml5_entry_t *pml5;
4164 pml4_entry_t *pml4;
4165 pdp_entry_t *pdp;
4166 pd_entry_t *pd;
4167 vm_page_t pdpg, pdppg, pml4pg;
4168
4169 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4170
4171 /*
4172 * unmap the page table page
4173 */
4174 if (m->pindex >= NUPDE + NUPDPE + NUPML4E) {
4175 /* PML4 page */
4176 MPASS(pmap_is_la57(pmap));
4177 pml5 = pmap_pml5e(pmap, va);
4178 *pml5 = 0;
4179 if (pmap->pm_pmltopu != NULL && va <= VM_MAXUSER_ADDRESS) {
4180 pml5 = pmap_pml5e_u(pmap, va);
4181 *pml5 = 0;
4182 }
4183 } else if (m->pindex >= NUPDE + NUPDPE) {
4184 /* PDP page */
4185 pml4 = pmap_pml4e(pmap, va);
4186 *pml4 = 0;
4187 if (!pmap_is_la57(pmap) && pmap->pm_pmltopu != NULL &&
4188 va <= VM_MAXUSER_ADDRESS) {
4189 pml4 = pmap_pml4e_u(pmap, va);
4190 *pml4 = 0;
4191 }
4192 } else if (m->pindex >= NUPDE) {
4193 /* PD page */
4194 pdp = pmap_pdpe(pmap, va);
4195 *pdp = 0;
4196 } else {
4197 /* PTE page */
4198 pd = pmap_pde(pmap, va);
4199 *pd = 0;
4200 }
4201 if (m->pindex < NUPDE) {
4202 /* We just released a PT, unhold the matching PD */
4203 pdpg = PHYS_TO_VM_PAGE(*pmap_pdpe(pmap, va) & PG_FRAME);
4204 pmap_unwire_ptp(pmap, va, pdpg, free);
4205 } else if (m->pindex < NUPDE + NUPDPE) {
4206 /* We just released a PD, unhold the matching PDP */
4207 pdppg = PHYS_TO_VM_PAGE(*pmap_pml4e(pmap, va) & PG_FRAME);
4208 pmap_unwire_ptp(pmap, va, pdppg, free);
4209 } else if (m->pindex < NUPDE + NUPDPE + NUPML4E && pmap_is_la57(pmap)) {
4210 /* We just released a PDP, unhold the matching PML4 */
4211 pml4pg = PHYS_TO_VM_PAGE(*pmap_pml5e(pmap, va) & PG_FRAME);
4212 pmap_unwire_ptp(pmap, va, pml4pg, free);
4213 }
4214
4215 pmap_pt_page_count_adj(pmap, -1);
4216
4217 /*
4218 * Put page on a list so that it is released after
4219 * *ALL* TLB shootdown is done
4220 */
4221 pmap_add_delayed_free_list(m, free, true);
4222 }
4223
4224 /*
4225 * After removing a page table entry, this routine is used to
4226 * conditionally free the page, and manage the reference count.
4227 */
4228 static int
4229 pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t ptepde,
4230 struct spglist *free)
4231 {
4232 vm_page_t mpte;
4233
4234 if (va >= VM_MAXUSER_ADDRESS)
4235 return (0);
4236 KASSERT(ptepde != 0, ("pmap_unuse_pt: ptepde != 0"));
4237 mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME);
4238 return (pmap_unwire_ptp(pmap, va, mpte, free));
4239 }
4240
4241 /*
4242 * Release a page table page reference after a failed attempt to create a
4243 * mapping.
4244 */
4245 static void
4246 pmap_abort_ptp(pmap_t pmap, vm_offset_t va, vm_page_t mpte)
4247 {
4248 struct spglist free;
4249
4250 SLIST_INIT(&free);
4251 if (pmap_unwire_ptp(pmap, va, mpte, &free)) {
4252 /*
4253 * Although "va" was never mapped, paging-structure caches
4254 * could nonetheless have entries that refer to the freed
4255 * page table pages. Invalidate those entries.
4256 */
4257 pmap_invalidate_page(pmap, va);
4258 vm_page_free_pages_toq(&free, true);
4259 }
4260 }
4261
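/*
 * Set the PCID and generation count in the pmap's per-CPU PCID state on
 * every CPU.
 */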
4262 static void
4263 pmap_pinit_pcids(pmap_t pmap, uint32_t pcid, int gen)
4264 {
4265 struct pmap_pcid *pcidp;
4266 int i;
4267
4268 CPU_FOREACH(i) {
4269 pcidp = zpcpu_get_cpu(pmap->pm_pcidp, i);
4270 pcidp->pm_pcid = pcid;
4271 pcidp->pm_gen = gen;
4272 }
4273 }
4274
4275 void
4276 pmap_pinit0(pmap_t pmap)
4277 {
4278 struct proc *p;
4279 struct thread *td;
4280
4281 PMAP_LOCK_INIT(pmap);
4282 pmap->pm_pmltop = kernel_pmap->pm_pmltop;
4283 pmap->pm_pmltopu = NULL;
4284 pmap->pm_cr3 = kernel_pmap->pm_cr3;
4285 /* hack to keep pmap_pti_pcid_invalidate() alive */
4286 pmap->pm_ucr3 = PMAP_NO_CR3;
4287 vm_radix_init(&pmap->pm_root);
4288 CPU_ZERO(&pmap->pm_active);
4289 TAILQ_INIT(&pmap->pm_pvchunk);
4290 bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
4291 pmap->pm_flags = pmap_flags;
4292 pmap->pm_pcidp = uma_zalloc_pcpu(pcpu_zone_8, M_WAITOK);
4293 pmap_pinit_pcids(pmap, PMAP_PCID_KERN + 1, 1);
4294 pmap_activate_boot(pmap);
4295 td = curthread;
4296 if (pti) {
4297 p = td->td_proc;
4298 PROC_LOCK(p);
4299 p->p_md.md_flags |= P_MD_KPTI;
4300 PROC_UNLOCK(p);
4301 }
4302 pmap_thread_init_invl_gen(td);
4303
4304 if ((cpu_stdext_feature2 & CPUID_STDEXT2_PKU) != 0) {
4305 pmap_pkru_ranges_zone = uma_zcreate("pkru ranges",
4306 sizeof(struct pmap_pkru_range), NULL, NULL, NULL, NULL,
4307 UMA_ALIGN_PTR, 0);
4308 }
4309 }
4310
4311 void
4312 pmap_pinit_pml4(vm_page_t pml4pg)
4313 {
4314 pml4_entry_t *pm_pml4;
4315 int i;
4316
4317 pm_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pml4pg));
4318
4319 /* Wire in kernel global address entries. */
4320 for (i = 0; i < NKPML4E; i++) {
4321 pm_pml4[KPML4BASE + i] = (KPDPphys + ptoa(i)) | X86_PG_RW |
4322 X86_PG_V;
4323 }
4324 #ifdef KASAN
4325 for (i = 0; i < NKASANPML4E; i++) {
4326 pm_pml4[KASANPML4I + i] = (KASANPDPphys + ptoa(i)) | X86_PG_RW |
4327 X86_PG_V | pg_nx;
4328 }
4329 #endif
4330 #ifdef KMSAN
4331 for (i = 0; i < NKMSANSHADPML4E; i++) {
4332 pm_pml4[KMSANSHADPML4I + i] = (KMSANSHADPDPphys + ptoa(i)) |
4333 X86_PG_RW | X86_PG_V | pg_nx;
4334 }
4335 for (i = 0; i < NKMSANORIGPML4E; i++) {
4336 pm_pml4[KMSANORIGPML4I + i] = (KMSANORIGPDPphys + ptoa(i)) |
4337 X86_PG_RW | X86_PG_V | pg_nx;
4338 }
4339 #endif
4340 for (i = 0; i < ndmpdpphys; i++) {
4341 pm_pml4[DMPML4I + i] = (DMPDPphys + ptoa(i)) | X86_PG_RW |
4342 X86_PG_V;
4343 }
4344
4345 /* install self-referential address mapping entry(s) */
4346 pm_pml4[PML4PML4I] = VM_PAGE_TO_PHYS(pml4pg) | X86_PG_V | X86_PG_RW |
4347 X86_PG_A | X86_PG_M;
4348
4349 /* install large map entries if configured */
4350 for (i = 0; i < lm_ents; i++)
4351 pm_pml4[LMSPML4I + i] = kernel_pmap->pm_pmltop[LMSPML4I + i];
4352 }
4353
4354 void
4355 pmap_pinit_pml5(vm_page_t pml5pg)
4356 {
4357 pml5_entry_t *pm_pml5;
4358
4359 pm_pml5 = (pml5_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pml5pg));
4360
4361 /*
4362 * Add pml5 entry at top of KVA pointing to existing pml4 table,
4363 * entering all existing kernel mappings into level 5 table.
4364 */
4365 pm_pml5[pmap_pml5e_index(UPT_MAX_ADDRESS)] = KPML4phys | X86_PG_V |
4366 X86_PG_RW | X86_PG_A | X86_PG_M | pg_g |
4367 pmap_cache_bits(kernel_pmap, VM_MEMATTR_DEFAULT, false);
4368
4369 /*
4370 * Install self-referential address mapping entry.
4371 */
4372 pm_pml5[PML5PML5I] = VM_PAGE_TO_PHYS(pml5pg) |
4373 X86_PG_RW | X86_PG_V | X86_PG_M | X86_PG_A |
4374 pmap_cache_bits(kernel_pmap, VM_MEMATTR_DEFAULT, false);
4375 }
4376
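/*
 * Initialize the user (PTI) copy of the PML4 page from the pti_pml4
 * template.
 */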
4377 static void
4378 pmap_pinit_pml4_pti(vm_page_t pml4pgu)
4379 {
4380 pml4_entry_t *pm_pml4u;
4381 int i;
4382
4383 pm_pml4u = (pml4_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pml4pgu));
4384 for (i = 0; i < NPML4EPG; i++)
4385 pm_pml4u[i] = pti_pml4[i];
4386 }
4387
4388 static void
4389 pmap_pinit_pml5_pti(vm_page_t pml5pgu)
4390 {
4391 pml5_entry_t *pm_pml5u;
4392
4393 pm_pml5u = (pml5_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pml5pgu));
4394 pagezero(pm_pml5u);
4395
4396 /*
4397 * Add pml5 entry at top of KVA pointing to existing pml4 pti
4398 * table, entering all kernel mappings needed for usermode
4399 * into level 5 table.
4400 */
4401 pm_pml5u[pmap_pml5e_index(UPT_MAX_ADDRESS)] =
4402 pmap_kextract((vm_offset_t)pti_pml4) |
4403 X86_PG_V | X86_PG_RW | X86_PG_A | X86_PG_M | pg_g |
4404 pmap_cache_bits(kernel_pmap, VM_MEMATTR_DEFAULT, false);
4405 }
4406
4407 /* Allocate a page table page and do related bookkeeping */
4408 static vm_page_t
4409 pmap_alloc_pt_page(pmap_t pmap, vm_pindex_t pindex, int flags)
4410 {
4411 vm_page_t m;
4412
4413 m = vm_page_alloc_noobj(flags);
4414 if (__predict_false(m == NULL))
4415 return (NULL);
4416 m->pindex = pindex;
4417 pmap_pt_page_count_adj(pmap, 1);
4418 return (m);
4419 }
4420
4421 static void
4422 pmap_free_pt_page(pmap_t pmap, vm_page_t m, bool zerofilled)
4423 {
4424 /*
4425 * This function assumes the page will need to be unwired,
4426 * even though the counterpart allocation in pmap_alloc_pt_page()
4427 * doesn't enforce VM_ALLOC_WIRED. However, all current uses
4428 * of pmap_free_pt_page() require unwiring. The case in which
4429 * a PT page doesn't require unwiring because its ref_count has
4430 * naturally reached 0 is handled through _pmap_unwire_ptp().
4431 */
4432 vm_page_unwire_noq(m);
4433 if (zerofilled)
4434 vm_page_free_zero(m);
4435 else
4436 vm_page_free(m);
4437
4438 pmap_pt_page_count_adj(pmap, -1);
4439 }
4440
4441 _Static_assert(sizeof(struct pmap_pcid) == 8, "Fix pcpu zone for pm_pcidp");
4442
4443 /*
4444 * Initialize a preallocated and zeroed pmap structure,
4445 * such as one in a vmspace structure.
4446 */
4447 int
4448 pmap_pinit_type(pmap_t pmap, enum pmap_type pm_type, int flags)
4449 {
4450 vm_page_t pmltop_pg, pmltop_pgu;
4451 vm_paddr_t pmltop_phys;
4452
4453 bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
4454
4455 /*
4456 * Allocate the page directory page. Pass NULL instead of a
4457 * pointer to the pmap here to avoid calling
4458 * pmap_resident_count_adj() through pmap_pt_page_count_adj(),
4459 * since that requires pmap lock. Instead do the accounting
4460 * manually.
4461 *
4462 * Note that the optimization in the final call to pmap_remove(),
4463 * which checks for a zero resident_count, is effectively disabled
4464 * by accounting for the top-level page. That optimization has not
4465 * been effective anyway since we started using a non-managed
4466 * mapping for the shared page.
4467 */
4468 pmltop_pg = pmap_alloc_pt_page(NULL, 0, VM_ALLOC_WIRED | VM_ALLOC_ZERO |
4469 VM_ALLOC_WAITOK);
4470 pmap_pt_page_count_pinit(pmap, 1);
4471
4472 pmltop_phys = VM_PAGE_TO_PHYS(pmltop_pg);
4473 pmap->pm_pmltop = (pml5_entry_t *)PHYS_TO_DMAP(pmltop_phys);
4474
4475 if (pmap_pcid_enabled) {
4476 if (pmap->pm_pcidp == NULL)
4477 pmap->pm_pcidp = uma_zalloc_pcpu(pcpu_zone_8,
4478 M_WAITOK);
4479 pmap_pinit_pcids(pmap, PMAP_PCID_NONE, 0);
4480 }
4481 pmap->pm_cr3 = PMAP_NO_CR3; /* initialize to an invalid value */
4482 pmap->pm_ucr3 = PMAP_NO_CR3;
4483 pmap->pm_pmltopu = NULL;
4484
4485 pmap->pm_type = pm_type;
4486
4487 /*
4488 * Do not install the host kernel mappings in the nested page
4489 * tables. These mappings are meaningless in the guest physical
4490 * address space.
4491 * Install minimal kernel mappings in PTI case.
4492 */
4493 switch (pm_type) {
4494 case PT_X86:
4495 pmap->pm_cr3 = pmltop_phys;
4496 if (pmap_is_la57(pmap))
4497 pmap_pinit_pml5(pmltop_pg);
4498 else
4499 pmap_pinit_pml4(pmltop_pg);
4500 if ((curproc->p_md.md_flags & P_MD_KPTI) != 0) {
4501 /*
4502 * As with pmltop_pg, pass NULL instead of a
4503 * pointer to the pmap to ensure that the PTI
4504 * page is counted explicitly.
4505 */
4506 pmltop_pgu = pmap_alloc_pt_page(NULL, 0,
4507 VM_ALLOC_WIRED | VM_ALLOC_WAITOK);
4508 pmap_pt_page_count_pinit(pmap, 1);
4509 pmap->pm_pmltopu = (pml4_entry_t *)PHYS_TO_DMAP(
4510 VM_PAGE_TO_PHYS(pmltop_pgu));
4511 if (pmap_is_la57(pmap))
4512 pmap_pinit_pml5_pti(pmltop_pgu);
4513 else
4514 pmap_pinit_pml4_pti(pmltop_pgu);
4515 pmap->pm_ucr3 = VM_PAGE_TO_PHYS(pmltop_pgu);
4516 }
4517 if ((cpu_stdext_feature2 & CPUID_STDEXT2_PKU) != 0) {
4518 rangeset_init(&pmap->pm_pkru, pkru_dup_range,
4519 pkru_free_range, pmap, M_NOWAIT);
4520 }
4521 break;
4522 case PT_EPT:
4523 case PT_RVI:
4524 pmap->pm_eptsmr = smr_create("pmap", 0, 0);
4525 break;
4526 }
4527
4528 vm_radix_init(&pmap->pm_root);
4529 CPU_ZERO(&pmap->pm_active);
4530 TAILQ_INIT(&pmap->pm_pvchunk);
4531 pmap->pm_flags = flags;
4532 pmap->pm_eptgen = 0;
4533
4534 return (1);
4535 }
4536
4537 int
4538 pmap_pinit(pmap_t pmap)
4539 {
4540
4541 return (pmap_pinit_type(pmap, PT_X86, pmap_flags));
4542 }
4543
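/*
 * If the page table page referenced by *pte has no remaining references,
 * unmap and free it, and invalidate any paging-structure cache entries
 * that may still refer to it.
 */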
4544 static void
4545 pmap_allocpte_free_unref(pmap_t pmap, vm_offset_t va, pt_entry_t *pte)
4546 {
4547 vm_page_t mpg;
4548 struct spglist free;
4549
4550 mpg = PHYS_TO_VM_PAGE(*pte & PG_FRAME);
4551 if (mpg->ref_count != 0)
4552 return;
4553 SLIST_INIT(&free);
4554 _pmap_unwire_ptp(pmap, va, mpg, &free);
4555 pmap_invalidate_page(pmap, va);
4556 vm_page_free_pages_toq(&free, true);
4557 }
4558
4559 static pml4_entry_t *
4560 pmap_allocpte_getpml4(pmap_t pmap, struct rwlock **lockp, vm_offset_t va,
4561 bool addref)
4562 {
4563 vm_pindex_t pml5index;
4564 pml5_entry_t *pml5;
4565 pml4_entry_t *pml4;
4566 vm_page_t pml4pg;
4567 pt_entry_t PG_V;
4568 bool allocated;
4569
4570 if (!pmap_is_la57(pmap))
4571 return (&pmap->pm_pmltop[pmap_pml4e_index(va)]);
4572
4573 PG_V = pmap_valid_bit(pmap);
4574 pml5index = pmap_pml5e_index(va);
4575 pml5 = &pmap->pm_pmltop[pml5index];
4576 if ((*pml5 & PG_V) == 0) {
4577 if (pmap_allocpte_nosleep(pmap, pmap_pml5e_pindex(va), lockp,
4578 va) == NULL)
4579 return (NULL);
4580 allocated = true;
4581 } else {
4582 allocated = false;
4583 }
4584 pml4 = (pml4_entry_t *)PHYS_TO_DMAP(*pml5 & PG_FRAME);
4585 pml4 = &pml4[pmap_pml4e_index(va)];
4586 if ((*pml4 & PG_V) == 0) {
4587 pml4pg = PHYS_TO_VM_PAGE(*pml5 & PG_FRAME);
4588 if (allocated && !addref)
4589 pml4pg->ref_count--;
4590 else if (!allocated && addref)
4591 pml4pg->ref_count++;
4592 }
4593 return (pml4);
4594 }
4595
4596 static pdp_entry_t *
4597 pmap_allocpte_getpdp(pmap_t pmap, struct rwlock **lockp, vm_offset_t va,
4598 bool addref)
4599 {
4600 vm_page_t pdppg;
4601 pml4_entry_t *pml4;
4602 pdp_entry_t *pdp;
4603 pt_entry_t PG_V;
4604 bool allocated;
4605
4606 PG_V = pmap_valid_bit(pmap);
4607
4608 pml4 = pmap_allocpte_getpml4(pmap, lockp, va, false);
4609 if (pml4 == NULL)
4610 return (NULL);
4611
4612 if ((*pml4 & PG_V) == 0) {
4613 /* Have to allocate a new pdp, recurse */
4614 if (pmap_allocpte_nosleep(pmap, pmap_pml4e_pindex(va), lockp,
4615 va) == NULL) {
4616 if (pmap_is_la57(pmap))
4617 pmap_allocpte_free_unref(pmap, va,
4618 pmap_pml5e(pmap, va));
4619 return (NULL);
4620 }
4621 allocated = true;
4622 } else {
4623 allocated = false;
4624 }
4625 pdp = (pdp_entry_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME);
4626 pdp = &pdp[pmap_pdpe_index(va)];
4627 if ((*pdp & PG_V) == 0) {
4628 pdppg = PHYS_TO_VM_PAGE(*pml4 & PG_FRAME);
4629 if (allocated && !addref)
4630 pdppg->ref_count--;
4631 else if (!allocated && addref)
4632 pdppg->ref_count++;
4633 }
4634 return (pdp);
4635 }
4636
4637 /*
4638 * The ptepindexes, i.e. page indices, of the page table pages encountered
4639 * while translating virtual address va are defined as follows:
4640 * - for the page table page (last level),
4641 * ptepindex = pmap_pde_pindex(va) = va >> PDRSHIFT,
4642 * in other words, it is just the index of the PDE that maps the page
4643 * table page.
4644 * - for the page directory page,
4645 * ptepindex = NUPDE (number of userland PD entries) +
4646 * (pmap_pde_index(va) >> NPDEPGSHIFT)
4647 * i.e. index of PDPE is put after the last index of PDE,
4648 * - for the page directory pointer page,
4649 * ptepindex = NUPDE + NUPDPE + (pmap_pde_index(va) >> (NPDEPGSHIFT +
4650 * NPML4EPGSHIFT),
4651 * i.e. index of pml4e is put after the last index of PDPE,
4652 * - for the PML4 page (if LA57 mode is enabled),
4653 * ptepindex = NUPDE + NUPDPE + NUPML4E + (pmap_pde_index(va) >>
4654 * (NPDEPGSHIFT + NPML4EPGSHIFT + NPML5EPGSHIFT),
4655 * i.e. index of pml5e is put after the last index of PML4E.
4656 *
4657 * Define an order on the paging entries, where all entries of the
4658 * same height are put together, then heights are put from deepest to
4659 * root. Then ptexpindex is the sequential number of the
4660 * corresponding paging entry in this order.
4661 *
4662 * The values of NUPDE, NUPDPE, and NUPML4E are determined by the size of
4663 * LA57 paging structures even in LA48 paging mode. Moreover, the
4664 * ptepindexes are calculated as if the paging structures were 5-level
4665 * regardless of the actual mode of operation.
4666 *
4667 * The root page at PML4/PML5 does not participate in this indexing scheme,
4668 * since it is statically allocated by pmap_pinit() and not by pmap_allocpte().
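 *
 * For example, for va = 0 the page table page has ptepindex 0, its page
 * directory page has ptepindex NUPDE, its page directory pointer page has
 * ptepindex NUPDE + NUPDPE, and, in LA57 mode, its PML4 page has ptepindex
 * NUPDE + NUPDPE + NUPML4E.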
4669 */
4670 static vm_page_t
4671 pmap_allocpte_nosleep(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp,
4672 vm_offset_t va)
4673 {
4674 vm_pindex_t pml5index, pml4index;
4675 pml5_entry_t *pml5, *pml5u;
4676 pml4_entry_t *pml4, *pml4u;
4677 pdp_entry_t *pdp;
4678 pd_entry_t *pd;
4679 vm_page_t m, pdpg;
4680 pt_entry_t PG_A, PG_M, PG_RW, PG_V;
4681
4682 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4683
4684 PG_A = pmap_accessed_bit(pmap);
4685 PG_M = pmap_modified_bit(pmap);
4686 PG_V = pmap_valid_bit(pmap);
4687 PG_RW = pmap_rw_bit(pmap);
4688
4689 /*
4690 * Allocate a page table page.
4691 */
4692 m = pmap_alloc_pt_page(pmap, ptepindex,
4693 VM_ALLOC_WIRED | VM_ALLOC_ZERO);
4694 if (m == NULL)
4695 return (NULL);
4696
4697 /*
4698 * Map the pagetable page into the process address space, if
4699 * it isn't already there.
4700 */
4701 if (ptepindex >= NUPDE + NUPDPE + NUPML4E) {
4702 MPASS(pmap_is_la57(pmap));
4703
4704 pml5index = pmap_pml5e_index(va);
4705 pml5 = &pmap->pm_pmltop[pml5index];
4706 KASSERT((*pml5 & PG_V) == 0,
4707 ("pmap %p va %#lx pml5 %#lx", pmap, va, *pml5));
4708 *pml5 = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M;
4709
4710 if (pmap->pm_pmltopu != NULL && pml5index < NUPML5E) {
4711 MPASS(pmap->pm_ucr3 != PMAP_NO_CR3);
4712 *pml5 |= pg_nx;
4713
4714 pml5u = &pmap->pm_pmltopu[pml5index];
4715 *pml5u = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V |
4716 PG_A | PG_M;
4717 }
4718 } else if (ptepindex >= NUPDE + NUPDPE) {
4719 pml4index = pmap_pml4e_index(va);
4720 /* Wire up a new PDPE page */
4721 pml4 = pmap_allocpte_getpml4(pmap, lockp, va, true);
4722 if (pml4 == NULL) {
4723 pmap_free_pt_page(pmap, m, true);
4724 return (NULL);
4725 }
4726 KASSERT((*pml4 & PG_V) == 0,
4727 ("pmap %p va %#lx pml4 %#lx", pmap, va, *pml4));
4728 *pml4 = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M;
4729
4730 if (!pmap_is_la57(pmap) && pmap->pm_pmltopu != NULL &&
4731 pml4index < NUPML4E) {
4732 MPASS(pmap->pm_ucr3 != PMAP_NO_CR3);
4733
4734 /*
4735 * PTI: Make all user-space mappings in the
4736 * kernel-mode page table no-execute so that
4737 * we detect any programming errors that leave
4738 * the kernel-mode page table active on return
4739 * to user space.
4740 */
4741 *pml4 |= pg_nx;
4742
4743 pml4u = &pmap->pm_pmltopu[pml4index];
4744 *pml4u = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V |
4745 PG_A | PG_M;
4746 }
4747 } else if (ptepindex >= NUPDE) {
4748 /* Wire up a new PDE page */
4749 pdp = pmap_allocpte_getpdp(pmap, lockp, va, true);
4750 if (pdp == NULL) {
4751 pmap_free_pt_page(pmap, m, true);
4752 return (NULL);
4753 }
4754 KASSERT((*pdp & PG_V) == 0,
4755 ("pmap %p va %#lx pdp %#lx", pmap, va, *pdp));
4756 *pdp = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M;
4757 } else {
4758 /* Wire up a new PTE page */
4759 pdp = pmap_allocpte_getpdp(pmap, lockp, va, false);
4760 if (pdp == NULL) {
4761 pmap_free_pt_page(pmap, m, true);
4762 return (NULL);
4763 }
4764 if ((*pdp & PG_V) == 0) {
4765 /* Have to allocate a new pd, recurse */
4766 if (pmap_allocpte_nosleep(pmap, pmap_pdpe_pindex(va),
4767 lockp, va) == NULL) {
4768 pmap_allocpte_free_unref(pmap, va,
4769 pmap_pml4e(pmap, va));
4770 pmap_free_pt_page(pmap, m, true);
4771 return (NULL);
4772 }
4773 } else {
4774 /* Add reference to the pd page */
4775 pdpg = PHYS_TO_VM_PAGE(*pdp & PG_FRAME);
4776 pdpg->ref_count++;
4777 }
4778 pd = (pd_entry_t *)PHYS_TO_DMAP(*pdp & PG_FRAME);
4779
4780 /* Now we know where the page directory page is */
4781 pd = &pd[pmap_pde_index(va)];
4782 KASSERT((*pd & PG_V) == 0,
4783 ("pmap %p va %#lx pd %#lx", pmap, va, *pd));
4784 *pd = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M;
4785 }
4786
4787 return (m);
4788 }
4789
4790 /*
4791 * This routine is called if the desired page table page does not exist.
4792 *
4793 * If page table page allocation fails, this routine may sleep before
4794 * returning NULL. It sleeps only if a lock pointer was given. Sleep
4795 * occurs right before returning to the caller. This way, we never
4796 * drop pmap lock to sleep while a page table page has ref_count == 0,
4797 * which prevents the page from being freed under us.
4798 */
4799 static vm_page_t
4800 pmap_allocpte_alloc(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp,
4801 vm_offset_t va)
4802 {
4803 vm_page_t m;
4804
4805 m = pmap_allocpte_nosleep(pmap, ptepindex, lockp, va);
4806 if (m == NULL && lockp != NULL) {
4807 RELEASE_PV_LIST_LOCK(lockp);
4808 PMAP_UNLOCK(pmap);
4809 PMAP_ASSERT_NOT_IN_DI();
4810 vm_wait(NULL);
4811 PMAP_LOCK(pmap);
4812 }
4813 return (m);
4814 }
4815
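/*
 * Return a pointer to the PDE for va, allocating the page directory page
 * for user addresses if it is not already present.  On success, *pdpgp is
 * set to the page directory page, holding a reference for the caller, for
 * user addresses, or to NULL for kernel addresses.
 */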
4816 static pd_entry_t *
4817 pmap_alloc_pde(pmap_t pmap, vm_offset_t va, vm_page_t *pdpgp,
4818 struct rwlock **lockp)
4819 {
4820 pdp_entry_t *pdpe, PG_V;
4821 pd_entry_t *pde;
4822 vm_page_t pdpg;
4823 vm_pindex_t pdpindex;
4824
4825 PG_V = pmap_valid_bit(pmap);
4826
4827 retry:
4828 pdpe = pmap_pdpe(pmap, va);
4829 if (pdpe != NULL && (*pdpe & PG_V) != 0) {
4830 pde = pmap_pdpe_to_pde(pdpe, va);
4831 if (va < VM_MAXUSER_ADDRESS) {
4832 /* Add a reference to the pd page. */
4833 pdpg = PHYS_TO_VM_PAGE(*pdpe & PG_FRAME);
4834 pdpg->ref_count++;
4835 } else
4836 pdpg = NULL;
4837 } else if (va < VM_MAXUSER_ADDRESS) {
4838 /* Allocate a pd page. */
4839 pdpindex = pmap_pde_pindex(va) >> NPDPEPGSHIFT;
4840 pdpg = pmap_allocpte_alloc(pmap, NUPDE + pdpindex, lockp, va);
4841 if (pdpg == NULL) {
4842 if (lockp != NULL)
4843 goto retry;
4844 else
4845 return (NULL);
4846 }
4847 pde = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pdpg));
4848 pde = &pde[pmap_pde_index(va)];
4849 } else
4850 panic("pmap_alloc_pde: missing page table page for va %#lx",
4851 va);
4852 *pdpgp = pdpg;
4853 return (pde);
4854 }
4855
4856 static vm_page_t
4857 pmap_allocpte(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
4858 {
4859 vm_pindex_t ptepindex;
4860 pd_entry_t *pd, PG_V;
4861 vm_page_t m;
4862
4863 PG_V = pmap_valid_bit(pmap);
4864
4865 /*
4866 * Calculate pagetable page index
4867 */
4868 ptepindex = pmap_pde_pindex(va);
4869 retry:
4870 /*
4871 * Get the page directory entry
4872 */
4873 pd = pmap_pde(pmap, va);
4874
4875 /*
4876 * This supports switching from a 2MB page to a
4877 * normal 4K page.
4878 */
4879 if (pd != NULL && (*pd & (PG_PS | PG_V)) == (PG_PS | PG_V)) {
4880 if (!pmap_demote_pde_locked(pmap, pd, va, lockp)) {
4881 /*
4882 * Invalidation of the 2MB page mapping may have caused
4883 * the deallocation of the underlying PD page.
4884 */
4885 pd = NULL;
4886 }
4887 }
4888
4889 /*
4890 * If the page table page is mapped, we just increment the
4891 * hold count, and activate it.
4892 */
4893 if (pd != NULL && (*pd & PG_V) != 0) {
4894 m = PHYS_TO_VM_PAGE(*pd & PG_FRAME);
4895 m->ref_count++;
4896 } else {
4897 /*
4898 * Here if the pte page isn't mapped, or if it has been
4899 * deallocated.
4900 */
4901 m = pmap_allocpte_alloc(pmap, ptepindex, lockp, va);
4902 if (m == NULL && lockp != NULL)
4903 goto retry;
4904 }
4905 return (m);
4906 }
4907
4908 /***************************************************
4909 * Pmap allocation/deallocation routines.
4910 ***************************************************/
4911
4912 /*
4913 * Release any resources held by the given physical map.
4914 * Called when a pmap initialized by pmap_pinit is being released.
4915 * Should only be called if the map contains no valid mappings.
4916 */
4917 void
4918 pmap_release(pmap_t pmap)
4919 {
4920 vm_page_t m;
4921 int i;
4922
4923 KASSERT(vm_radix_is_empty(&pmap->pm_root),
4924 ("pmap_release: pmap %p has reserved page table page(s)",
4925 pmap));
4926 KASSERT(CPU_EMPTY(&pmap->pm_active),
4927 ("releasing active pmap %p", pmap));
4928
4929 m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pmap->pm_pmltop));
4930
4931 if (pmap_is_la57(pmap)) {
4932 pmap->pm_pmltop[pmap_pml5e_index(UPT_MAX_ADDRESS)] = 0;
4933 pmap->pm_pmltop[PML5PML5I] = 0;
4934 } else {
4935 for (i = 0; i < NKPML4E; i++) /* KVA */
4936 pmap->pm_pmltop[KPML4BASE + i] = 0;
4937 #ifdef KASAN
4938 for (i = 0; i < NKASANPML4E; i++) /* KASAN shadow map */
4939 pmap->pm_pmltop[KASANPML4I + i] = 0;
4940 #endif
4941 #ifdef KMSAN
4942 for (i = 0; i < NKMSANSHADPML4E; i++) /* KMSAN shadow map */
4943 pmap->pm_pmltop[KMSANSHADPML4I + i] = 0;
4944 for (i = 0; i < NKMSANORIGPML4E; i++) /* KMSAN shadow map */
4945 pmap->pm_pmltop[KMSANORIGPML4I + i] = 0;
4946 #endif
4947 for (i = 0; i < ndmpdpphys; i++)/* Direct Map */
4948 pmap->pm_pmltop[DMPML4I + i] = 0;
4949 pmap->pm_pmltop[PML4PML4I] = 0; /* Recursive Mapping */
4950 for (i = 0; i < lm_ents; i++) /* Large Map */
4951 pmap->pm_pmltop[LMSPML4I + i] = 0;
4952 }
4953
4954 pmap_free_pt_page(NULL, m, true);
4955 pmap_pt_page_count_pinit(pmap, -1);
4956
4957 if (pmap->pm_pmltopu != NULL) {
4958 m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pmap->
4959 pm_pmltopu));
4960 pmap_free_pt_page(NULL, m, false);
4961 pmap_pt_page_count_pinit(pmap, -1);
4962 }
4963 if (pmap->pm_type == PT_X86 &&
4964 (cpu_stdext_feature2 & CPUID_STDEXT2_PKU) != 0)
4965 rangeset_fini(&pmap->pm_pkru);
4966
4967 KASSERT(pmap->pm_stats.resident_count == 0,
4968 ("pmap_release: pmap %p resident count %ld != 0",
4969 pmap, pmap->pm_stats.resident_count));
4970 }
4971
4972 static int
4973 kvm_size(SYSCTL_HANDLER_ARGS)
4974 {
4975 unsigned long ksize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
4976
4977 return sysctl_handle_long(oidp, &ksize, 0, req);
4978 }
4979 SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG | CTLFLAG_RD | CTLFLAG_MPSAFE,
4980 0, 0, kvm_size, "LU",
4981 "Size of KVM");
4982
4983 static int
4984 kvm_free(SYSCTL_HANDLER_ARGS)
4985 {
4986 unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end;
4987
4988 return sysctl_handle_long(oidp, &kfree, 0, req);
4989 }
4990 SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG | CTLFLAG_RD | CTLFLAG_MPSAFE,
4991 0, 0, kvm_free, "LU",
4992 "Amount of KVM free");
4993
4994 #ifdef KMSAN
4995 static void
4996 pmap_kmsan_shadow_map_page_array(vm_paddr_t pdppa, vm_size_t size)
4997 {
4998 pdp_entry_t *pdpe;
4999 pd_entry_t *pde;
5000 pt_entry_t *pte;
5001 vm_paddr_t dummypa, dummypd, dummypt;
5002 int i, npde, npdpg;
5003
5004 npdpg = howmany(size, NBPDP);
5005 npde = size / NBPDR;
5006
5007 dummypa = vm_phys_early_alloc(-1, PAGE_SIZE);
5008 pagezero((void *)PHYS_TO_DMAP(dummypa));
5009
5010 dummypt = vm_phys_early_alloc(-1, PAGE_SIZE);
5011 pagezero((void *)PHYS_TO_DMAP(dummypt));
5012 dummypd = vm_phys_early_alloc(-1, PAGE_SIZE * npdpg);
5013 for (i = 0; i < npdpg; i++)
5014 pagezero((void *)PHYS_TO_DMAP(dummypd + ptoa(i)));
5015
5016 pte = (pt_entry_t *)PHYS_TO_DMAP(dummypt);
5017 for (i = 0; i < NPTEPG; i++)
5018 pte[i] = (pt_entry_t)(dummypa | X86_PG_V | X86_PG_RW |
5019 X86_PG_A | X86_PG_M | pg_nx);
5020
5021 pde = (pd_entry_t *)PHYS_TO_DMAP(dummypd);
5022 for (i = 0; i < npde; i++)
5023 pde[i] = (pd_entry_t)(dummypt | X86_PG_V | X86_PG_RW | pg_nx);
5024
5025 pdpe = (pdp_entry_t *)PHYS_TO_DMAP(pdppa);
5026 for (i = 0; i < npdpg; i++)
5027 pdpe[i] = (pdp_entry_t)(dummypd + ptoa(i) | X86_PG_V |
5028 X86_PG_RW | pg_nx);
5029 }
5030
5031 static void
5032 pmap_kmsan_page_array_startup(vm_offset_t start, vm_offset_t end)
5033 {
5034 vm_size_t size;
5035
5036 KASSERT(start % NBPDP == 0, ("unaligned page array start address"));
5037
5038 /*
5039 * The end of the page array's KVA region is 2MB aligned, see
5040 * kmem_init().
5041 */
5042 size = round_2mpage(end) - start;
5043 pmap_kmsan_shadow_map_page_array(KMSANSHADPDPphys, size);
5044 pmap_kmsan_shadow_map_page_array(KMSANORIGPDPphys, size);
5045 }
5046 #endif
5047
5048 /*
5049 * Allocate physical memory for the vm_page array and map it into KVA,
5050 * attempting to back the vm_pages with domain-local memory.
5051 */
5052 void
5053 pmap_page_array_startup(long pages)
5054 {
5055 pdp_entry_t *pdpe;
5056 pd_entry_t *pde, newpdir;
5057 vm_offset_t va, start, end;
5058 vm_paddr_t pa;
5059 long pfn;
5060 int domain, i;
5061
5062 vm_page_array_size = pages;
5063
5064 start = VM_MIN_KERNEL_ADDRESS;
5065 end = start + pages * sizeof(struct vm_page);
5066 for (va = start; va < end; va += NBPDR) {
5067 pfn = first_page + (va - start) / sizeof(struct vm_page);
5068 domain = vm_phys_domain(ptoa(pfn));
5069 pdpe = pmap_pdpe(kernel_pmap, va);
5070 if ((*pdpe & X86_PG_V) == 0) {
5071 pa = vm_phys_early_alloc(domain, PAGE_SIZE);
5072 dump_add_page(pa);
5073 pagezero((void *)PHYS_TO_DMAP(pa));
5074 *pdpe = (pdp_entry_t)(pa | X86_PG_V | X86_PG_RW |
5075 X86_PG_A | X86_PG_M);
5076 }
5077 pde = pmap_pdpe_to_pde(pdpe, va);
5078 if ((*pde & X86_PG_V) != 0)
5079 panic("Unexpected pde");
5080 pa = vm_phys_early_alloc(domain, NBPDR);
5081 for (i = 0; i < NPDEPG; i++)
5082 dump_add_page(pa + i * PAGE_SIZE);
5083 newpdir = (pd_entry_t)(pa | X86_PG_V | X86_PG_RW | X86_PG_A |
5084 X86_PG_M | PG_PS | pg_g | pg_nx);
5085 pde_store(pde, newpdir);
5086 }
5087 vm_page_array = (vm_page_t)start;
5088
5089 #ifdef KMSAN
5090 pmap_kmsan_page_array_startup(start, end);
5091 #endif
5092 }
5093
5094 /*
5095 * grow the number of kernel page table entries, if needed
5096 */
5097 void
5098 pmap_growkernel(vm_offset_t addr)
5099 {
5100 vm_paddr_t paddr;
5101 vm_page_t nkpg;
5102 pd_entry_t *pde, newpdir;
5103 pdp_entry_t *pdpe;
5104 vm_offset_t end;
5105
5106 TSENTER();
5107 mtx_assert(&kernel_map->system_mtx, MA_OWNED);
5108
5109 /*
5110 * The kernel map covers two distinct regions of KVA: that used
5111 * for dynamic kernel memory allocations, and the uppermost 2GB
5112 * of the virtual address space. The latter is used to map the
5113 * kernel and loadable kernel modules. This scheme enables the
5114 * use of a special code generation model for kernel code which
5115 * takes advantage of compact addressing modes in machine code.
5116 *
5117 * Both regions grow upwards; to avoid wasting memory, the gap
5118 * in between is unmapped. If "addr" is above "KERNBASE", the
5119 * kernel's region is grown, otherwise the kmem region is grown.
5120 *
5121 * The correctness of this action is based on the following
5122 * argument: vm_map_insert() allocates contiguous ranges of the
5123 * kernel virtual address space. It calls this function if a range
5124 * ends after "kernel_vm_end". If the kernel is mapped between
5125 * "kernel_vm_end" and "addr", then the range cannot begin at
5126 * "kernel_vm_end". In fact, its beginning address cannot be less
5127 * than the kernel. Thus, there is no immediate need to allocate
5128 * any new kernel page table pages between "kernel_vm_end" and
5129 * "KERNBASE".
5130 */
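	/*
	 * Illustrative example of the region selection below: a request with
	 * "addr" at or below KERNBASE grows the kmem region and ultimately
	 * updates "kernel_vm_end", whereas a request above KERNBASE grows the
	 * kernel region and updates "nkpt".  In both cases "addr" is first
	 * rounded up to a 2MB boundary with roundup2(addr, NBPDR).
	 */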
5131 if (KERNBASE < addr) {
5132 end = KERNBASE + nkpt * NBPDR;
5133 if (end == 0) {
5134 TSEXIT();
5135 return;
5136 }
5137 } else {
5138 end = kernel_vm_end;
5139 }
5140
5141 addr = roundup2(addr, NBPDR);
5142 if (addr - 1 >= vm_map_max(kernel_map))
5143 addr = vm_map_max(kernel_map);
5144 if (addr <= end) {
5145 /*
5146 * The grown region is already mapped, so there is
5147 * nothing to do.
5148 */
5149 TSEXIT();
5150 return;
5151 }
5152
5153 kasan_shadow_map(end, addr - end);
5154 kmsan_shadow_map(end, addr - end);
5155 while (end < addr) {
5156 pdpe = pmap_pdpe(kernel_pmap, end);
5157 if ((*pdpe & X86_PG_V) == 0) {
5158 nkpg = pmap_alloc_pt_page(kernel_pmap,
5159 pmap_pdpe_pindex(end), VM_ALLOC_INTERRUPT |
5160 VM_ALLOC_NOFREE | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
5161 if (nkpg == NULL)
5162 panic("pmap_growkernel: no memory to grow kernel");
5163 paddr = VM_PAGE_TO_PHYS(nkpg);
5164 *pdpe = (pdp_entry_t)(paddr | X86_PG_V | X86_PG_RW |
5165 X86_PG_A | X86_PG_M);
5166 continue; /* try again */
5167 }
5168 pde = pmap_pdpe_to_pde(pdpe, end);
5169 if ((*pde & X86_PG_V) != 0) {
5170 end = (end + NBPDR) & ~PDRMASK;
5171 if (end - 1 >= vm_map_max(kernel_map)) {
5172 end = vm_map_max(kernel_map);
5173 break;
5174 }
5175 continue;
5176 }
5177
5178 nkpg = pmap_alloc_pt_page(kernel_pmap, pmap_pde_pindex(end),
5179 VM_ALLOC_INTERRUPT | VM_ALLOC_NOFREE | VM_ALLOC_WIRED |
5180 VM_ALLOC_ZERO);
5181 if (nkpg == NULL)
5182 panic("pmap_growkernel: no memory to grow kernel");
5183 paddr = VM_PAGE_TO_PHYS(nkpg);
5184 newpdir = paddr | X86_PG_V | X86_PG_RW | X86_PG_A | X86_PG_M;
5185 pde_store(pde, newpdir);
5186
5187 end = (end + NBPDR) & ~PDRMASK;
5188 if (end - 1 >= vm_map_max(kernel_map)) {
5189 end = vm_map_max(kernel_map);
5190 break;
5191 }
5192 }
5193
5194 if (end <= KERNBASE)
5195 kernel_vm_end = end;
5196 else
5197 nkpt = howmany(end - KERNBASE, NBPDR);
5198 TSEXIT();
5199 }
5200
5201 /***************************************************
5202 * page management routines.
5203 ***************************************************/
5204
5205 static const uint64_t pc_freemask[_NPCM] = {
5206 [0 ... _NPCM - 2] = PC_FREEN,
5207 [_NPCM - 1] = PC_FREEL
5208 };
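/*
 * pc_freemask describes a completely free pv chunk: each set bit marks a free
 * pv entry slot.  The first _NPCM - 1 words are entirely free (PC_FREEN); the
 * last word uses the partial mask PC_FREEL because the number of pv entries
 * per chunk is not a multiple of 64 (on amd64 a chunk typically holds 168
 * entries, i.e. 64 + 64 + 40).
 */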
5209
5210 #ifdef PV_STATS
5211
5212 static COUNTER_U64_DEFINE_EARLY(pc_chunk_count);
5213 SYSCTL_COUNTER_U64(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD,
5214 &pc_chunk_count, "Current number of pv entry chunks");
5215
5216 static COUNTER_U64_DEFINE_EARLY(pc_chunk_allocs);
5217 SYSCTL_COUNTER_U64(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD,
5218 &pc_chunk_allocs, "Total number of pv entry chunks allocated");
5219
5220 static COUNTER_U64_DEFINE_EARLY(pc_chunk_frees);
5221 SYSCTL_COUNTER_U64(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD,
5222 &pc_chunk_frees, "Total number of pv entry chunks freed");
5223
5224 static COUNTER_U64_DEFINE_EARLY(pc_chunk_tryfail);
5225 SYSCTL_COUNTER_U64(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD,
5226 &pc_chunk_tryfail,
5227 "Number of failed attempts to get a pv entry chunk page");
5228
5229 static COUNTER_U64_DEFINE_EARLY(pv_entry_frees);
5230 SYSCTL_COUNTER_U64(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD,
5231 &pv_entry_frees, "Total number of pv entries freed");
5232
5233 static COUNTER_U64_DEFINE_EARLY(pv_entry_allocs);
5234 SYSCTL_COUNTER_U64(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD,
5235 &pv_entry_allocs, "Total number of pv entries allocated");
5236
5237 static COUNTER_U64_DEFINE_EARLY(pv_entry_count);
5238 SYSCTL_COUNTER_U64(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD,
5239 &pv_entry_count, "Current number of pv entries");
5240
5241 static COUNTER_U64_DEFINE_EARLY(pv_entry_spare);
5242 SYSCTL_COUNTER_U64(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD,
5243 &pv_entry_spare, "Current number of spare pv entries");
5244 #endif
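/*
 * On a kernel built with PV_STATS these counters are exported under the
 * vm.pmap sysctl tree, e.g. (illustrative):
 *
 *	sysctl vm.pmap.pv_entry_count vm.pmap.pc_chunk_count
 */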
5245
5246 static void
5247 reclaim_pv_chunk_leave_pmap(pmap_t pmap, pmap_t locked_pmap, bool start_di)
5248 {
5249
5250 if (pmap == NULL)
5251 return;
5252 pmap_invalidate_all(pmap);
5253 if (pmap != locked_pmap)
5254 PMAP_UNLOCK(pmap);
5255 if (start_di)
5256 pmap_delayed_invl_finish();
5257 }
5258
5259 /*
5260 * We are in a serious low memory condition. Resort to
5261 * drastic measures to free some pages so we can allocate
5262 * another pv entry chunk.
5263 *
5264 * Returns NULL if PV entries were reclaimed from the specified pmap.
5265 *
5266 * We do not, however, unmap 2mpages because subsequent accesses will
5267 * allocate per-page pv entries until repromotion occurs, thereby
5268 * exacerbating the shortage of free pv entries.
5269 */
5270 static vm_page_t
5271 reclaim_pv_chunk_domain(pmap_t locked_pmap, struct rwlock **lockp, int domain)
5272 {
5273 struct pv_chunks_list *pvc;
5274 struct pv_chunk *pc, *pc_marker, *pc_marker_end;
5275 struct pv_chunk_header pc_marker_b, pc_marker_end_b;
5276 struct md_page *pvh;
5277 pd_entry_t *pde;
5278 pmap_t next_pmap, pmap;
5279 pt_entry_t *pte, tpte;
5280 pt_entry_t PG_G, PG_A, PG_M, PG_RW;
5281 pv_entry_t pv;
5282 vm_offset_t va;
5283 vm_page_t m, m_pc;
5284 struct spglist free;
5285 uint64_t inuse;
5286 int bit, field, freed;
5287 bool start_di, restart;
5288
5289 PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
5290 KASSERT(lockp != NULL, ("reclaim_pv_chunk: lockp is NULL"));
5291 pmap = NULL;
5292 m_pc = NULL;
5293 PG_G = PG_A = PG_M = PG_RW = 0;
5294 SLIST_INIT(&free);
5295 bzero(&pc_marker_b, sizeof(pc_marker_b));
5296 bzero(&pc_marker_end_b, sizeof(pc_marker_end_b));
5297 pc_marker = (struct pv_chunk *)&pc_marker_b;
5298 pc_marker_end = (struct pv_chunk *)&pc_marker_end_b;
5299
5300 /*
5301 * A delayed invalidation block should already be active if
5302 * pmap_advise() or pmap_remove() called this function by way
5303 * of pmap_demote_pde_locked().
5304 */
5305 start_di = pmap_not_in_di();
5306
5307 pvc = &pv_chunks[domain];
5308 mtx_lock(&pvc->pvc_lock);
5309 pvc->active_reclaims++;
5310 TAILQ_INSERT_HEAD(&pvc->pvc_list, pc_marker, pc_lru);
5311 TAILQ_INSERT_TAIL(&pvc->pvc_list, pc_marker_end, pc_lru);
5312 while ((pc = TAILQ_NEXT(pc_marker, pc_lru)) != pc_marker_end &&
5313 SLIST_EMPTY(&free)) {
5314 next_pmap = pc->pc_pmap;
5315 if (next_pmap == NULL) {
5316 /*
5317 * The next chunk is a marker. However, it is
5318 * not our marker, so active_reclaims must be
5319 * > 1. Consequently, the next_chunk code
5320 * will not rotate the pv_chunks list.
5321 */
5322 goto next_chunk;
5323 }
5324 mtx_unlock(&pvc->pvc_lock);
5325
5326 /*
5327 * A pv_chunk can only be removed from the pc_lru list
5328 * when both the per-domain pvc_lock is owned and the
5329 * corresponding pmap is locked.
5330 */
5331 if (pmap != next_pmap) {
5332 restart = false;
5333 reclaim_pv_chunk_leave_pmap(pmap, locked_pmap,
5334 start_di);
5335 pmap = next_pmap;
5336 /* Avoid deadlock and lock recursion. */
5337 if (pmap > locked_pmap) {
5338 RELEASE_PV_LIST_LOCK(lockp);
5339 PMAP_LOCK(pmap);
5340 if (start_di)
5341 pmap_delayed_invl_start();
5342 mtx_lock(&pvc->pvc_lock);
5343 restart = true;
5344 } else if (pmap != locked_pmap) {
5345 if (PMAP_TRYLOCK(pmap)) {
5346 if (start_di)
5347 pmap_delayed_invl_start();
5348 mtx_lock(&pvc->pvc_lock);
5349 restart = true;
5350 } else {
5351 pmap = NULL; /* pmap is not locked */
5352 mtx_lock(&pvc->pvc_lock);
5353 pc = TAILQ_NEXT(pc_marker, pc_lru);
5354 if (pc == NULL ||
5355 pc->pc_pmap != next_pmap)
5356 continue;
5357 goto next_chunk;
5358 }
5359 } else if (start_di)
5360 pmap_delayed_invl_start();
5361 PG_G = pmap_global_bit(pmap);
5362 PG_A = pmap_accessed_bit(pmap);
5363 PG_M = pmap_modified_bit(pmap);
5364 PG_RW = pmap_rw_bit(pmap);
5365 if (restart)
5366 continue;
5367 }
5368
5369 /*
5370 * Destroy every non-wired, 4 KB page mapping in the chunk.
5371 */
5372 freed = 0;
5373 for (field = 0; field < _NPCM; field++) {
5374 for (inuse = ~pc->pc_map[field] & pc_freemask[field];
5375 inuse != 0; inuse &= ~(1UL << bit)) {
5376 bit = bsfq(inuse);
5377 pv = &pc->pc_pventry[field * 64 + bit];
5378 va = pv->pv_va;
5379 pde = pmap_pde(pmap, va);
5380 if ((*pde & PG_PS) != 0)
5381 continue;
5382 pte = pmap_pde_to_pte(pde, va);
5383 if ((*pte & PG_W) != 0)
5384 continue;
5385 tpte = pte_load_clear(pte);
5386 if ((tpte & PG_G) != 0)
5387 pmap_invalidate_page(pmap, va);
5388 m = PHYS_TO_VM_PAGE(tpte & PG_FRAME);
5389 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
5390 vm_page_dirty(m);
5391 if ((tpte & PG_A) != 0)
5392 vm_page_aflag_set(m, PGA_REFERENCED);
5393 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
5394 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
5395 m->md.pv_gen++;
5396 if (TAILQ_EMPTY(&m->md.pv_list) &&
5397 (m->flags & PG_FICTITIOUS) == 0) {
5398 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
5399 if (TAILQ_EMPTY(&pvh->pv_list)) {
5400 vm_page_aflag_clear(m,
5401 PGA_WRITEABLE);
5402 }
5403 }
5404 pmap_delayed_invl_page(m);
5405 pc->pc_map[field] |= 1UL << bit;
5406 pmap_unuse_pt(pmap, va, *pde, &free);
5407 freed++;
5408 }
5409 }
5410 if (freed == 0) {
5411 mtx_lock(&pvc->pvc_lock);
5412 goto next_chunk;
5413 }
5414 /* Every freed mapping is for a 4 KB page. */
5415 pmap_resident_count_adj(pmap, -freed);
5416 PV_STAT(counter_u64_add(pv_entry_frees, freed));
5417 PV_STAT(counter_u64_add(pv_entry_spare, freed));
5418 PV_STAT(counter_u64_add(pv_entry_count, -freed));
5419 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
5420 if (pc_is_free(pc)) {
5421 PV_STAT(counter_u64_add(pv_entry_spare, -_NPCPV));
5422 PV_STAT(counter_u64_add(pc_chunk_count, -1));
5423 PV_STAT(counter_u64_add(pc_chunk_frees, 1));
5424 /* Entire chunk is free; return it. */
5425 m_pc = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
5426 dump_drop_page(m_pc->phys_addr);
5427 mtx_lock(&pvc->pvc_lock);
5428 TAILQ_REMOVE(&pvc->pvc_list, pc, pc_lru);
5429 break;
5430 }
5431 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
5432 mtx_lock(&pvc->pvc_lock);
5433 /* One freed pv entry in locked_pmap is sufficient. */
5434 if (pmap == locked_pmap)
5435 break;
5436 next_chunk:
5437 TAILQ_REMOVE(&pvc->pvc_list, pc_marker, pc_lru);
5438 TAILQ_INSERT_AFTER(&pvc->pvc_list, pc, pc_marker, pc_lru);
5439 if (pvc->active_reclaims == 1 && pmap != NULL) {
5440 /*
5441 * Rotate the pv chunks list so that we do not
5442 * scan the same pv chunks that could not be
5443 * freed (because they contained a wired
5444 * and/or superpage mapping) on every
5445 * invocation of reclaim_pv_chunk().
5446 */
5447 while ((pc = TAILQ_FIRST(&pvc->pvc_list)) != pc_marker) {
5448 MPASS(pc->pc_pmap != NULL);
5449 TAILQ_REMOVE(&pvc->pvc_list, pc, pc_lru);
5450 TAILQ_INSERT_TAIL(&pvc->pvc_list, pc, pc_lru);
5451 }
5452 }
5453 }
5454 TAILQ_REMOVE(&pvc->pvc_list, pc_marker, pc_lru);
5455 TAILQ_REMOVE(&pvc->pvc_list, pc_marker_end, pc_lru);
5456 pvc->active_reclaims--;
5457 mtx_unlock(&pvc->pvc_lock);
5458 reclaim_pv_chunk_leave_pmap(pmap, locked_pmap, start_di);
5459 if (m_pc == NULL && !SLIST_EMPTY(&free)) {
5460 m_pc = SLIST_FIRST(&free);
5461 SLIST_REMOVE_HEAD(&free, plinks.s.ss);
5462 /* Recycle a freed page table page. */
5463 m_pc->ref_count = 1;
5464 }
5465 vm_page_free_pages_toq(&free, true);
5466 return (m_pc);
5467 }
5468
5469 static vm_page_t
5470 reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp)
5471 {
5472 vm_page_t m;
5473 int i, domain;
5474
5475 domain = PCPU_GET(domain);
5476 for (i = 0; i < vm_ndomains; i++) {
5477 m = reclaim_pv_chunk_domain(locked_pmap, lockp, domain);
5478 if (m != NULL)
5479 break;
5480 domain = (domain + 1) % vm_ndomains;
5481 }
5482
5483 return (m);
5484 }
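/*
 * reclaim_pv_chunk() above first tries to reclaim from chunks belonging to
 * the calling CPU's memory domain and only then walks the remaining domains
 * in round-robin order, preserving locality of the reclaimed chunk page.
 */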
5485
5486 /*
5487 * free the pv_entry back to the free list
5488 */
5489 static void
5490 free_pv_entry(pmap_t pmap, pv_entry_t pv)
5491 {
5492 struct pv_chunk *pc;
5493 int idx, field, bit;
5494
5495 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5496 PV_STAT(counter_u64_add(pv_entry_frees, 1));
5497 PV_STAT(counter_u64_add(pv_entry_spare, 1));
5498 PV_STAT(counter_u64_add(pv_entry_count, -1));
5499 pc = pv_to_chunk(pv);
5500 idx = pv - &pc->pc_pventry[0];
5501 field = idx / 64;
5502 bit = idx % 64;
5503 pc->pc_map[field] |= 1ul << bit;
5504 if (!pc_is_free(pc)) {
5505 /* 98% of the time, pc is already at the head of the list. */
5506 if (__predict_false(pc != TAILQ_FIRST(&pmap->pm_pvchunk))) {
5507 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
5508 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
5509 }
5510 return;
5511 }
5512 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
5513 free_pv_chunk(pc);
5514 }
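/*
 * Illustration of the bitmap arithmetic in free_pv_entry() (assuming the
 * usual three-word pc_map layout): freeing the pv entry at chunk index 100
 * yields field = 100 / 64 = 1 and bit = 100 % 64 = 36, so the function sets
 * bit 36 of pc->pc_map[1] to mark that slot free again.
 */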
5515
5516 static void
5517 free_pv_chunk_dequeued(struct pv_chunk *pc)
5518 {
5519 vm_page_t m;
5520
5521 PV_STAT(counter_u64_add(pv_entry_spare, -_NPCPV));
5522 PV_STAT(counter_u64_add(pc_chunk_count, -1));
5523 PV_STAT(counter_u64_add(pc_chunk_frees, 1));
5524 counter_u64_add(pv_page_count, -1);
5525 /* entire chunk is free, return it */
5526 m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
5527 dump_drop_page(m->phys_addr);
5528 vm_page_unwire_noq(m);
5529 vm_page_free(m);
5530 }
5531
5532 static void
5533 free_pv_chunk(struct pv_chunk *pc)
5534 {
5535 struct pv_chunks_list *pvc;
5536
5537 pvc = &pv_chunks[pc_to_domain(pc)];
5538 mtx_lock(&pvc->pvc_lock);
5539 TAILQ_REMOVE(&pvc->pvc_list, pc, pc_lru);
5540 mtx_unlock(&pvc->pvc_lock);
5541 free_pv_chunk_dequeued(pc);
5542 }
5543
5544 static void
5545 free_pv_chunk_batch(struct pv_chunklist *batch)
5546 {
5547 struct pv_chunks_list *pvc;
5548 struct pv_chunk *pc, *npc;
5549 int i;
5550
5551 for (i = 0; i < vm_ndomains; i++) {
5552 if (TAILQ_EMPTY(&batch[i]))
5553 continue;
5554 pvc = &pv_chunks[i];
5555 mtx_lock(&pvc->pvc_lock);
5556 TAILQ_FOREACH(pc, &batch[i], pc_list) {
5557 TAILQ_REMOVE(&pvc->pvc_list, pc, pc_lru);
5558 }
5559 mtx_unlock(&pvc->pvc_lock);
5560 }
5561
5562 for (i = 0; i < vm_ndomains; i++) {
5563 TAILQ_FOREACH_SAFE(pc, &batch[i], pc_list, npc) {
5564 free_pv_chunk_dequeued(pc);
5565 }
5566 }
5567 }
5568
5569 /*
5570 * Returns a new PV entry, allocating a new PV chunk from the system when
5571 * needed. If this PV chunk allocation fails and a PV list lock pointer was
5572 * given, a PV chunk is reclaimed from an arbitrary pmap. Otherwise, NULL is
5573 * returned.
5574 *
5575 * The given PV list lock may be released.
5576 */
5577 static pv_entry_t
5578 get_pv_entry(pmap_t pmap, struct rwlock **lockp)
5579 {
5580 struct pv_chunks_list *pvc;
5581 int bit, field;
5582 pv_entry_t pv;
5583 struct pv_chunk *pc;
5584 vm_page_t m;
5585
5586 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5587 PV_STAT(counter_u64_add(pv_entry_allocs, 1));
5588 retry:
5589 pc = TAILQ_FIRST(&pmap->pm_pvchunk);
5590 if (pc != NULL) {
5591 for (field = 0; field < _NPCM; field++) {
5592 if (pc->pc_map[field]) {
5593 bit = bsfq(pc->pc_map[field]);
5594 break;
5595 }
5596 }
5597 if (field < _NPCM) {
5598 pv = &pc->pc_pventry[field * 64 + bit];
5599 pc->pc_map[field] &= ~(1ul << bit);
5600 /* If this was the last item, move it to tail */
5601 if (pc_is_full(pc)) {
5602 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
5603 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc,
5604 pc_list);
5605 }
5606 PV_STAT(counter_u64_add(pv_entry_count, 1));
5607 PV_STAT(counter_u64_add(pv_entry_spare, -1));
5608 return (pv);
5609 }
5610 }
5611 /* No free items, allocate another chunk */
5612 m = vm_page_alloc_noobj(VM_ALLOC_WIRED);
5613 if (m == NULL) {
5614 if (lockp == NULL) {
5615 PV_STAT(counter_u64_add(pc_chunk_tryfail, 1));
5616 return (NULL);
5617 }
5618 m = reclaim_pv_chunk(pmap, lockp);
5619 if (m == NULL)
5620 goto retry;
5621 } else
5622 counter_u64_add(pv_page_count, 1);
5623 PV_STAT(counter_u64_add(pc_chunk_count, 1));
5624 PV_STAT(counter_u64_add(pc_chunk_allocs, 1));
5625 dump_add_page(m->phys_addr);
5626 pc = (void *)PHYS_TO_DMAP(m->phys_addr);
5627 pc->pc_pmap = pmap;
5628 pc->pc_map[0] = PC_FREEN & ~1ul; /* preallocated bit 0 */
5629 pc->pc_map[1] = PC_FREEN;
5630 pc->pc_map[2] = PC_FREEL;
5631 pvc = &pv_chunks[vm_page_domain(m)];
5632 mtx_lock(&pvc->pvc_lock);
5633 TAILQ_INSERT_TAIL(&pvc->pvc_list, pc, pc_lru);
5634 mtx_unlock(&pvc->pvc_lock);
5635 pv = &pc->pc_pventry[0];
5636 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
5637 PV_STAT(counter_u64_add(pv_entry_count, 1));
5638 PV_STAT(counter_u64_add(pv_entry_spare, _NPCPV - 1));
5639 return (pv);
5640 }
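/*
 * Usage note for get_pv_entry(): the caller must hold the pmap lock, and the
 * PV list lock identified by "lockp" may be dropped and reacquired.  Passing
 * a NULL "lockp" disables reclamation, so the function may return NULL; this
 * is how pmap_try_insert_pv_entry() below allocates a pv entry only when no
 * reclamation is required.
 */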
5641
5642 /*
5643 * Returns the number of one bits within the given PV chunk map.
5644 *
5645 * The errata for Intel processors state that "POPCNT Instruction May
5646 * Take Longer to Execute Than Expected". It is believed that the
5647 * issue is the spurious dependency on the destination register.
5648 * Provide a hint to the register rename logic that the destination
5649 * value is overwritten, by clearing it, as suggested in the
5650 * optimization manual. It should be cheap for unaffected processors
5651 * as well.
5652 *
5653 * Reference numbers for the errata are
5654 * 4th Gen Core: HSD146
5655 * 5th Gen Core: BDM85
5656 * 6th Gen Core: SKL029
5657 */
5658 static int
5659 popcnt_pc_map_pq(uint64_t *map)
5660 {
5661 u_long result, tmp;
5662
5663 __asm __volatile("xorl %k0,%k0;popcntq %2,%0;"
5664 "xorl %k1,%k1;popcntq %3,%1;addl %k1,%k0;"
5665 "xorl %k1,%k1;popcntq %4,%1;addl %k1,%k0"
5666 : "=&r" (result), "=&r" (tmp)
5667 : "m" (map[0]), "m" (map[1]), "m" (map[2]));
5668 return (result);
5669 }
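/*
 * Functionally, the assembly above is equivalent to the C expression
 *
 *	__builtin_popcountll(map[0]) + __builtin_popcountll(map[1]) +
 *	    __builtin_popcountll(map[2])
 *
 * with the explicit xor of each destination register serving only to break
 * the false output dependency described in the errata above.
 */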
5670
5671 /*
5672 * Ensure that the number of spare PV entries in the specified pmap meets or
5673 * exceeds the given count, "needed".
5674 *
5675 * The given PV list lock may be released.
5676 */
5677 static void
5678 reserve_pv_entries(pmap_t pmap, int needed, struct rwlock **lockp)
5679 {
5680 struct pv_chunks_list *pvc;
5681 struct pch new_tail[PMAP_MEMDOM];
5682 struct pv_chunk *pc;
5683 vm_page_t m;
5684 int avail, free, i;
5685 bool reclaimed;
5686
5687 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5688 KASSERT(lockp != NULL, ("reserve_pv_entries: lockp is NULL"));
5689
5690 /*
5691 * Newly allocated PV chunks must be stored in a private list until
5692 * the required number of PV chunks have been allocated. Otherwise,
5693 * reclaim_pv_chunk() could recycle one of these chunks. In
5694 * contrast, these chunks must be added to the pmap upon allocation.
5695 */
5696 for (i = 0; i < PMAP_MEMDOM; i++)
5697 TAILQ_INIT(&new_tail[i]);
5698 retry:
5699 avail = 0;
5700 TAILQ_FOREACH(pc, &pmap->pm_pvchunk, pc_list) {
5701 #ifndef __POPCNT__
5702 if ((cpu_feature2 & CPUID2_POPCNT) == 0)
5703 bit_count((bitstr_t *)pc->pc_map, 0,
5704 sizeof(pc->pc_map) * NBBY, &free);
5705 else
5706 #endif
5707 free = popcnt_pc_map_pq(pc->pc_map);
5708 if (free == 0)
5709 break;
5710 avail += free;
5711 if (avail >= needed)
5712 break;
5713 }
5714 for (reclaimed = false; avail < needed; avail += _NPCPV) {
5715 m = vm_page_alloc_noobj(VM_ALLOC_WIRED);
5716 if (m == NULL) {
5717 m = reclaim_pv_chunk(pmap, lockp);
5718 if (m == NULL)
5719 goto retry;
5720 reclaimed = true;
5721 } else
5722 counter_u64_add(pv_page_count, 1);
5723 PV_STAT(counter_u64_add(pc_chunk_count, 1));
5724 PV_STAT(counter_u64_add(pc_chunk_allocs, 1));
5725 dump_add_page(m->phys_addr);
5726 pc = (void *)PHYS_TO_DMAP(m->phys_addr);
5727 pc->pc_pmap = pmap;
5728 pc->pc_map[0] = PC_FREEN;
5729 pc->pc_map[1] = PC_FREEN;
5730 pc->pc_map[2] = PC_FREEL;
5731 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
5732 TAILQ_INSERT_TAIL(&new_tail[vm_page_domain(m)], pc, pc_lru);
5733 PV_STAT(counter_u64_add(pv_entry_spare, _NPCPV));
5734
5735 /*
5736 * The reclaim might have freed a chunk from the current pmap.
5737 * If that chunk contained available entries, we need to
5738 * re-count the number of available entries.
5739 */
5740 if (reclaimed)
5741 goto retry;
5742 }
5743 for (i = 0; i < vm_ndomains; i++) {
5744 if (TAILQ_EMPTY(&new_tail[i]))
5745 continue;
5746 pvc = &pv_chunks[i];
5747 mtx_lock(&pvc->pvc_lock);
5748 TAILQ_CONCAT(&pvc->pvc_list, &new_tail[i], pc_lru);
5749 mtx_unlock(&pvc->pvc_lock);
5750 }
5751 }
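/*
 * For example, pmap_demote_pde_locked() below calls
 * reserve_pv_entries(pmap, NPTEPG - 1, lockp), i.e. it reserves 511 spare
 * entries before demoting a managed 2MB mapping: one for every new 4KB
 * mapping except the one whose pv entry is transferred from the 2MB page.
 */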
5752
5753 /*
5754 * First find and then remove the pv entry for the specified pmap and virtual
5755 * address from the specified pv list. Returns the pv entry if found and NULL
5756 * otherwise. This operation can be performed on pv lists for either 4KB or
5757 * 2MB page mappings.
5758 */
5759 static __inline pv_entry_t
5760 pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
5761 {
5762 pv_entry_t pv;
5763
5764 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
5765 if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
5766 TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
5767 pvh->pv_gen++;
5768 break;
5769 }
5770 }
5771 return (pv);
5772 }
5773
5774 /*
5775 * After demotion from a 2MB page mapping to 512 4KB page mappings,
5776 * destroy the pv entry for the 2MB page mapping and reinstantiate the pv
5777 * entries for each of the 4KB page mappings.
5778 */
5779 static void
5780 pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
5781 struct rwlock **lockp)
5782 {
5783 struct md_page *pvh;
5784 struct pv_chunk *pc;
5785 pv_entry_t pv;
5786 vm_offset_t va_last;
5787 vm_page_t m;
5788 int bit, field;
5789
5790 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5791 KASSERT((pa & PDRMASK) == 0,
5792 ("pmap_pv_demote_pde: pa is not 2mpage aligned"));
5793 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
5794
5795 /*
5796 * Transfer the 2mpage's pv entry for this mapping to the first
5797 * page's pv list. Once this transfer begins, the pv list lock
5798 * must not be released until the last pv entry is reinstantiated.
5799 */
5800 pvh = pa_to_pvh(pa);
5801 va = trunc_2mpage(va);
5802 pv = pmap_pvh_remove(pvh, pmap, va);
5803 KASSERT(pv != NULL, ("pmap_pv_demote_pde: pv not found"));
5804 m = PHYS_TO_VM_PAGE(pa);
5805 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
5806 m->md.pv_gen++;
5807 /* Instantiate the remaining NPTEPG - 1 pv entries. */
5808 PV_STAT(counter_u64_add(pv_entry_allocs, NPTEPG - 1));
5809 va_last = va + NBPDR - PAGE_SIZE;
5810 for (;;) {
5811 pc = TAILQ_FIRST(&pmap->pm_pvchunk);
5812 KASSERT(!pc_is_full(pc), ("pmap_pv_demote_pde: missing spare"));
5813 for (field = 0; field < _NPCM; field++) {
5814 while (pc->pc_map[field]) {
5815 bit = bsfq(pc->pc_map[field]);
5816 pc->pc_map[field] &= ~(1ul << bit);
5817 pv = &pc->pc_pventry[field * 64 + bit];
5818 va += PAGE_SIZE;
5819 pv->pv_va = va;
5820 m++;
5821 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
5822 ("pmap_pv_demote_pde: page %p is not managed", m));
5823 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
5824 m->md.pv_gen++;
5825 if (va == va_last)
5826 goto out;
5827 }
5828 }
5829 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
5830 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
5831 }
5832 out:
5833 if (pc_is_full(pc)) {
5834 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
5835 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
5836 }
5837 PV_STAT(counter_u64_add(pv_entry_count, NPTEPG - 1));
5838 PV_STAT(counter_u64_add(pv_entry_spare, -(NPTEPG - 1)));
5839 }
5840
5841 #if VM_NRESERVLEVEL > 0
5842 /*
5843 * After promotion from 512 4KB page mappings to a single 2MB page mapping,
5844 * replace the many pv entries for the 4KB page mappings by a single pv entry
5845 * for the 2MB page mapping.
5846 */
5847 static void
5848 pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
5849 struct rwlock **lockp)
5850 {
5851 struct md_page *pvh;
5852 pv_entry_t pv;
5853 vm_offset_t va_last;
5854 vm_page_t m;
5855
5856 KASSERT((pa & PDRMASK) == 0,
5857 ("pmap_pv_promote_pde: pa is not 2mpage aligned"));
5858 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
5859
5860 /*
5861 * Transfer the first page's pv entry for this mapping to the 2mpage's
5862 * pv list. Aside from avoiding the cost of a call to get_pv_entry(),
5863 * a transfer avoids the possibility that get_pv_entry() calls
5864 * reclaim_pv_chunk() and that reclaim_pv_chunk() removes one of the
5865 * mappings that is being promoted.
5866 */
5867 m = PHYS_TO_VM_PAGE(pa);
5868 va = trunc_2mpage(va);
5869 pv = pmap_pvh_remove(&m->md, pmap, va);
5870 KASSERT(pv != NULL, ("pmap_pv_promote_pde: pv not found"));
5871 pvh = pa_to_pvh(pa);
5872 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
5873 pvh->pv_gen++;
5874 /* Free the remaining NPTEPG - 1 pv entries. */
5875 va_last = va + NBPDR - PAGE_SIZE;
5876 do {
5877 m++;
5878 va += PAGE_SIZE;
5879 pmap_pvh_free(&m->md, pmap, va);
5880 } while (va < va_last);
5881 }
5882 #endif /* VM_NRESERVLEVEL > 0 */
5883
5884 /*
5885 * First find and then destroy the pv entry for the specified pmap and virtual
5886 * address. This operation can be performed on pv lists for either 4KB or 2MB
5887 * page mappings.
5888 */
5889 static void
5890 pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
5891 {
5892 pv_entry_t pv;
5893
5894 pv = pmap_pvh_remove(pvh, pmap, va);
5895 KASSERT(pv != NULL, ("pmap_pvh_free: pv not found"));
5896 free_pv_entry(pmap, pv);
5897 }
5898
5899 /*
5900 * Conditionally create the PV entry for a 4KB page mapping if the required
5901 * memory can be allocated without resorting to reclamation.
5902 */
5903 static bool
5904 pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m,
5905 struct rwlock **lockp)
5906 {
5907 pv_entry_t pv;
5908
5909 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5910 /* Pass NULL instead of the lock pointer to disable reclamation. */
5911 if ((pv = get_pv_entry(pmap, NULL)) != NULL) {
5912 pv->pv_va = va;
5913 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
5914 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
5915 m->md.pv_gen++;
5916 return (true);
5917 } else
5918 return (false);
5919 }
5920
5921 /*
5922 * Create the PV entry for a 2MB page mapping. Always returns true unless the
5923 * flag PMAP_ENTER_NORECLAIM is specified. If that flag is specified, returns
5924 * false if the PV entry cannot be allocated without resorting to reclamation.
5925 */
5926 static bool
5927 pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, pd_entry_t pde, u_int flags,
5928 struct rwlock **lockp)
5929 {
5930 struct md_page *pvh;
5931 pv_entry_t pv;
5932 vm_paddr_t pa;
5933
5934 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5935 /* Pass NULL instead of the lock pointer to disable reclamation. */
5936 if ((pv = get_pv_entry(pmap, (flags & PMAP_ENTER_NORECLAIM) != 0 ?
5937 NULL : lockp)) == NULL)
5938 return (false);
5939 pv->pv_va = va;
5940 pa = pde & PG_PS_FRAME;
5941 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
5942 pvh = pa_to_pvh(pa);
5943 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
5944 pvh->pv_gen++;
5945 return (true);
5946 }
5947
5948 /*
5949 * Fills a page table page with mappings to consecutive physical pages.
5950 */
5951 static void
5952 pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte)
5953 {
5954 pt_entry_t *pte;
5955
5956 for (pte = firstpte; pte < firstpte + NPTEPG; pte++) {
5957 *pte = newpte;
5958 newpte += PAGE_SIZE;
5959 }
5960 }
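/*
 * Illustration: if "newpte" maps physical address pa, the loop above writes
 * pa, pa + PAGE_SIZE, ..., pa + NBPDR - PAGE_SIZE into the NPTEPG (512)
 * entries of the page table page, all with identical attribute bits.
 */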
5961
5962 /*
5963 * Tries to demote a 2MB page mapping. If demotion fails, the 2MB page
5964 * mapping is invalidated.
5965 */
5966 static bool
5967 pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
5968 {
5969 struct rwlock *lock;
5970 bool rv;
5971
5972 lock = NULL;
5973 rv = pmap_demote_pde_locked(pmap, pde, va, &lock);
5974 if (lock != NULL)
5975 rw_wunlock(lock);
5976 return (rv);
5977 }
5978
5979 static void
5980 pmap_demote_pde_check(pt_entry_t *firstpte __unused, pt_entry_t newpte __unused)
5981 {
5982 #ifdef INVARIANTS
5983 #ifdef DIAGNOSTIC
5984 pt_entry_t *xpte, *ypte;
5985
5986 for (xpte = firstpte; xpte < firstpte + NPTEPG;
5987 xpte++, newpte += PAGE_SIZE) {
5988 if ((*xpte & PG_FRAME) != (newpte & PG_FRAME)) {
5989 printf("pmap_demote_pde: xpte %zd and newpte map "
5990 "different pages: found %#lx, expected %#lx\n",
5991 xpte - firstpte, *xpte, newpte);
5992 printf("page table dump\n");
5993 for (ypte = firstpte; ypte < firstpte + NPTEPG; ypte++)
5994 printf("%zd %#lx\n", ypte - firstpte, *ypte);
5995 panic("firstpte");
5996 }
5997 }
5998 #else
5999 KASSERT((*firstpte & PG_FRAME) == (newpte & PG_FRAME),
6000 ("pmap_demote_pde: firstpte and newpte map different physical"
6001 " addresses"));
6002 #endif
6003 #endif
6004 }
6005
6006 static void
6007 pmap_demote_pde_abort(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
6008 pd_entry_t oldpde, struct rwlock **lockp)
6009 {
6010 struct spglist free;
6011 vm_offset_t sva;
6012
6013 SLIST_INIT(&free);
6014 sva = trunc_2mpage(va);
6015 pmap_remove_pde(pmap, pde, sva, &free, lockp);
6016 if ((oldpde & pmap_global_bit(pmap)) == 0)
6017 pmap_invalidate_pde_page(pmap, sva, oldpde);
6018 vm_page_free_pages_toq(&free, true);
6019 CTR2(KTR_PMAP, "pmap_demote_pde: failure for va %#lx in pmap %p",
6020 va, pmap);
6021 }
6022
6023 static bool
6024 pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
6025 struct rwlock **lockp)
6026 {
6027 pd_entry_t newpde, oldpde;
6028 pt_entry_t *firstpte, newpte;
6029 pt_entry_t PG_A, PG_G, PG_M, PG_PKU_MASK, PG_RW, PG_V;
6030 vm_paddr_t mptepa;
6031 vm_page_t mpte;
6032 int PG_PTE_CACHE;
6033 bool in_kernel;
6034
6035 PG_A = pmap_accessed_bit(pmap);
6036 PG_G = pmap_global_bit(pmap);
6037 PG_M = pmap_modified_bit(pmap);
6038 PG_RW = pmap_rw_bit(pmap);
6039 PG_V = pmap_valid_bit(pmap);
6040 PG_PTE_CACHE = pmap_cache_mask(pmap, false);
6041 PG_PKU_MASK = pmap_pku_mask_bit(pmap);
6042
6043 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
6044 in_kernel = va >= VM_MAXUSER_ADDRESS;
6045 oldpde = *pde;
6046 KASSERT((oldpde & (PG_PS | PG_V)) == (PG_PS | PG_V),
6047 ("pmap_demote_pde: oldpde is missing PG_PS and/or PG_V"));
6048
6049 /*
6050 * Invalidate the 2MB page mapping and return "failure" if the
6051 * mapping was never accessed.
6052 */
6053 if ((oldpde & PG_A) == 0) {
6054 KASSERT((oldpde & PG_W) == 0,
6055 ("pmap_demote_pde: a wired mapping is missing PG_A"));
6056 pmap_demote_pde_abort(pmap, va, pde, oldpde, lockp);
6057 return (false);
6058 }
6059
6060 mpte = pmap_remove_pt_page(pmap, va);
6061 if (mpte == NULL) {
6062 KASSERT((oldpde & PG_W) == 0,
6063 ("pmap_demote_pde: page table page for a wired mapping"
6064 " is missing"));
6065
6066 /*
6067 * If the page table page is missing and the mapping
6068 * is for a kernel address, the mapping must belong to
6069 * the direct map. Page table pages are preallocated
6070 * for every other part of the kernel address space,
6071 * so the direct map region is the only part of the
6072 * kernel address space that must be handled here.
6073 */
6074 KASSERT(!in_kernel || (va >= DMAP_MIN_ADDRESS &&
6075 va < DMAP_MAX_ADDRESS),
6076 ("pmap_demote_pde: No saved mpte for va %#lx", va));
6077
6078 /*
6079 * If the 2MB page mapping belongs to the direct map
6080 * region of the kernel's address space, then the page
6081 * allocation request specifies the highest possible
6082 * priority (VM_ALLOC_INTERRUPT). Otherwise, the
6083 * priority is normal.
6084 */
6085 mpte = pmap_alloc_pt_page(pmap, pmap_pde_pindex(va),
6086 (in_kernel ? VM_ALLOC_INTERRUPT : 0) | VM_ALLOC_WIRED);
6087
6088 /*
6089 * If the allocation of the new page table page fails,
6090 * invalidate the 2MB page mapping and return "failure".
6091 */
6092 if (mpte == NULL) {
6093 pmap_demote_pde_abort(pmap, va, pde, oldpde, lockp);
6094 return (false);
6095 }
6096
6097 if (!in_kernel)
6098 mpte->ref_count = NPTEPG;
6099 }
6100 mptepa = VM_PAGE_TO_PHYS(mpte);
6101 firstpte = (pt_entry_t *)PHYS_TO_DMAP(mptepa);
6102 newpde = mptepa | PG_M | PG_A | (oldpde & PG_U) | PG_RW | PG_V;
6103 KASSERT((oldpde & (PG_M | PG_RW)) != PG_RW,
6104 ("pmap_demote_pde: oldpde is missing PG_M"));
6105 newpte = oldpde & ~PG_PS;
6106 newpte = pmap_swap_pat(pmap, newpte);
6107
6108 /*
6109 * If the PTP is not leftover from an earlier promotion or it does not
6110 * have PG_A set in every PTE, then fill it. The new PTEs will all
6111 * have PG_A set.
6112 */
6113 if (!vm_page_all_valid(mpte))
6114 pmap_fill_ptp(firstpte, newpte);
6115
6116 pmap_demote_pde_check(firstpte, newpte);
6117
6118 /*
6119 * If the mapping has changed attributes, update the PTEs.
6120 */
6121 if ((*firstpte & PG_PTE_PROMOTE) != (newpte & PG_PTE_PROMOTE))
6122 pmap_fill_ptp(firstpte, newpte);
6123
6124 /*
6125 * The spare PV entries must be reserved prior to demoting the
6126 * mapping, that is, prior to changing the PDE. Otherwise, the state
6127 * of the PDE and the PV lists will be inconsistent, which can result
6128 * in reclaim_pv_chunk() attempting to remove a PV entry from the
6129 * wrong PV list and pmap_pv_demote_pde() failing to find the expected
6130 * PV entry for the 2MB page mapping that is being demoted.
6131 */
6132 if ((oldpde & PG_MANAGED) != 0)
6133 reserve_pv_entries(pmap, NPTEPG - 1, lockp);
6134
6135 /*
6136 * Demote the mapping. This pmap is locked. The old PDE has
6137 * PG_A set. If the old PDE has PG_RW set, it also has PG_M
6138 * set. Thus, there is no danger of a race with another
6139 * processor changing the setting of PG_A and/or PG_M between
6140 * the read above and the store below.
6141 */
6142 if (workaround_erratum383)
6143 pmap_update_pde(pmap, va, pde, newpde);
6144 else
6145 pde_store(pde, newpde);
6146
6147 /*
6148 * Invalidate a stale recursive mapping of the page table page.
6149 */
6150 if (in_kernel)
6151 pmap_invalidate_page(pmap, (vm_offset_t)vtopte(va));
6152
6153 /*
6154 * Demote the PV entry.
6155 */
6156 if ((oldpde & PG_MANAGED) != 0)
6157 pmap_pv_demote_pde(pmap, va, oldpde & PG_PS_FRAME, lockp);
6158
6159 counter_u64_add(pmap_pde_demotions, 1);
6160 CTR2(KTR_PMAP, "pmap_demote_pde: success for va %#lx in pmap %p",
6161 va, pmap);
6162 return (true);
6163 }
6164
6165 /*
6166 * pmap_remove_kernel_pde: Remove a kernel superpage mapping.
6167 */
6168 static void
6169 pmap_remove_kernel_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
6170 {
6171 pd_entry_t newpde;
6172 vm_paddr_t mptepa;
6173 vm_page_t mpte;
6174
6175 KASSERT(pmap == kernel_pmap, ("pmap %p is not kernel_pmap", pmap));
6176 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
6177 mpte = pmap_remove_pt_page(pmap, va);
6178 if (mpte == NULL)
6179 panic("pmap_remove_kernel_pde: Missing pt page.");
6180
6181 mptepa = VM_PAGE_TO_PHYS(mpte);
6182 newpde = mptepa | X86_PG_M | X86_PG_A | X86_PG_RW | X86_PG_V;
6183
6184 /*
6185 * If this page table page was unmapped by a promotion, then it
6186 * contains valid mappings. Zero it to invalidate those mappings.
6187 */
6188 if (vm_page_any_valid(mpte))
6189 pagezero((void *)PHYS_TO_DMAP(mptepa));
6190
6191 /*
6192 * Demote the mapping.
6193 */
6194 if (workaround_erratum383)
6195 pmap_update_pde(pmap, va, pde, newpde);
6196 else
6197 pde_store(pde, newpde);
6198
6199 /*
6200 * Invalidate a stale recursive mapping of the page table page.
6201 */
6202 pmap_invalidate_page(pmap, (vm_offset_t)vtopte(va));
6203 }
6204
6205 /*
6206 * pmap_remove_pde: do the things to unmap a superpage in a process
6207 */
6208 static int
6209 pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
6210 struct spglist *free, struct rwlock **lockp)
6211 {
6212 struct md_page *pvh;
6213 pd_entry_t oldpde;
6214 vm_offset_t eva, va;
6215 vm_page_t m, mpte;
6216 pt_entry_t PG_G, PG_A, PG_M, PG_RW;
6217
6218 PG_G = pmap_global_bit(pmap);
6219 PG_A = pmap_accessed_bit(pmap);
6220 PG_M = pmap_modified_bit(pmap);
6221 PG_RW = pmap_rw_bit(pmap);
6222
6223 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
6224 KASSERT((sva & PDRMASK) == 0,
6225 ("pmap_remove_pde: sva is not 2mpage aligned"));
6226 oldpde = pte_load_clear(pdq);
6227 if (oldpde & PG_W)
6228 pmap->pm_stats.wired_count -= NBPDR / PAGE_SIZE;
6229 if ((oldpde & PG_G) != 0)
6230 pmap_invalidate_pde_page(kernel_pmap, sva, oldpde);
6231 pmap_resident_count_adj(pmap, -NBPDR / PAGE_SIZE);
6232 if (oldpde & PG_MANAGED) {
6233 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, oldpde & PG_PS_FRAME);
6234 pvh = pa_to_pvh(oldpde & PG_PS_FRAME);
6235 pmap_pvh_free(pvh, pmap, sva);
6236 eva = sva + NBPDR;
6237 for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
6238 va < eva; va += PAGE_SIZE, m++) {
6239 if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW))
6240 vm_page_dirty(m);
6241 if (oldpde & PG_A)
6242 vm_page_aflag_set(m, PGA_REFERENCED);
6243 if (TAILQ_EMPTY(&m->md.pv_list) &&
6244 TAILQ_EMPTY(&pvh->pv_list))
6245 vm_page_aflag_clear(m, PGA_WRITEABLE);
6246 pmap_delayed_invl_page(m);
6247 }
6248 }
6249 if (pmap == kernel_pmap) {
6250 pmap_remove_kernel_pde(pmap, pdq, sva);
6251 } else {
6252 mpte = pmap_remove_pt_page(pmap, sva);
6253 if (mpte != NULL) {
6254 KASSERT(vm_page_any_valid(mpte),
6255 ("pmap_remove_pde: pte page not promoted"));
6256 pmap_pt_page_count_adj(pmap, -1);
6257 KASSERT(mpte->ref_count == NPTEPG,
6258 ("pmap_remove_pde: pte page ref count error"));
6259 mpte->ref_count = 0;
6260 pmap_add_delayed_free_list(mpte, free, false);
6261 }
6262 }
6263 return (pmap_unuse_pt(pmap, sva, *pmap_pdpe(pmap, sva), free));
6264 }
6265
6266 /*
6267 * pmap_remove_pte: do the things to unmap a page in a process
6268 */
6269 static int
6270 pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va,
6271 pd_entry_t ptepde, struct spglist *free, struct rwlock **lockp)
6272 {
6273 struct md_page *pvh;
6274 pt_entry_t oldpte, PG_A, PG_M, PG_RW;
6275 vm_page_t m;
6276
6277 PG_A = pmap_accessed_bit(pmap);
6278 PG_M = pmap_modified_bit(pmap);
6279 PG_RW = pmap_rw_bit(pmap);
6280
6281 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
6282 oldpte = pte_load_clear(ptq);
6283 if (oldpte & PG_W)
6284 pmap->pm_stats.wired_count -= 1;
6285 pmap_resident_count_adj(pmap, -1);
6286 if (oldpte & PG_MANAGED) {
6287 m = PHYS_TO_VM_PAGE(oldpte & PG_FRAME);
6288 if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
6289 vm_page_dirty(m);
6290 if (oldpte & PG_A)
6291 vm_page_aflag_set(m, PGA_REFERENCED);
6292 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
6293 pmap_pvh_free(&m->md, pmap, va);
6294 if (TAILQ_EMPTY(&m->md.pv_list) &&
6295 (m->flags & PG_FICTITIOUS) == 0) {
6296 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
6297 if (TAILQ_EMPTY(&pvh->pv_list))
6298 vm_page_aflag_clear(m, PGA_WRITEABLE);
6299 }
6300 pmap_delayed_invl_page(m);
6301 }
6302 return (pmap_unuse_pt(pmap, va, ptepde, free));
6303 }
6304
6305 /*
6306 * Remove a single page from a process address space
6307 */
6308 static void
6309 pmap_remove_page(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
6310 struct spglist *free)
6311 {
6312 struct rwlock *lock;
6313 pt_entry_t *pte, PG_V;
6314
6315 PG_V = pmap_valid_bit(pmap);
6316 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
6317 if ((*pde & PG_V) == 0)
6318 return;
6319 pte = pmap_pde_to_pte(pde, va);
6320 if ((*pte & PG_V) == 0)
6321 return;
6322 lock = NULL;
6323 pmap_remove_pte(pmap, pte, va, *pde, free, &lock);
6324 if (lock != NULL)
6325 rw_wunlock(lock);
6326 pmap_invalidate_page(pmap, va);
6327 }
6328
6329 /*
6330 * Removes the specified range of addresses from the page table page.
6331 */
6332 static bool
6333 pmap_remove_ptes(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
6334 pd_entry_t *pde, struct spglist *free, struct rwlock **lockp)
6335 {
6336 pt_entry_t PG_G, *pte;
6337 vm_offset_t va;
6338 bool anyvalid;
6339
6340 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
6341 PG_G = pmap_global_bit(pmap);
6342 anyvalid = false;
6343 va = eva;
6344 for (pte = pmap_pde_to_pte(pde, sva); sva != eva; pte++,
6345 sva += PAGE_SIZE) {
6346 if (*pte == 0) {
6347 if (va != eva) {
6348 pmap_invalidate_range(pmap, va, sva);
6349 va = eva;
6350 }
6351 continue;
6352 }
6353 if ((*pte & PG_G) == 0)
6354 anyvalid = true;
6355 else if (va == eva)
6356 va = sva;
6357 if (pmap_remove_pte(pmap, pte, sva, *pde, free, lockp)) {
6358 sva += PAGE_SIZE;
6359 break;
6360 }
6361 }
6362 if (va != eva)
6363 pmap_invalidate_range(pmap, va, sva);
6364 return (anyvalid);
6365 }
6366
6367 static void
6368 pmap_remove1(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, bool map_delete)
6369 {
6370 struct rwlock *lock;
6371 vm_page_t mt;
6372 vm_offset_t va_next;
6373 pml5_entry_t *pml5e;
6374 pml4_entry_t *pml4e;
6375 pdp_entry_t *pdpe;
6376 pd_entry_t ptpaddr, *pde;
6377 pt_entry_t PG_G, PG_V;
6378 struct spglist free;
6379 int anyvalid;
6380
6381 PG_G = pmap_global_bit(pmap);
6382 PG_V = pmap_valid_bit(pmap);
6383
6384 /*
6385 * If there are no resident pages besides the top level page
6386 * table page(s), there is nothing to do. Kernel pmap always
6387 * accounts whole preloaded area as resident, which makes its
6388 * resident count > 2.
6389 * Perform an unsynchronized read. This is, however, safe.
6390 */
6391 if (pmap->pm_stats.resident_count <= 1 + (pmap->pm_pmltopu != NULL ?
6392 1 : 0))
6393 return;
6394
6395 anyvalid = 0;
6396 SLIST_INIT(&free);
6397
6398 pmap_delayed_invl_start();
6399 PMAP_LOCK(pmap);
6400 if (map_delete)
6401 pmap_pkru_on_remove(pmap, sva, eva);
6402
6403 /*
6404 * Special handling for removing a single page: this is a very
6405 * common operation, so it is worth short-circuiting the general
6406 * loop below.
6407 */
6408 if (sva + PAGE_SIZE == eva) {
6409 pde = pmap_pde(pmap, sva);
6410 if (pde && (*pde & PG_PS) == 0) {
6411 pmap_remove_page(pmap, sva, pde, &free);
6412 goto out;
6413 }
6414 }
6415
6416 lock = NULL;
6417 for (; sva < eva; sva = va_next) {
6418 if (pmap->pm_stats.resident_count == 0)
6419 break;
6420
6421 if (pmap_is_la57(pmap)) {
6422 pml5e = pmap_pml5e(pmap, sva);
6423 if ((*pml5e & PG_V) == 0) {
6424 va_next = (sva + NBPML5) & ~PML5MASK;
6425 if (va_next < sva)
6426 va_next = eva;
6427 continue;
6428 }
6429 pml4e = pmap_pml5e_to_pml4e(pml5e, sva);
6430 } else {
6431 pml4e = pmap_pml4e(pmap, sva);
6432 }
6433 if ((*pml4e & PG_V) == 0) {
6434 va_next = (sva + NBPML4) & ~PML4MASK;
6435 if (va_next < sva)
6436 va_next = eva;
6437 continue;
6438 }
6439
6440 va_next = (sva + NBPDP) & ~PDPMASK;
6441 if (va_next < sva)
6442 va_next = eva;
6443 pdpe = pmap_pml4e_to_pdpe(pml4e, sva);
6444 if ((*pdpe & PG_V) == 0)
6445 continue;
6446 if ((*pdpe & PG_PS) != 0) {
6447 KASSERT(va_next <= eva,
6448 ("partial update of non-transparent 1G mapping "
6449 "pdpe %#lx sva %#lx eva %#lx va_next %#lx",
6450 *pdpe, sva, eva, va_next));
6451 MPASS(pmap != kernel_pmap); /* XXXKIB */
6452 MPASS((*pdpe & (PG_MANAGED | PG_G)) == 0);
6453 anyvalid = 1;
6454 *pdpe = 0;
6455 pmap_resident_count_adj(pmap, -NBPDP / PAGE_SIZE);
6456 mt = PHYS_TO_VM_PAGE(*pmap_pml4e(pmap, sva) & PG_FRAME);
6457 pmap_unwire_ptp(pmap, sva, mt, &free);
6458 continue;
6459 }
6460
6461 /*
6462 * Calculate index for next page table.
6463 */
6464 va_next = (sva + NBPDR) & ~PDRMASK;
6465 if (va_next < sva)
6466 va_next = eva;
6467
6468 pde = pmap_pdpe_to_pde(pdpe, sva);
6469 ptpaddr = *pde;
6470
6471 /*
6472 * Weed out invalid mappings.
6473 */
6474 if (ptpaddr == 0)
6475 continue;
6476
6477 /*
6478 * Check for large page.
6479 */
6480 if ((ptpaddr & PG_PS) != 0) {
6481 /*
6482 * Are we removing the entire large page? If not,
6483 * demote the mapping and fall through.
6484 */
6485 if (sva + NBPDR == va_next && eva >= va_next) {
6486 /*
6487 * The TLB entry for a PG_G mapping is
6488 * invalidated by pmap_remove_pde().
6489 */
6490 if ((ptpaddr & PG_G) == 0)
6491 anyvalid = 1;
6492 pmap_remove_pde(pmap, pde, sva, &free, &lock);
6493 continue;
6494 } else if (!pmap_demote_pde_locked(pmap, pde, sva,
6495 &lock)) {
6496 /* The large page mapping was destroyed. */
6497 continue;
6498 } else
6499 ptpaddr = *pde;
6500 }
6501
6502 /*
6503 * Limit our scan to either the end of the va represented
6504 * by the current page table page, or to the end of the
6505 * range being removed.
6506 */
6507 if (va_next > eva)
6508 va_next = eva;
6509
6510 if (pmap_remove_ptes(pmap, sva, va_next, pde, &free, &lock))
6511 anyvalid = 1;
6512 }
6513 if (lock != NULL)
6514 rw_wunlock(lock);
6515 out:
6516 if (anyvalid)
6517 pmap_invalidate_all(pmap);
6518 PMAP_UNLOCK(pmap);
6519 pmap_delayed_invl_finish();
6520 vm_page_free_pages_toq(&free, true);
6521 }
6522
6523 /*
6524 * Remove the given range of addresses from the specified map.
6525 *
6526 * It is assumed that the start and end are properly
6527 * rounded to the page size.
6528 */
6529 void
6530 pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
6531 {
6532 pmap_remove1(pmap, sva, eva, false);
6533 }
6534
6535 /*
6536 * Remove the given range of addresses as part of a logical unmap
6537 * operation. This has the effect of calling pmap_remove(), but
6538 * also clears any metadata that should persist for the lifetime
6539 * of a logical mapping.
6540 */
6541 void
6542 pmap_map_delete(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
6543 {
6544 pmap_remove1(pmap, sva, eva, true);
6545 }
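/*
 * Both pmap_remove() and pmap_map_delete() funnel into pmap_remove1(); the
 * only difference is the "map_delete" flag, which additionally invokes
 * pmap_pkru_on_remove() so that protection-key metadata does not outlive the
 * logical mapping.
 */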
6546
6547 /*
6548 * Routine: pmap_remove_all
6549 * Function:
6550 * Removes this physical page from
6551 * all physical maps in which it resides.
6552 * Reflects back modify bits to the pager.
6553 *
6554 * Notes:
6555 * Original versions of this routine were very
6556 * inefficient because they iteratively called
6557 * pmap_remove (slow...)
6558 */
6559
6560 void
6561 pmap_remove_all(vm_page_t m)
6562 {
6563 struct md_page *pvh;
6564 pv_entry_t pv;
6565 pmap_t pmap;
6566 struct rwlock *lock;
6567 pt_entry_t *pte, tpte, PG_A, PG_M, PG_RW;
6568 pd_entry_t *pde;
6569 vm_offset_t va;
6570 struct spglist free;
6571 int pvh_gen, md_gen;
6572
6573 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
6574 ("pmap_remove_all: page %p is not managed", m));
6575 SLIST_INIT(&free);
6576 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
6577 pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
6578 pa_to_pvh(VM_PAGE_TO_PHYS(m));
6579 rw_wlock(lock);
6580 retry:
6581 while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
6582 pmap = PV_PMAP(pv);
6583 if (!PMAP_TRYLOCK(pmap)) {
6584 pvh_gen = pvh->pv_gen;
6585 rw_wunlock(lock);
6586 PMAP_LOCK(pmap);
6587 rw_wlock(lock);
6588 if (pvh_gen != pvh->pv_gen) {
6589 PMAP_UNLOCK(pmap);
6590 goto retry;
6591 }
6592 }
6593 va = pv->pv_va;
6594 pde = pmap_pde(pmap, va);
6595 (void)pmap_demote_pde_locked(pmap, pde, va, &lock);
6596 PMAP_UNLOCK(pmap);
6597 }
6598 while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
6599 pmap = PV_PMAP(pv);
6600 if (!PMAP_TRYLOCK(pmap)) {
6601 pvh_gen = pvh->pv_gen;
6602 md_gen = m->md.pv_gen;
6603 rw_wunlock(lock);
6604 PMAP_LOCK(pmap);
6605 rw_wlock(lock);
6606 if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
6607 PMAP_UNLOCK(pmap);
6608 goto retry;
6609 }
6610 }
6611 PG_A = pmap_accessed_bit(pmap);
6612 PG_M = pmap_modified_bit(pmap);
6613 PG_RW = pmap_rw_bit(pmap);
6614 pmap_resident_count_adj(pmap, -1);
6615 pde = pmap_pde(pmap, pv->pv_va);
6616 KASSERT((*pde & PG_PS) == 0, ("pmap_remove_all: found"
6617 " a 2mpage in page %p's pv list", m));
6618 pte = pmap_pde_to_pte(pde, pv->pv_va);
6619 tpte = pte_load_clear(pte);
6620 if (tpte & PG_W)
6621 pmap->pm_stats.wired_count--;
6622 if (tpte & PG_A)
6623 vm_page_aflag_set(m, PGA_REFERENCED);
6624
6625 /*
6626 * Update the vm_page_t clean and reference bits.
6627 */
6628 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
6629 vm_page_dirty(m);
6630 pmap_unuse_pt(pmap, pv->pv_va, *pde, &free);
6631 pmap_invalidate_page(pmap, pv->pv_va);
6632 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
6633 m->md.pv_gen++;
6634 free_pv_entry(pmap, pv);
6635 PMAP_UNLOCK(pmap);
6636 }
6637 vm_page_aflag_clear(m, PGA_WRITEABLE);
6638 rw_wunlock(lock);
6639 pmap_delayed_invl_wait(m);
6640 vm_page_free_pages_toq(&free, true);
6641 }
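/*
 * A note on the locking pattern in pmap_remove_all(): when PMAP_TRYLOCK()
 * fails, the pv list lock is dropped so that the pmap lock can be acquired
 * without a lock order reversal.  The saved pv_gen generation counts detect
 * whether the pv lists changed while unlocked; if so, the scan restarts at
 * the "retry" label.
 */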
6642
6643 /*
6644 * pmap_protect_pde: do the things to protect a 2mpage in a process
6645 */
6646 static bool
6647 pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot)
6648 {
6649 pd_entry_t newpde, oldpde;
6650 vm_page_t m, mt;
6651 bool anychanged;
6652 pt_entry_t PG_G, PG_M, PG_RW;
6653
6654 PG_G = pmap_global_bit(pmap);
6655 PG_M = pmap_modified_bit(pmap);
6656 PG_RW = pmap_rw_bit(pmap);
6657
6658 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
6659 KASSERT((sva & PDRMASK) == 0,
6660 ("pmap_protect_pde: sva is not 2mpage aligned"));
6661 anychanged = false;
6662 retry:
6663 oldpde = newpde = *pde;
6664 if ((prot & VM_PROT_WRITE) == 0) {
6665 if ((oldpde & (PG_MANAGED | PG_M | PG_RW)) ==
6666 (PG_MANAGED | PG_M | PG_RW)) {
6667 m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
6668 for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
6669 vm_page_dirty(mt);
6670 }
6671 newpde &= ~(PG_RW | PG_M);
6672 }
6673 if ((prot & VM_PROT_EXECUTE) == 0)
6674 newpde |= pg_nx;
6675 if (newpde != oldpde) {
6676 /*
6677 * As an optimization to future operations on this PDE, clear
6678 * PG_PROMOTED. The impending invalidation will remove any
6679 * lingering 4KB page mappings from the TLB.
6680 */
6681 if (!atomic_cmpset_long(pde, oldpde, newpde & ~PG_PROMOTED))
6682 goto retry;
6683 if ((oldpde & PG_G) != 0)
6684 pmap_invalidate_pde_page(kernel_pmap, sva, oldpde);
6685 else
6686 anychanged = true;
6687 }
6688 return (anychanged);
6689 }
6690
6691 /*
6692 * Set the physical protection on the
6693 * specified range of this map as requested.
6694 */
6695 void
6696 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
6697 {
6698 vm_page_t m;
6699 vm_offset_t va_next;
6700 pml4_entry_t *pml4e;
6701 pdp_entry_t *pdpe;
6702 pd_entry_t ptpaddr, *pde;
6703 pt_entry_t *pte, PG_G, PG_M, PG_RW, PG_V;
6704 pt_entry_t obits, pbits;
6705 bool anychanged;
6706
6707 KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot));
6708 if (prot == VM_PROT_NONE) {
6709 pmap_remove(pmap, sva, eva);
6710 return;
6711 }
6712
6713 if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) ==
6714 (VM_PROT_WRITE|VM_PROT_EXECUTE))
6715 return;
6716
6717 PG_G = pmap_global_bit(pmap);
6718 PG_M = pmap_modified_bit(pmap);
6719 PG_V = pmap_valid_bit(pmap);
6720 PG_RW = pmap_rw_bit(pmap);
6721 anychanged = false;
6722
6723 /*
6724 * Although this function delays and batches the invalidation
6725 * of stale TLB entries, it does not need to call
6726 * pmap_delayed_invl_start() and
6727 * pmap_delayed_invl_finish(), because it does not
6728 * ordinarily destroy mappings. Stale TLB entries from
6729 * protection-only changes need only be invalidated before the
6730 * pmap lock is released, because protection-only changes do
6731 * not destroy PV entries. Even operations that iterate over
6732 * a physical page's PV list of mappings, like
6733 * pmap_remove_write(), acquire the pmap lock for each
6734 * mapping. Consequently, for protection-only changes, the
6735 * pmap lock suffices to synchronize both page table and TLB
6736 * updates.
6737 *
6738 * This function only destroys a mapping if pmap_demote_pde()
6739 * fails. In that case, stale TLB entries are immediately
6740 * invalidated.
6741 */
6742
6743 PMAP_LOCK(pmap);
6744 for (; sva < eva; sva = va_next) {
6745 pml4e = pmap_pml4e(pmap, sva);
6746 if (pml4e == NULL || (*pml4e & PG_V) == 0) {
6747 va_next = (sva + NBPML4) & ~PML4MASK;
6748 if (va_next < sva)
6749 va_next = eva;
6750 continue;
6751 }
6752
6753 va_next = (sva + NBPDP) & ~PDPMASK;
6754 if (va_next < sva)
6755 va_next = eva;
6756 pdpe = pmap_pml4e_to_pdpe(pml4e, sva);
6757 if ((*pdpe & PG_V) == 0)
6758 continue;
6759 if ((*pdpe & PG_PS) != 0) {
6760 KASSERT(va_next <= eva,
6761 ("partial update of non-transparent 1G mapping "
6762 "pdpe %#lx sva %#lx eva %#lx va_next %#lx",
6763 *pdpe, sva, eva, va_next));
6764 retry_pdpe:
6765 obits = pbits = *pdpe;
6766 MPASS((pbits & (PG_MANAGED | PG_G)) == 0);
6767 MPASS(pmap != kernel_pmap); /* XXXKIB */
6768 if ((prot & VM_PROT_WRITE) == 0)
6769 pbits &= ~(PG_RW | PG_M);
6770 if ((prot & VM_PROT_EXECUTE) == 0)
6771 pbits |= pg_nx;
6772
6773 if (pbits != obits) {
6774 if (!atomic_cmpset_long(pdpe, obits, pbits))
6775 /* PG_PS cannot be cleared under us. */
6776 goto retry_pdpe;
6777 anychanged = true;
6778 }
6779 continue;
6780 }
6781
6782 va_next = (sva + NBPDR) & ~PDRMASK;
6783 if (va_next < sva)
6784 va_next = eva;
6785
6786 pde = pmap_pdpe_to_pde(pdpe, sva);
6787 ptpaddr = *pde;
6788
6789 /*
6790 * Weed out invalid mappings.
6791 */
6792 if (ptpaddr == 0)
6793 continue;
6794
6795 /*
6796 * Check for large page.
6797 */
6798 if ((ptpaddr & PG_PS) != 0) {
6799 /*
6800 * Are we protecting the entire large page?
6801 */
6802 if (sva + NBPDR == va_next && eva >= va_next) {
6803 /*
6804 * The TLB entry for a PG_G mapping is
6805 * invalidated by pmap_protect_pde().
6806 */
6807 if (pmap_protect_pde(pmap, pde, sva, prot))
6808 anychanged = true;
6809 continue;
6810 }
6811
6812 /*
6813 * Does the large page mapping need to change? If so,
6814 * demote it and fall through.
6815 */
6816 pbits = ptpaddr;
6817 if ((prot & VM_PROT_WRITE) == 0)
6818 pbits &= ~(PG_RW | PG_M);
6819 if ((prot & VM_PROT_EXECUTE) == 0)
6820 pbits |= pg_nx;
6821 if (ptpaddr == pbits || !pmap_demote_pde(pmap, pde,
6822 sva)) {
6823 /*
6824 * Either the large page mapping doesn't need
6825 * to change, or it was destroyed during
6826 * demotion.
6827 */
6828 continue;
6829 }
6830 }
6831
6832 if (va_next > eva)
6833 va_next = eva;
6834
6835 for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
6836 sva += PAGE_SIZE) {
6837 retry:
6838 obits = pbits = *pte;
6839 if ((pbits & PG_V) == 0)
6840 continue;
6841
6842 if ((prot & VM_PROT_WRITE) == 0) {
6843 if ((pbits & (PG_MANAGED | PG_M | PG_RW)) ==
6844 (PG_MANAGED | PG_M | PG_RW)) {
6845 m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
6846 vm_page_dirty(m);
6847 }
6848 pbits &= ~(PG_RW | PG_M);
6849 }
6850 if ((prot & VM_PROT_EXECUTE) == 0)
6851 pbits |= pg_nx;
6852
6853 if (pbits != obits) {
6854 if (!atomic_cmpset_long(pte, obits, pbits))
6855 goto retry;
6856 if (obits & PG_G)
6857 pmap_invalidate_page(pmap, sva);
6858 else
6859 anychanged = true;
6860 }
6861 }
6862 }
6863 if (anychanged)
6864 pmap_invalidate_all(pmap);
6865 PMAP_UNLOCK(pmap);
6866 }
6867
6868 static bool
6869 pmap_pde_ept_executable(pmap_t pmap, pd_entry_t pde)
6870 {
6871
6872 if (pmap->pm_type != PT_EPT)
6873 return (false);
6874 return ((pde & EPT_PG_EXECUTE) != 0);
6875 }
6876
6877 #if VM_NRESERVLEVEL > 0
6878 /*
6879 * Tries to promote the 512, contiguous 4KB page mappings that are within a
6880 * single page table page (PTP) to a single 2MB page mapping. For promotion
6881 * to occur, two conditions must be met: (1) the 4KB page mappings must map
6882 * aligned, contiguous physical memory and (2) the 4KB page mappings must have
6883 * identical characteristics.
6884 */
6885 static bool
6886 pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va, vm_page_t mpte,
6887 struct rwlock **lockp)
6888 {
6889 pd_entry_t newpde;
6890 pt_entry_t *firstpte, oldpte, pa, *pte;
6891 pt_entry_t allpte_PG_A, PG_A, PG_G, PG_M, PG_PKU_MASK, PG_RW, PG_V;
6892 int PG_PTE_CACHE;
6893
6894 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
6895 if (!pmap_ps_enabled(pmap))
6896 return (false);
6897
6898 PG_A = pmap_accessed_bit(pmap);
6899 PG_G = pmap_global_bit(pmap);
6900 PG_M = pmap_modified_bit(pmap);
6901 PG_V = pmap_valid_bit(pmap);
6902 PG_RW = pmap_rw_bit(pmap);
6903 PG_PKU_MASK = pmap_pku_mask_bit(pmap);
6904 PG_PTE_CACHE = pmap_cache_mask(pmap, false);
6905
6906 /*
6907 * Examine the first PTE in the specified PTP. Abort if this PTE is
6908 * ineligible for promotion due to hardware errata, invalid, or does
6909 * not map the first 4KB physical page within a 2MB page.
6910 */
6911 firstpte = (pt_entry_t *)PHYS_TO_DMAP(*pde & PG_FRAME);
6912 newpde = *firstpte;
6913 if (!pmap_allow_2m_x_page(pmap, pmap_pde_ept_executable(pmap, newpde)))
6914 return (false);
6915 if ((newpde & ((PG_FRAME & PDRMASK) | PG_V)) != PG_V) {
6916 counter_u64_add(pmap_pde_p_failures, 1);
6917 CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#lx"
6918 " in pmap %p", va, pmap);
6919 return (false);
6920 }
6921
6922 /*
6923 * Both here and in the below "for" loop, to allow for repromotion
6924 * after MADV_FREE, conditionally write protect a clean PTE before
6925 * possibly aborting the promotion due to other PTE attributes. Why?
6926 * Suppose that MADV_FREE is applied to a part of a superpage, the
6927 * address range [S, E). pmap_advise() will demote the superpage
6928 * mapping, destroy the 4KB page mapping at the end of [S, E), and
6929 * clear PG_M and PG_A in the PTEs for the rest of [S, E). Later,
6930 * imagine that the memory in [S, E) is recycled, but the last 4KB
6931 * page in [S, E) is not the last to be rewritten, or simply accessed.
6932 * In other words, there is still a 4KB page in [S, E), call it P,
6933 * that is writeable but PG_M and PG_A are clear in P's PTE. Unless
6934 * we write protect P before aborting the promotion, if and when P is
6935 * finally rewritten, there won't be a page fault to trigger
6936 * repromotion.
6937 */
6938 setpde:
6939 if ((newpde & (PG_M | PG_RW)) == PG_RW) {
6940 /*
6941 * When PG_M is already clear, PG_RW can be cleared without
6942 * a TLB invalidation.
6943 */
6944 if (!atomic_fcmpset_long(firstpte, &newpde, newpde & ~PG_RW))
6945 goto setpde;
6946 newpde &= ~PG_RW;
6947 CTR2(KTR_PMAP, "pmap_promote_pde: protect for va %#lx"
6948 " in pmap %p", va & ~PDRMASK, pmap);
6949 }
6950
6951 /*
6952 * Examine each of the other PTEs in the specified PTP. Abort if this
6953 * PTE maps an unexpected 4KB physical page or does not have identical
6954 * characteristics to the first PTE.
6955 */
6956 allpte_PG_A = newpde & PG_A;
6957 pa = (newpde & (PG_PS_FRAME | PG_V)) + NBPDR - PAGE_SIZE;
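	/*
	 * "pa" holds the physical address (plus PG_V) that the last PTE in
	 * the PTP is expected to map; it is decremented as the loop scans
	 * the PTEs from last to first.
	 */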
6958 for (pte = firstpte + NPTEPG - 1; pte > firstpte; pte--) {
6959 oldpte = *pte;
6960 if ((oldpte & (PG_FRAME | PG_V)) != pa) {
6961 counter_u64_add(pmap_pde_p_failures, 1);
6962 CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#lx"
6963 " in pmap %p", va, pmap);
6964 return (false);
6965 }
6966 setpte:
6967 if ((oldpte & (PG_M | PG_RW)) == PG_RW) {
6968 /*
6969 * When PG_M is already clear, PG_RW can be cleared
6970 * without a TLB invalidation.
6971 */
6972 if (!atomic_fcmpset_long(pte, &oldpte, oldpte & ~PG_RW))
6973 goto setpte;
6974 oldpte &= ~PG_RW;
6975 CTR2(KTR_PMAP, "pmap_promote_pde: protect for va %#lx"
6976 " in pmap %p", (oldpte & PG_FRAME & PDRMASK) |
6977 (va & ~PDRMASK), pmap);
6978 }
6979 if ((oldpte & PG_PTE_PROMOTE) != (newpde & PG_PTE_PROMOTE)) {
6980 counter_u64_add(pmap_pde_p_failures, 1);
6981 CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#lx"
6982 " in pmap %p", va, pmap);
6983 return (false);
6984 }
6985 allpte_PG_A &= oldpte;
6986 pa -= PAGE_SIZE;
6987 }
6988
6989 /*
6990 * Unless all PTEs have PG_A set, clear it from the superpage mapping,
6991 * so that promotions triggered by speculative mappings, such as
6992 * pmap_enter_quick(), don't automatically mark the underlying pages
6993 * as referenced.
6994 */
6995 newpde &= ~PG_A | allpte_PG_A;
6996
6997 /*
6998 * EPT PTEs with PG_M set and PG_A clear are not supported by early
6999 * MMUs supporting EPT.
7000 */
7001 KASSERT((newpde & PG_A) != 0 || safe_to_clear_referenced(pmap, newpde),
7002 ("unsupported EPT PTE"));
7003
7004 /*
7005 * Save the PTP in its current state until the PDE mapping the
7006 * superpage is demoted by pmap_demote_pde() or destroyed by
7007 * pmap_remove_pde(). If PG_A is not set in every PTE, then request
7008 * that the PTP be refilled on demotion.
7009 */
7010 if (mpte == NULL)
7011 mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
7012 KASSERT(mpte >= vm_page_array &&
7013 mpte < &vm_page_array[vm_page_array_size],
7014 ("pmap_promote_pde: page table page is out of range"));
7015 KASSERT(mpte->pindex == pmap_pde_pindex(va),
7016 ("pmap_promote_pde: page table page's pindex is wrong "
7017 "mpte %p pidx %#lx va %#lx va pde pidx %#lx",
7018 mpte, mpte->pindex, va, pmap_pde_pindex(va)));
7019 if (pmap_insert_pt_page(pmap, mpte, true, allpte_PG_A != 0)) {
7020 counter_u64_add(pmap_pde_p_failures, 1);
7021 CTR2(KTR_PMAP,
7022 "pmap_promote_pde: failure for va %#lx in pmap %p", va,
7023 pmap);
7024 return (false);
7025 }
7026
7027 /*
7028 * Promote the pv entries.
7029 */
7030 if ((newpde & PG_MANAGED) != 0)
7031 pmap_pv_promote_pde(pmap, va, newpde & PG_PS_FRAME, lockp);
7032
7033 /*
7034 * Propagate the PAT index to its proper position.
7035 */
7036 newpde = pmap_swap_pat(pmap, newpde);
7037
7038 /*
7039 * Map the superpage.
7040 */
7041 if (workaround_erratum383)
7042 pmap_update_pde(pmap, va, pde, PG_PS | newpde);
7043 else
7044 pde_store(pde, PG_PROMOTED | PG_PS | newpde);
7045
7046 counter_u64_add(pmap_pde_promotions, 1);
7047 CTR2(KTR_PMAP, "pmap_promote_pde: success for va %#lx"
7048 " in pmap %p", va, pmap);
7049 return (true);
7050 }
7051 #endif /* VM_NRESERVLEVEL > 0 */
7052
7053 static int
7054 pmap_enter_largepage(pmap_t pmap, vm_offset_t va, pt_entry_t newpte, int flags,
7055 int psind)
7056 {
7057 vm_page_t mp;
7058 pt_entry_t origpte, *pml4e, *pdpe, *pde, pten, PG_V;
7059
7060 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
7061 KASSERT(psind > 0 && psind < MAXPAGESIZES && pagesizes[psind] != 0,
7062 ("psind %d unexpected", psind));
7063 KASSERT(((newpte & PG_FRAME) & (pagesizes[psind] - 1)) == 0,
7064 ("unaligned phys address %#lx newpte %#lx psind %d",
7065 newpte & PG_FRAME, newpte, psind));
7066 KASSERT((va & (pagesizes[psind] - 1)) == 0,
7067 ("unaligned va %#lx psind %d", va, psind));
7068 KASSERT(va < VM_MAXUSER_ADDRESS,
7069 ("kernel mode non-transparent superpage")); /* XXXKIB */
7070 KASSERT(va + pagesizes[psind] < VM_MAXUSER_ADDRESS,
7071 ("overflowing user map va %#lx psind %d", va, psind)); /* XXXKIB */
7072
7073 PG_V = pmap_valid_bit(pmap);
7074
7075 restart:
7076 pten = newpte;
7077 if (!pmap_pkru_same(pmap, va, va + pagesizes[psind], &pten))
7078 return (KERN_PROTECTION_FAILURE);
7079
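	/*
	 * A psind of 2 installs a 1GB leaf directly in the PDPE; a psind of
	 * 1 installs a 2MB leaf in the PDE.
	 */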
7080 if (psind == 2) { /* 1G */
7081 pml4e = pmap_pml4e(pmap, va);
7082 if (pml4e == NULL || (*pml4e & PG_V) == 0) {
7083 mp = pmap_allocpte_alloc(pmap, pmap_pml4e_pindex(va),
7084 NULL, va);
7085 if (mp == NULL)
7086 goto allocf;
7087 pdpe = (pdp_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mp));
7088 pdpe = &pdpe[pmap_pdpe_index(va)];
7089 origpte = *pdpe;
7090 MPASS(origpte == 0);
7091 } else {
7092 pdpe = pmap_pml4e_to_pdpe(pml4e, va);
7093 KASSERT(pdpe != NULL, ("va %#lx lost pdpe", va));
7094 origpte = *pdpe;
7095 if ((origpte & PG_V) == 0) {
7096 mp = PHYS_TO_VM_PAGE(*pml4e & PG_FRAME);
7097 mp->ref_count++;
7098 }
7099 }
7100 *pdpe = pten;
7101 } else /* (psind == 1) */ { /* 2M */
7102 pde = pmap_pde(pmap, va);
7103 if (pde == NULL) {
7104 mp = pmap_allocpte_alloc(pmap, pmap_pdpe_pindex(va),
7105 NULL, va);
7106 if (mp == NULL)
7107 goto allocf;
7108 pde = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mp));
7109 pde = &pde[pmap_pde_index(va)];
7110 origpte = *pde;
7111 MPASS(origpte == 0);
7112 } else {
7113 origpte = *pde;
7114 if ((origpte & PG_V) == 0) {
7115 pdpe = pmap_pdpe(pmap, va);
7116 MPASS(pdpe != NULL && (*pdpe & PG_V) != 0);
7117 mp = PHYS_TO_VM_PAGE(*pdpe & PG_FRAME);
7118 mp->ref_count++;
7119 }
7120 }
7121 *pde = pten;
7122 }
7123 KASSERT((origpte & PG_V) == 0 || ((origpte & PG_PS) != 0 &&
7124 (origpte & PG_PS_FRAME) == (pten & PG_PS_FRAME)),
7125 ("va %#lx changing %s phys page origpte %#lx pten %#lx",
7126 va, psind == 2 ? "1G" : "2M", origpte, pten));
7127 if ((pten & PG_W) != 0 && (origpte & PG_W) == 0)
7128 pmap->pm_stats.wired_count += pagesizes[psind] / PAGE_SIZE;
7129 else if ((pten & PG_W) == 0 && (origpte & PG_W) != 0)
7130 pmap->pm_stats.wired_count -= pagesizes[psind] / PAGE_SIZE;
7131 if ((origpte & PG_V) == 0)
7132 pmap_resident_count_adj(pmap, pagesizes[psind] / PAGE_SIZE);
7133
7134 return (KERN_SUCCESS);
7135
7136 allocf:
7137 if ((flags & PMAP_ENTER_NOSLEEP) != 0)
7138 return (KERN_RESOURCE_SHORTAGE);
7139 PMAP_UNLOCK(pmap);
7140 vm_wait(NULL);
7141 PMAP_LOCK(pmap);
7142 goto restart;
7143 }
7144
7145 /*
7146  * Insert the given physical page (m) at the specified virtual
7147  * address (va) in the target physical map with the protection
7148  * requested.
7149  *
7150  * If specified, the page will be wired down, meaning that the
7151  * related pte cannot be reclaimed.
7152 *
7153 * NB: This is the only routine which MAY NOT lazy-evaluate
7154 * or lose information. That is, this routine must actually
7155 * insert this page into the given map NOW.
7156 *
7157 * When destroying both a page table and PV entry, this function
7158 * performs the TLB invalidation before releasing the PV list
7159 * lock, so we do not need pmap_delayed_invl_page() calls here.
7160 */
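/*
 * Illustrative sketch only (not a caller in this file): a fault handler
 * would typically create a wired, writable 4KB mapping with
 *	rv = pmap_enter(pmap, va, m, VM_PROT_READ | VM_PROT_WRITE,
 *	    VM_PROT_READ | VM_PROT_WRITE | PMAP_ENTER_WIRED, 0);
 * and check "rv" against KERN_SUCCESS; "pmap", "va", and "m" are assumed
 * to come from the caller's vm_map lookup.
 */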
7161 int
7162 pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
7163 u_int flags, int8_t psind)
7164 {
7165 struct rwlock *lock;
7166 pd_entry_t *pde;
7167 pt_entry_t *pte, PG_G, PG_A, PG_M, PG_RW, PG_V;
7168 pt_entry_t newpte, origpte;
7169 pv_entry_t pv;
7170 vm_paddr_t opa, pa;
7171 vm_page_t mpte, om;
7172 int rv;
7173 bool nosleep;
7174
7175 PG_A = pmap_accessed_bit(pmap);
7176 PG_G = pmap_global_bit(pmap);
7177 PG_M = pmap_modified_bit(pmap);
7178 PG_V = pmap_valid_bit(pmap);
7179 PG_RW = pmap_rw_bit(pmap);
7180
7181 va = trunc_page(va);
7182 KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
7183 KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
7184 ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%lx)",
7185 va));
7186 KASSERT((m->oflags & VPO_UNMANAGED) != 0 || !VA_IS_CLEANMAP(va),
7187 ("pmap_enter: managed mapping within the clean submap"));
7188 if ((m->oflags & VPO_UNMANAGED) == 0)
7189 VM_PAGE_OBJECT_BUSY_ASSERT(m);
7190 KASSERT((flags & PMAP_ENTER_RESERVED) == 0,
7191 ("pmap_enter: flags %u has reserved bits set", flags));
7192 pa = VM_PAGE_TO_PHYS(m);
7193 newpte = (pt_entry_t)(pa | PG_A | PG_V);
7194 if ((flags & VM_PROT_WRITE) != 0)
7195 newpte |= PG_M;
7196 if ((prot & VM_PROT_WRITE) != 0)
7197 newpte |= PG_RW;
7198 KASSERT((newpte & (PG_M | PG_RW)) != PG_M,
7199 ("pmap_enter: flags includes VM_PROT_WRITE but prot doesn't"));
7200 if ((prot & VM_PROT_EXECUTE) == 0)
7201 newpte |= pg_nx;
7202 if ((flags & PMAP_ENTER_WIRED) != 0)
7203 newpte |= PG_W;
7204 if (va < VM_MAXUSER_ADDRESS)
7205 newpte |= PG_U;
7206 if (pmap == kernel_pmap)
7207 newpte |= PG_G;
7208 newpte |= pmap_cache_bits(pmap, m->md.pat_mode, psind > 0);
7209
7210 /*
7211 * Set modified bit gratuitously for writeable mappings if
7212 * the page is unmanaged. We do not want to take a fault
7213 * to do the dirty bit accounting for these mappings.
7214 */
7215 if ((m->oflags & VPO_UNMANAGED) != 0) {
7216 if ((newpte & PG_RW) != 0)
7217 newpte |= PG_M;
7218 } else
7219 newpte |= PG_MANAGED;
7220
7221 lock = NULL;
7222 PMAP_LOCK(pmap);
7223 if ((flags & PMAP_ENTER_LARGEPAGE) != 0) {
7224 KASSERT((m->oflags & VPO_UNMANAGED) != 0,
7225 ("managed largepage va %#lx flags %#x", va, flags));
7226 rv = pmap_enter_largepage(pmap, va, newpte | PG_PS, flags,
7227 psind);
7228 goto out;
7229 }
7230 if (psind == 1) {
7231 /* Assert the required virtual and physical alignment. */
7232 KASSERT((va & PDRMASK) == 0, ("pmap_enter: va unaligned"));
7233 KASSERT(m->psind > 0, ("pmap_enter: m->psind < psind"));
7234 rv = pmap_enter_pde(pmap, va, newpte | PG_PS, flags, m, &lock);
7235 goto out;
7236 }
7237 mpte = NULL;
7238
7239 /*
7240 * In the case that a page table page is not
7241 * resident, we are creating it here.
7242 */
7243 retry:
7244 pde = pmap_pde(pmap, va);
7245 if (pde != NULL && (*pde & PG_V) != 0 && ((*pde & PG_PS) == 0 ||
7246 pmap_demote_pde_locked(pmap, pde, va, &lock))) {
7247 pte = pmap_pde_to_pte(pde, va);
7248 if (va < VM_MAXUSER_ADDRESS && mpte == NULL) {
7249 mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
7250 mpte->ref_count++;
7251 }
7252 } else if (va < VM_MAXUSER_ADDRESS) {
7253 /*
7254 * Here if the pte page isn't mapped, or if it has been
7255 * deallocated.
7256 */
7257 nosleep = (flags & PMAP_ENTER_NOSLEEP) != 0;
7258 mpte = pmap_allocpte_alloc(pmap, pmap_pde_pindex(va),
7259 nosleep ? NULL : &lock, va);
7260 if (mpte == NULL && nosleep) {
7261 rv = KERN_RESOURCE_SHORTAGE;
7262 goto out;
7263 }
7264 goto retry;
7265 } else
7266 panic("pmap_enter: invalid page directory va=%#lx", va);
7267
7268 origpte = *pte;
7269 pv = NULL;
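	/*
	 * Protection keys (PKRU) apply only to user addresses in native
	 * (PT_X86) page tables; fold the key bits for this address into
	 * the new PTE.
	 */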
7270 if (va < VM_MAXUSER_ADDRESS && pmap->pm_type == PT_X86)
7271 newpte |= pmap_pkru_get(pmap, va);
7272
7273 /*
7274 * Is the specified virtual address already mapped?
7275 */
7276 if ((origpte & PG_V) != 0) {
7277 /*
7278 * Wiring change, just update stats. We don't worry about
7279 * wiring PT pages as they remain resident as long as there
7280 * are valid mappings in them. Hence, if a user page is wired,
7281 * the PT page will be also.
7282 */
7283 if ((newpte & PG_W) != 0 && (origpte & PG_W) == 0)
7284 pmap->pm_stats.wired_count++;
7285 else if ((newpte & PG_W) == 0 && (origpte & PG_W) != 0)
7286 pmap->pm_stats.wired_count--;
7287
7288 /*
7289 * Remove the extra PT page reference.
7290 */
7291 if (mpte != NULL) {
7292 mpte->ref_count--;
7293 KASSERT(mpte->ref_count > 0,
7294 ("pmap_enter: missing reference to page table page,"
7295 " va: 0x%lx", va));
7296 }
7297
7298 /*
7299 * Has the physical page changed?
7300 */
7301 opa = origpte & PG_FRAME;
7302 if (opa == pa) {
7303 /*
7304 * No, might be a protection or wiring change.
7305 */
7306 if ((origpte & PG_MANAGED) != 0 &&
7307 (newpte & PG_RW) != 0)
7308 vm_page_aflag_set(m, PGA_WRITEABLE);
7309 if (((origpte ^ newpte) & ~(PG_M | PG_A)) == 0)
7310 goto unchanged;
7311 goto validate;
7312 }
7313
7314 /*
7315 * The physical page has changed. Temporarily invalidate
7316 * the mapping. This ensures that all threads sharing the
7317 * pmap keep a consistent view of the mapping, which is
7318 * necessary for the correct handling of COW faults. It
7319 * also permits reuse of the old mapping's PV entry,
7320 * avoiding an allocation.
7321 *
7322 * For consistency, handle unmanaged mappings the same way.
7323 */
7324 origpte = pte_load_clear(pte);
7325 KASSERT((origpte & PG_FRAME) == opa,
7326 ("pmap_enter: unexpected pa update for %#lx", va));
7327 if ((origpte & PG_MANAGED) != 0) {
7328 om = PHYS_TO_VM_PAGE(opa);
7329
7330 /*
7331 * The pmap lock is sufficient to synchronize with
7332 * concurrent calls to pmap_page_test_mappings() and
7333 * pmap_ts_referenced().
7334 */
7335 if ((origpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
7336 vm_page_dirty(om);
7337 if ((origpte & PG_A) != 0) {
7338 pmap_invalidate_page(pmap, va);
7339 vm_page_aflag_set(om, PGA_REFERENCED);
7340 }
7341 CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa);
7342 pv = pmap_pvh_remove(&om->md, pmap, va);
7343 KASSERT(pv != NULL,
7344 ("pmap_enter: no PV entry for %#lx", va));
7345 if ((newpte & PG_MANAGED) == 0)
7346 free_pv_entry(pmap, pv);
7347 if ((om->a.flags & PGA_WRITEABLE) != 0 &&
7348 TAILQ_EMPTY(&om->md.pv_list) &&
7349 ((om->flags & PG_FICTITIOUS) != 0 ||
7350 TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
7351 vm_page_aflag_clear(om, PGA_WRITEABLE);
7352 } else {
7353 /*
7354 * Since this mapping is unmanaged, assume that PG_A
7355 * is set.
7356 */
7357 pmap_invalidate_page(pmap, va);
7358 }
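		/*
		 * The old mapping was removed and invalidated above; from
		 * here on, proceed as if a new mapping is being created.
		 */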
7359 origpte = 0;
7360 } else {
7361 /*
7362 * Increment the counters.
7363 */
7364 if ((newpte & PG_W) != 0)
7365 pmap->pm_stats.wired_count++;
7366 pmap_resident_count_adj(pmap, 1);
7367 }
7368
7369 /*
7370 * Enter on the PV list if part of our managed memory.
7371 */
7372 if ((newpte & PG_MANAGED) != 0) {
7373 if (pv == NULL) {
7374 pv = get_pv_entry(pmap, &lock);
7375 pv->pv_va = va;
7376 }
7377 CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, pa);
7378 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
7379 m->md.pv_gen++;
7380 if ((newpte & PG_RW) != 0)
7381 vm_page_aflag_set(m, PGA_WRITEABLE);
7382 }
7383
7384 /*
7385 * Update the PTE.
7386 */
7387 if ((origpte & PG_V) != 0) {
7388 validate:
7389 origpte = pte_load_store(pte, newpte);
7390 KASSERT((origpte & PG_FRAME) == pa,
7391 ("pmap_enter: unexpected pa update for %#lx", va));
7392 if ((newpte & PG_M) == 0 && (origpte & (PG_M | PG_RW)) ==
7393 (PG_M | PG_RW)) {
7394 if ((origpte & PG_MANAGED) != 0)
7395 vm_page_dirty(m);
7396
7397 /*
7398 * Although the PTE may still have PG_RW set, TLB
7399 * invalidation may nonetheless be required because
7400 * the PTE no longer has PG_M set.
7401 */
7402 } else if ((origpte & PG_NX) != 0 || (newpte & PG_NX) == 0) {
7403 /*
7404 * This PTE change does not require TLB invalidation.
7405 */
7406 goto unchanged;
7407 }
7408 if ((origpte & PG_A) != 0)
7409 pmap_invalidate_page(pmap, va);
7410 } else
7411 pte_store(pte, newpte);
7412
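	/*
	 * Any TLB invalidation required by the update was issued above; a
	 * previously invalid PTE (the pte_store() path) requires none,
	 * because the TLB never caches invalid entries.
	 */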
7413 unchanged:
7414
7415 #if VM_NRESERVLEVEL > 0
7416 /*
7417 * If both the page table page and the reservation are fully
7418 * populated, then attempt promotion.
7419 */
7420 if ((mpte == NULL || mpte->ref_count == NPTEPG) &&
7421 (m->flags & PG_FICTITIOUS) == 0 &&
7422 vm_reserv_level_iffullpop(m) == 0)
7423 (void)pmap_promote_pde(pmap, pde, va, mpte, &lock);
7424 #endif
7425
7426 rv = KERN_SUCCESS;
7427 out:
7428 if (lock != NULL)
7429 rw_wunlock(lock);
7430 PMAP_UNLOCK(pmap);
7431 return (rv);
7432 }
7433
7434 /*
7435 * Tries to create a read- and/or execute-only 2MB page mapping. Returns
7436 * KERN_SUCCESS if the mapping was created. Otherwise, returns an error
7437 * value. See pmap_enter_pde() for the possible error values when "no sleep",
7438 * "no replace", and "no reclaim" are specified.
7439 */
7440 static int
7441 pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
7442 struct rwlock **lockp)
7443 {
7444 pd_entry_t newpde;
7445 pt_entry_t PG_V;
7446
7447 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
7448 PG_V = pmap_valid_bit(pmap);
7449 newpde = VM_PAGE_TO_PHYS(m) |
7450 pmap_cache_bits(pmap, m->md.pat_mode, true) | PG_PS | PG_V;
7451 if ((m->oflags & VPO_UNMANAGED) == 0)
7452 newpde |= PG_MANAGED;
7453 if ((prot & VM_PROT_EXECUTE) == 0)
7454 newpde |= pg_nx;
7455 if (va < VM_MAXUSER_ADDRESS)
7456 newpde |= PG_U;
7457 return (pmap_enter_pde(pmap, va, newpde, PMAP_ENTER_NOSLEEP |
7458 PMAP_ENTER_NOREPLACE | PMAP_ENTER_NORECLAIM, NULL, lockp));
7459 }
7460
7461 /*
7462 * Returns true if every page table entry in the specified page table page is
7463 * zero.
7464 */
7465 static bool
7466 pmap_every_pte_zero(vm_paddr_t pa)
7467 {
7468 pt_entry_t *pt_end, *pte;
7469
7470 KASSERT((pa & PAGE_MASK) == 0, ("pa is misaligned"));
7471 pte = (pt_entry_t *)PHYS_TO_DMAP(pa);
7472 for (pt_end = pte + NPTEPG; pte < pt_end; pte++) {
7473 if (*pte != 0)
7474 return (false);
7475 }
7476 return (true);
7477 }
7478
7479 /*
7480 * Tries to create the specified 2MB page mapping. Returns KERN_SUCCESS if
7481 * the mapping was created, and one of KERN_FAILURE, KERN_NO_SPACE,
7482 * KERN_PROTECTION_FAILURE, or KERN_RESOURCE_SHORTAGE otherwise. Returns
7483 * KERN_FAILURE if either (1) PMAP_ENTER_NOREPLACE was specified and a 4KB
7484 * page mapping already exists within the 2MB virtual address range starting
7485 * at the specified virtual address or (2) the requested 2MB page mapping is
7486 * not supported due to hardware errata. Returns KERN_NO_SPACE if
7487 * PMAP_ENTER_NOREPLACE was specified and a 2MB page mapping already exists at
7488 * the specified virtual address. Returns KERN_PROTECTION_FAILURE if the PKRU
7489 * settings are not the same across the 2MB virtual address range starting at
7490 * the specified virtual address. Returns KERN_RESOURCE_SHORTAGE if either
7491 * (1) PMAP_ENTER_NOSLEEP was specified and a page table page allocation
7492 * failed or (2) PMAP_ENTER_NORECLAIM was specified and a PV entry allocation
7493 * failed.
7494 *
7495 * The parameter "m" is only used when creating a managed, writeable mapping.
7496 */
7497 static int
7498 pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde, u_int flags,
7499 vm_page_t m, struct rwlock **lockp)
7500 {
7501 struct spglist free;
7502 pd_entry_t oldpde, *pde;
7503 pt_entry_t PG_G, PG_RW, PG_V;
7504 vm_page_t mt, pdpg;
7505 vm_page_t uwptpg;
7506
7507 PG_G = pmap_global_bit(pmap);
7508 PG_RW = pmap_rw_bit(pmap);
7509 KASSERT((newpde & (pmap_modified_bit(pmap) | PG_RW)) != PG_RW,
7510 ("pmap_enter_pde: newpde is missing PG_M"));
7511 PG_V = pmap_valid_bit(pmap);
7512 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
7513
7514 if (!pmap_allow_2m_x_page(pmap, pmap_pde_ept_executable(pmap,
7515 newpde))) {
7516 CTR2(KTR_PMAP, "pmap_enter_pde: 2m x blocked for va %#lx"
7517 " in pmap %p", va, pmap);
7518 return (KERN_FAILURE);
7519 }
7520 if ((pde = pmap_alloc_pde(pmap, va, &pdpg, (flags &
7521 PMAP_ENTER_NOSLEEP) != 0 ? NULL : lockp)) == NULL) {
7522 CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
7523 " in pmap %p", va, pmap);
7524 return (KERN_RESOURCE_SHORTAGE);
7525 }
7526
7527 /*
7528 * If pkru is not same for the whole pde range, return failure
7529 * and let vm_fault() cope. Check after pde allocation, since
7530 * it could sleep.
7531 */
7532 if (!pmap_pkru_same(pmap, va, va + NBPDR, &newpde)) {
7533 pmap_abort_ptp(pmap, va, pdpg);
7534 return (KERN_PROTECTION_FAILURE);
7535 }
7536
7537 /*
7538 * If there are existing mappings, either abort or remove them.
7539 */
7540 oldpde = *pde;
7541 if ((oldpde & PG_V) != 0) {
7542 KASSERT(pdpg == NULL || pdpg->ref_count > 1,
7543 ("pmap_enter_pde: pdpg's reference count is too low"));
7544 if ((flags & PMAP_ENTER_NOREPLACE) != 0) {
7545 if ((oldpde & PG_PS) != 0) {
7546 if (pdpg != NULL)
7547 pdpg->ref_count--;
7548 CTR2(KTR_PMAP,
7549 "pmap_enter_pde: no space for va %#lx"
7550 " in pmap %p", va, pmap);
7551 return (KERN_NO_SPACE);
7552 } else if (va < VM_MAXUSER_ADDRESS ||
7553 !pmap_every_pte_zero(oldpde & PG_FRAME)) {
7554 if (pdpg != NULL)
7555 pdpg->ref_count--;
7556 CTR2(KTR_PMAP,
7557 "pmap_enter_pde: failure for va %#lx"
7558 " in pmap %p", va, pmap);
7559 return (KERN_FAILURE);
7560 }
7561 }
7562 /* Break the existing mapping(s). */
7563 SLIST_INIT(&free);
7564 if ((oldpde & PG_PS) != 0) {
7565 /*
7566 * The reference to the PD page that was acquired by
7567 * pmap_alloc_pde() ensures that it won't be freed.
7568 * However, if the PDE resulted from a promotion, then
7569 * a reserved PT page could be freed.
7570 */
7571 (void)pmap_remove_pde(pmap, pde, va, &free, lockp);
7572 if ((oldpde & PG_G) == 0)
7573 pmap_invalidate_pde_page(pmap, va, oldpde);
7574 } else {
7575 pmap_delayed_invl_start();
7576 if (pmap_remove_ptes(pmap, va, va + NBPDR, pde, &free,
7577 lockp))
7578 pmap_invalidate_all(pmap);
7579 pmap_delayed_invl_finish();
7580 }
7581 if (va < VM_MAXUSER_ADDRESS) {
7582 vm_page_free_pages_toq(&free, true);
7583 KASSERT(*pde == 0, ("pmap_enter_pde: non-zero pde %p",
7584 pde));
7585 } else {
7586 KASSERT(SLIST_EMPTY(&free),
7587 ("pmap_enter_pde: freed kernel page table page"));
7588
7589 /*
7590 * Both pmap_remove_pde() and pmap_remove_ptes() will
7591 * leave the kernel page table page zero filled.
7592 */
7593 mt = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
7594 if (pmap_insert_pt_page(pmap, mt, false, false))
7595 panic("pmap_enter_pde: trie insert failed");
7596 }
7597 }
7598
7599 /*
7600 	 * Allocate the leaf page table page for wired userspace mappings now,
	 * so that a later demotion of the wired superpage mapping cannot fail.
7601 */
7602 uwptpg = NULL;
7603 if ((newpde & PG_W) != 0 && pmap != kernel_pmap) {
7604 uwptpg = pmap_alloc_pt_page(pmap, pmap_pde_pindex(va),
7605 VM_ALLOC_WIRED);
7606 if (uwptpg == NULL) {
7607 pmap_abort_ptp(pmap, va, pdpg);
7608 return (KERN_RESOURCE_SHORTAGE);
7609 }
7610 if (pmap_insert_pt_page(pmap, uwptpg, true, false)) {
7611 pmap_free_pt_page(pmap, uwptpg, false);
7612 pmap_abort_ptp(pmap, va, pdpg);
7613 return (KERN_RESOURCE_SHORTAGE);
7614 }
7615
7616 uwptpg->ref_count = NPTEPG;
7617 }
7618 if ((newpde & PG_MANAGED) != 0) {
7619 /*
7620 * Abort this mapping if its PV entry could not be created.
7621 */
7622 if (!pmap_pv_insert_pde(pmap, va, newpde, flags, lockp)) {
7623 if (pdpg != NULL)
7624 pmap_abort_ptp(pmap, va, pdpg);
7625 if (uwptpg != NULL) {
7626 mt = pmap_remove_pt_page(pmap, va);
7627 KASSERT(mt == uwptpg,
7628 ("removed pt page %p, expected %p", mt,
7629 uwptpg));
7630 uwptpg->ref_count = 1;
7631 pmap_free_pt_page(pmap, uwptpg, false);
7632 }
7633 CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
7634 " in pmap %p", va, pmap);
7635 return (KERN_RESOURCE_SHORTAGE);
7636 }
7637 if ((newpde & PG_RW) != 0) {
7638 for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
7639 vm_page_aflag_set(mt, PGA_WRITEABLE);
7640 }
7641 }
7642
7643 /*
7644 * Increment counters.
7645 */
7646 if ((newpde & PG_W) != 0)
7647 pmap->pm_stats.wired_count += NBPDR / PAGE_SIZE;
7648 pmap_resident_count_adj(pmap, NBPDR / PAGE_SIZE);
7649
7650 /*
7651 * Map the superpage. (This is not a promoted mapping; there will not
7652 * be any lingering 4KB page mappings in the TLB.)
7653 */
7654 pde_store(pde, newpde);
7655
7656 counter_u64_add(pmap_pde_mappings, 1);
7657 CTR2(KTR_PMAP, "pmap_enter_pde: success for va %#lx in pmap %p",
7658 va, pmap);
7659 return (KERN_SUCCESS);
7660 }
7661
7662 /*
7663 * Maps a sequence of resident pages belonging to the same object.
7664 * The sequence begins with the given page m_start. This page is
7665 * mapped at the given virtual address start. Each subsequent page is
7666 * mapped at a virtual address that is offset from start by the same
7667 * amount as the page is offset from m_start within the object. The
7668 * last page in the sequence is the page with the largest offset from
7669 * m_start that can be mapped at a virtual address less than the given
7670 * virtual address end. Not every virtual page between start and end
7671 * is mapped; only those for which a resident page exists with the
7672 * corresponding offset from m_start are mapped.
7673 */
7674 void
7675 pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
7676 vm_page_t m_start, vm_prot_t prot)
7677 {
7678 struct rwlock *lock;
7679 vm_offset_t va;
7680 vm_page_t m, mpte;
7681 vm_pindex_t diff, psize;
7682 int rv;
7683
7684 VM_OBJECT_ASSERT_LOCKED(m_start->object);
7685
7686 psize = atop(end - start);
7687 mpte = NULL;
7688 m = m_start;
7689 lock = NULL;
7690 PMAP_LOCK(pmap);
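	/*
	 * Enter 2MB-aligned, fully populated runs as superpages when they
	 * are enabled; otherwise fall back to individual 4KB mappings via
	 * pmap_enter_quick_locked().
	 */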
7691 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
7692 va = start + ptoa(diff);
7693 if ((va & PDRMASK) == 0 && va + NBPDR <= end &&
7694 m->psind == 1 && pmap_ps_enabled(pmap) &&
7695 ((rv = pmap_enter_2mpage(pmap, va, m, prot, &lock)) ==
7696 KERN_SUCCESS || rv == KERN_NO_SPACE))
7697 m = &m[NBPDR / PAGE_SIZE - 1];
7698 else
7699 mpte = pmap_enter_quick_locked(pmap, va, m, prot,
7700 mpte, &lock);
7701 m = TAILQ_NEXT(m, listq);
7702 }
7703 if (lock != NULL)
7704 rw_wunlock(lock);
7705 PMAP_UNLOCK(pmap);
7706 }
7707
7708 /*
7709  * This code makes some *MAJOR* assumptions:
7710  * 1. The current pmap and the given pmap exist.
7711  * 2. The mapping is not wired.
7712  * 3. Only read access is required.
7713  * 4. No page table pages.
7714  * It is, however, *MUCH* faster than pmap_enter...
7715 */
7716
7717 void
7718 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
7719 {
7720 struct rwlock *lock;
7721
7722 lock = NULL;
7723 PMAP_LOCK(pmap);
7724 (void)pmap_enter_quick_locked(pmap, va, m, prot, NULL, &lock);
7725 if (lock != NULL)
7726 rw_wunlock(lock);
7727 PMAP_UNLOCK(pmap);
7728 }
7729
7730 static vm_page_t
7731 pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
7732 vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp)
7733 {
7734 pd_entry_t *pde;
7735 pt_entry_t newpte, *pte, PG_V;
7736
7737 KASSERT(!VA_IS_CLEANMAP(va) ||
7738 (m->oflags & VPO_UNMANAGED) != 0,
7739 ("pmap_enter_quick_locked: managed mapping within the clean submap"));
7740 PG_V = pmap_valid_bit(pmap);
7741 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
7742 pde = NULL;
7743
7744 /*
7745 * In the case that a page table page is not
7746 * resident, we are creating it here.
7747 */
7748 if (va < VM_MAXUSER_ADDRESS) {
7749 pdp_entry_t *pdpe;
7750 vm_pindex_t ptepindex;
7751
7752 /*
7753 * Calculate pagetable page index
7754 */
7755 ptepindex = pmap_pde_pindex(va);
7756 if (mpte && (mpte->pindex == ptepindex)) {
7757 mpte->ref_count++;
7758 } else {
7759 /*
7760 * If the page table page is mapped, we just increment
7761 * the hold count, and activate it. Otherwise, we
7762 * attempt to allocate a page table page, passing NULL
7763 * instead of the PV list lock pointer because we don't
7764 * intend to sleep. If this attempt fails, we don't
7765 * retry. Instead, we give up.
7766 */
7767 pdpe = pmap_pdpe(pmap, va);
7768 if (pdpe != NULL && (*pdpe & PG_V) != 0) {
7769 if ((*pdpe & PG_PS) != 0)
7770 return (NULL);
7771 pde = pmap_pdpe_to_pde(pdpe, va);
7772 if ((*pde & PG_V) != 0) {
7773 if ((*pde & PG_PS) != 0)
7774 return (NULL);
7775 mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
7776 mpte->ref_count++;
7777 } else {
7778 mpte = pmap_allocpte_alloc(pmap,
7779 ptepindex, NULL, va);
7780 if (mpte == NULL)
7781 return (NULL);
7782 }
7783 } else {
7784 mpte = pmap_allocpte_alloc(pmap, ptepindex,
7785 NULL, va);
7786 if (mpte == NULL)
7787 return (NULL);
7788 }
7789 }
7790 pte = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mpte));
7791 pte = &pte[pmap_pte_index(va)];
7792 } else {
7793 mpte = NULL;
7794 pte = vtopte(va);
7795 }
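	/*
	 * If a mapping already exists at this address, give up; this
	 * function is only an optimization and must not replace an
	 * existing mapping.
	 */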
7796 if (*pte) {
7797 if (mpte != NULL)
7798 mpte->ref_count--;
7799 return (NULL);
7800 }
7801
7802 /*
7803 * Enter on the PV list if part of our managed memory.
7804 */
7805 if ((m->oflags & VPO_UNMANAGED) == 0 &&
7806 !pmap_try_insert_pv_entry(pmap, va, m, lockp)) {
7807 if (mpte != NULL)
7808 pmap_abort_ptp(pmap, va, mpte);
7809 return (NULL);
7810 }
7811
7812 /*
7813 * Increment counters
7814 */
7815 pmap_resident_count_adj(pmap, 1);
7816
7817 newpte = VM_PAGE_TO_PHYS(m) | PG_V |
7818 pmap_cache_bits(pmap, m->md.pat_mode, false);
7819 if ((m->oflags & VPO_UNMANAGED) == 0)
7820 newpte |= PG_MANAGED;
7821 if ((prot & VM_PROT_EXECUTE) == 0)
7822 newpte |= pg_nx;
7823 if (va < VM_MAXUSER_ADDRESS)
7824 newpte |= PG_U | pmap_pkru_get(pmap, va);
7825 pte_store(pte, newpte);
7826
7827 #if VM_NRESERVLEVEL > 0
7828 /*
7829 * If both the PTP and the reservation are fully populated, then
7830 * attempt promotion.
7831 */
7832 if ((prot & VM_PROT_NO_PROMOTE) == 0 &&
7833 (mpte == NULL || mpte->ref_count == NPTEPG) &&
7834 (m->flags & PG_FICTITIOUS) == 0 &&
7835 vm_reserv_level_iffullpop(m) == 0) {
7836 if (pde == NULL)
7837 pde = pmap_pde(pmap, va);
7838
7839 /*
7840 * If promotion succeeds, then the next call to this function
7841 * should not be given the unmapped PTP as a hint.
7842 */
7843 if (pmap_promote_pde(pmap, pde, va, mpte, lockp))
7844 mpte = NULL;
7845 }
7846 #endif
7847
7848 return (mpte);
7849 }
7850
7851 /*
7852 * Make a temporary mapping for a physical address. This is only intended
7853 * to be used for panic dumps.
7854 */
7855 void *
7856 pmap_kenter_temporary(vm_paddr_t pa, int i)
7857 {
7858 vm_offset_t va;
7859
7860 va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE);
7861 pmap_kenter(va, pa);
7862 pmap_invlpg(kernel_pmap, va);
7863 return ((void *)crashdumpmap);
7864 }
7865
7866 /*
7867 * This code maps large physical mmap regions into the
7868 * processor address space. Note that some shortcuts
7869 * are taken, but the code works.
7870 */
7871 void
7872 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
7873 vm_pindex_t pindex, vm_size_t size)
7874 {
7875 pd_entry_t *pde;
7876 pt_entry_t PG_A, PG_M, PG_RW, PG_V;
7877 vm_paddr_t pa, ptepa;
7878 vm_page_t p, pdpg;
7879 int pat_mode;
7880
7881 PG_A = pmap_accessed_bit(pmap);
7882 PG_M = pmap_modified_bit(pmap);
7883 PG_V = pmap_valid_bit(pmap);
7884 PG_RW = pmap_rw_bit(pmap);
7885
7886 VM_OBJECT_ASSERT_WLOCKED(object);
7887 KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
7888 ("pmap_object_init_pt: non-device object"));
7889 if ((addr & (NBPDR - 1)) == 0 && (size & (NBPDR - 1)) == 0) {
7890 if (!pmap_ps_enabled(pmap))
7891 return;
7892 if (!vm_object_populate(object, pindex, pindex + atop(size)))
7893 return;
7894 p = vm_page_lookup(object, pindex);
7895 KASSERT(vm_page_all_valid(p),
7896 ("pmap_object_init_pt: invalid page %p", p));
7897 pat_mode = p->md.pat_mode;
7898
7899 /*
7900 * Abort the mapping if the first page is not physically
7901 * aligned to a 2MB page boundary.
7902 */
7903 ptepa = VM_PAGE_TO_PHYS(p);
7904 if (ptepa & (NBPDR - 1))
7905 return;
7906
7907 /*
7908 * Skip the first page. Abort the mapping if the rest of
7909 * the pages are not physically contiguous or have differing
7910 * memory attributes.
7911 */
7912 p = TAILQ_NEXT(p, listq);
7913 for (pa = ptepa + PAGE_SIZE; pa < ptepa + size;
7914 pa += PAGE_SIZE) {
7915 KASSERT(vm_page_all_valid(p),
7916 ("pmap_object_init_pt: invalid page %p", p));
7917 if (pa != VM_PAGE_TO_PHYS(p) ||
7918 pat_mode != p->md.pat_mode)
7919 return;
7920 p = TAILQ_NEXT(p, listq);
7921 }
7922
7923 /*
7924 * Map using 2MB pages. Since "ptepa" is 2M aligned and
7925 * "size" is a multiple of 2M, adding the PAT setting to "pa"
7926 * will not affect the termination of this loop.
7927 */
7928 PMAP_LOCK(pmap);
7929 for (pa = ptepa | pmap_cache_bits(pmap, pat_mode, true);
7930 pa < ptepa + size; pa += NBPDR) {
7931 pde = pmap_alloc_pde(pmap, addr, &pdpg, NULL);
7932 if (pde == NULL) {
7933 /*
7934 * The creation of mappings below is only an
7935 * optimization. If a page directory page
7936 * cannot be allocated without blocking,
7937 * continue on to the next mapping rather than
7938 * blocking.
7939 */
7940 addr += NBPDR;
7941 continue;
7942 }
7943 if ((*pde & PG_V) == 0) {
7944 pde_store(pde, pa | PG_PS | PG_M | PG_A |
7945 PG_U | PG_RW | PG_V);
7946 pmap_resident_count_adj(pmap, NBPDR / PAGE_SIZE);
7947 counter_u64_add(pmap_pde_mappings, 1);
7948 } else {
7949 /* Continue on if the PDE is already valid. */
7950 pdpg->ref_count--;
7951 KASSERT(pdpg->ref_count > 0,
7952 ("pmap_object_init_pt: missing reference "
7953 "to page directory page, va: 0x%lx", addr));
7954 }
7955 addr += NBPDR;
7956 }
7957 PMAP_UNLOCK(pmap);
7958 }
7959 }
7960
7961 /*
7962 * Clear the wired attribute from the mappings for the specified range of
7963 * addresses in the given pmap. Every valid mapping within that range
7964 * must have the wired attribute set. In contrast, invalid mappings
7965 * cannot have the wired attribute set, so they are ignored.
7966 *
7967 * The wired attribute of the page table entry is not a hardware
7968 * feature, so there is no need to invalidate any TLB entries.
7969 * Since pmap_demote_pde() for the wired entry must never fail,
7970 * pmap_delayed_invl_start()/finish() calls around the
7971 * function are not needed.
7972 */
7973 void
7974 pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
7975 {
7976 vm_offset_t va_next;
7977 pml4_entry_t *pml4e;
7978 pdp_entry_t *pdpe;
7979 pd_entry_t *pde;
7980 pt_entry_t *pte, PG_V, PG_G __diagused;
7981
7982 PG_V = pmap_valid_bit(pmap);
7983 PG_G = pmap_global_bit(pmap);
7984 PMAP_LOCK(pmap);
7985 for (; sva < eva; sva = va_next) {
7986 pml4e = pmap_pml4e(pmap, sva);
7987 if (pml4e == NULL || (*pml4e & PG_V) == 0) {
7988 va_next = (sva + NBPML4) & ~PML4MASK;
7989 if (va_next < sva)
7990 va_next = eva;
7991 continue;
7992 }
7993
7994 va_next = (sva + NBPDP) & ~PDPMASK;
7995 if (va_next < sva)
7996 va_next = eva;
7997 pdpe = pmap_pml4e_to_pdpe(pml4e, sva);
7998 if ((*pdpe & PG_V) == 0)
7999 continue;
8000 if ((*pdpe & PG_PS) != 0) {
8001 KASSERT(va_next <= eva,
8002 ("partial update of non-transparent 1G mapping "
8003 "pdpe %#lx sva %#lx eva %#lx va_next %#lx",
8004 *pdpe, sva, eva, va_next));
8005 MPASS(pmap != kernel_pmap); /* XXXKIB */
8006 MPASS((*pdpe & (PG_MANAGED | PG_G)) == 0);
8007 atomic_clear_long(pdpe, PG_W);
8008 pmap->pm_stats.wired_count -= NBPDP / PAGE_SIZE;
8009 continue;
8010 }
8011
8012 va_next = (sva + NBPDR) & ~PDRMASK;
8013 if (va_next < sva)
8014 va_next = eva;
8015 pde = pmap_pdpe_to_pde(pdpe, sva);
8016 if ((*pde & PG_V) == 0)
8017 continue;
8018 if ((*pde & PG_PS) != 0) {
8019 if ((*pde & PG_W) == 0)
8020 panic("pmap_unwire: pde %#jx is missing PG_W",
8021 (uintmax_t)*pde);
8022
8023 /*
8024 * Are we unwiring the entire large page? If not,
8025 * demote the mapping and fall through.
8026 */
8027 if (sva + NBPDR == va_next && eva >= va_next) {
8028 atomic_clear_long(pde, PG_W);
8029 pmap->pm_stats.wired_count -= NBPDR /
8030 PAGE_SIZE;
8031 continue;
8032 } else if (!pmap_demote_pde(pmap, pde, sva))
8033 panic("pmap_unwire: demotion failed");
8034 }
8035 if (va_next > eva)
8036 va_next = eva;
8037 for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
8038 sva += PAGE_SIZE) {
8039 if ((*pte & PG_V) == 0)
8040 continue;
8041 if ((*pte & PG_W) == 0)
8042 panic("pmap_unwire: pte %#jx is missing PG_W",
8043 (uintmax_t)*pte);
8044
8045 /*
8046 * PG_W must be cleared atomically. Although the pmap
8047 * lock synchronizes access to PG_W, another processor
8048 * could be setting PG_M and/or PG_A concurrently.
8049 */
8050 atomic_clear_long(pte, PG_W);
8051 pmap->pm_stats.wired_count--;
8052 }
8053 }
8054 PMAP_UNLOCK(pmap);
8055 }
8056
8057 /*
8058 * Copy the range specified by src_addr/len
8059 * from the source map to the range dst_addr/len
8060 * in the destination map.
8061 *
8062 * This routine is only advisory and need not do anything.
8063 */
8064 void
8065 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
8066 vm_offset_t src_addr)
8067 {
8068 struct rwlock *lock;
8069 pml4_entry_t *pml4e;
8070 pdp_entry_t *pdpe;
8071 pd_entry_t *pde, srcptepaddr;
8072 pt_entry_t *dst_pte, PG_A, PG_M, PG_V, ptetemp, *src_pte;
8073 vm_offset_t addr, end_addr, va_next;
8074 vm_page_t dst_pdpg, dstmpte, srcmpte;
8075
8076 if (dst_addr != src_addr)
8077 return;
8078
8079 if (dst_pmap->pm_type != src_pmap->pm_type)
8080 return;
8081
8082 /*
8083 * EPT page table entries that require emulation of A/D bits are
8084 * sensitive to clearing the PG_A bit (aka EPT_PG_READ). Although
8085 * we clear PG_M (aka EPT_PG_WRITE) concomitantly, the PG_U bit
8086 * (aka EPT_PG_EXECUTE) could still be set. Since some EPT
8087 * implementations flag an EPT misconfiguration for exec-only
8088 * mappings we skip this function entirely for emulated pmaps.
8089 */
8090 if (pmap_emulate_ad_bits(dst_pmap))
8091 return;
8092
8093 end_addr = src_addr + len;
8094 lock = NULL;
8095 if (dst_pmap < src_pmap) {
8096 PMAP_LOCK(dst_pmap);
8097 PMAP_LOCK(src_pmap);
8098 } else {
8099 PMAP_LOCK(src_pmap);
8100 PMAP_LOCK(dst_pmap);
8101 }
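	/*
	 * The pmaps are locked in address order so that a concurrent copy
	 * in the opposite direction cannot deadlock against this one.
	 */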
8102
8103 PG_A = pmap_accessed_bit(dst_pmap);
8104 PG_M = pmap_modified_bit(dst_pmap);
8105 PG_V = pmap_valid_bit(dst_pmap);
8106
8107 for (addr = src_addr; addr < end_addr; addr = va_next) {
8108 KASSERT(addr < UPT_MIN_ADDRESS,
8109 ("pmap_copy: invalid to pmap_copy page tables"));
8110
8111 pml4e = pmap_pml4e(src_pmap, addr);
8112 if (pml4e == NULL || (*pml4e & PG_V) == 0) {
8113 va_next = (addr + NBPML4) & ~PML4MASK;
8114 if (va_next < addr)
8115 va_next = end_addr;
8116 continue;
8117 }
8118
8119 va_next = (addr + NBPDP) & ~PDPMASK;
8120 if (va_next < addr)
8121 va_next = end_addr;
8122 pdpe = pmap_pml4e_to_pdpe(pml4e, addr);
8123 if ((*pdpe & PG_V) == 0)
8124 continue;
8125 if ((*pdpe & PG_PS) != 0) {
8126 KASSERT(va_next <= end_addr,
8127 ("partial update of non-transparent 1G mapping "
8128 "pdpe %#lx sva %#lx eva %#lx va_next %#lx",
8129 *pdpe, addr, end_addr, va_next));
8130 MPASS((addr & PDPMASK) == 0);
8131 MPASS((*pdpe & PG_MANAGED) == 0);
8132 srcptepaddr = *pdpe;
8133 pdpe = pmap_pdpe(dst_pmap, addr);
8134 if (pdpe == NULL) {
8135 if (pmap_allocpte_alloc(dst_pmap,
8136 pmap_pml4e_pindex(addr), NULL, addr) ==
8137 NULL)
8138 break;
8139 pdpe = pmap_pdpe(dst_pmap, addr);
8140 } else {
8141 pml4e = pmap_pml4e(dst_pmap, addr);
8142 dst_pdpg = PHYS_TO_VM_PAGE(*pml4e & PG_FRAME);
8143 dst_pdpg->ref_count++;
8144 }
8145 KASSERT(*pdpe == 0,
8146 ("1G mapping present in dst pmap "
8147 "pdpe %#lx sva %#lx eva %#lx va_next %#lx",
8148 *pdpe, addr, end_addr, va_next));
8149 *pdpe = srcptepaddr & ~PG_W;
8150 pmap_resident_count_adj(dst_pmap, NBPDP / PAGE_SIZE);
8151 continue;
8152 }
8153
8154 va_next = (addr + NBPDR) & ~PDRMASK;
8155 if (va_next < addr)
8156 va_next = end_addr;
8157
8158 pde = pmap_pdpe_to_pde(pdpe, addr);
8159 srcptepaddr = *pde;
8160 if (srcptepaddr == 0)
8161 continue;
8162
8163 if (srcptepaddr & PG_PS) {
8164 /*
8165 * We can only virtual copy whole superpages.
8166 */
8167 if ((addr & PDRMASK) != 0 || addr + NBPDR > end_addr)
8168 continue;
8169 pde = pmap_alloc_pde(dst_pmap, addr, &dst_pdpg, NULL);
8170 if (pde == NULL)
8171 break;
8172 if (*pde == 0 && ((srcptepaddr & PG_MANAGED) == 0 ||
8173 pmap_pv_insert_pde(dst_pmap, addr, srcptepaddr,
8174 PMAP_ENTER_NORECLAIM, &lock))) {
8175 /*
8176 * We leave the dirty bit unchanged because
8177 * managed read/write superpage mappings are
8178 * required to be dirty. However, managed
8179 * superpage mappings are not required to
8180 * have their accessed bit set, so we clear
8181 * it because we don't know if this mapping
8182 * will be used.
8183 */
8184 srcptepaddr &= ~PG_W;
8185 if ((srcptepaddr & PG_MANAGED) != 0)
8186 srcptepaddr &= ~PG_A;
8187 *pde = srcptepaddr;
8188 pmap_resident_count_adj(dst_pmap, NBPDR /
8189 PAGE_SIZE);
8190 counter_u64_add(pmap_pde_mappings, 1);
8191 } else
8192 pmap_abort_ptp(dst_pmap, addr, dst_pdpg);
8193 continue;
8194 }
8195
8196 srcptepaddr &= PG_FRAME;
8197 srcmpte = PHYS_TO_VM_PAGE(srcptepaddr);
8198 KASSERT(srcmpte->ref_count > 0,
8199 ("pmap_copy: source page table page is unused"));
8200
8201 if (va_next > end_addr)
8202 va_next = end_addr;
8203
8204 src_pte = (pt_entry_t *)PHYS_TO_DMAP(srcptepaddr);
8205 src_pte = &src_pte[pmap_pte_index(addr)];
8206 dstmpte = NULL;
8207 for (; addr < va_next; addr += PAGE_SIZE, src_pte++) {
8208 ptetemp = *src_pte;
8209
8210 /*
8211 * We only virtual copy managed pages.
8212 */
8213 if ((ptetemp & PG_MANAGED) == 0)
8214 continue;
8215
8216 if (dstmpte != NULL) {
8217 KASSERT(dstmpte->pindex ==
8218 pmap_pde_pindex(addr),
8219 ("dstmpte pindex/addr mismatch"));
8220 dstmpte->ref_count++;
8221 } else if ((dstmpte = pmap_allocpte(dst_pmap, addr,
8222 NULL)) == NULL)
8223 goto out;
8224 dst_pte = (pt_entry_t *)
8225 PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dstmpte));
8226 dst_pte = &dst_pte[pmap_pte_index(addr)];
8227 if (*dst_pte == 0 &&
8228 pmap_try_insert_pv_entry(dst_pmap, addr,
8229 PHYS_TO_VM_PAGE(ptetemp & PG_FRAME), &lock)) {
8230 /*
8231 * Clear the wired, modified, and accessed
8232 * (referenced) bits during the copy.
8233 */
8234 *dst_pte = ptetemp & ~(PG_W | PG_M | PG_A);
8235 pmap_resident_count_adj(dst_pmap, 1);
8236 } else {
8237 pmap_abort_ptp(dst_pmap, addr, dstmpte);
8238 goto out;
8239 }
8240 /* Have we copied all of the valid mappings? */
8241 if (dstmpte->ref_count >= srcmpte->ref_count)
8242 break;
8243 }
8244 }
8245 out:
8246 if (lock != NULL)
8247 rw_wunlock(lock);
8248 PMAP_UNLOCK(src_pmap);
8249 PMAP_UNLOCK(dst_pmap);
8250 }
8251
8252 int
8253 pmap_vmspace_copy(pmap_t dst_pmap, pmap_t src_pmap)
8254 {
8255 int error;
8256
8257 if (dst_pmap->pm_type != src_pmap->pm_type ||
8258 dst_pmap->pm_type != PT_X86 ||
8259 (cpu_stdext_feature2 & CPUID_STDEXT2_PKU) == 0)
8260 return (0);
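	/*
	 * Retry the PKRU range copy after waiting for free pages whenever
	 * it fails due to a memory shortage.
	 */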
8261 for (;;) {
8262 if (dst_pmap < src_pmap) {
8263 PMAP_LOCK(dst_pmap);
8264 PMAP_LOCK(src_pmap);
8265 } else {
8266 PMAP_LOCK(src_pmap);
8267 PMAP_LOCK(dst_pmap);
8268 }
8269 error = pmap_pkru_copy(dst_pmap, src_pmap);
8270 /* Clean up partial copy on failure due to no memory. */
8271 if (error == ENOMEM)
8272 pmap_pkru_deassign_all(dst_pmap);
8273 PMAP_UNLOCK(src_pmap);
8274 PMAP_UNLOCK(dst_pmap);
8275 if (error != ENOMEM)
8276 break;
8277 vm_wait(NULL);
8278 }
8279 return (error);
8280 }
8281
8282 /*
8283 * Zero the specified hardware page.
8284 */
8285 void
8286 pmap_zero_page(vm_page_t m)
8287 {
8288 vm_offset_t va;
8289
8290 #ifdef TSLOG_PAGEZERO
8291 TSENTER();
8292 #endif
8293 va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
8294 pagezero((void *)va);
8295 #ifdef TSLOG_PAGEZERO
8296 TSEXIT();
8297 #endif
8298 }
8299
8300 /*
8301 * Zero an area within a single hardware page. off and size must not
8302 * cover an area beyond a single hardware page.
8303 */
8304 void
8305 pmap_zero_page_area(vm_page_t m, int off, int size)
8306 {
8307 vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
8308
8309 if (off == 0 && size == PAGE_SIZE)
8310 pagezero((void *)va);
8311 else
8312 bzero((char *)va + off, size);
8313 }
8314
8315 /*
8316 * Copy 1 specified hardware page to another.
8317 */
8318 void
8319 pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
8320 {
8321 vm_offset_t src = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(msrc));
8322 vm_offset_t dst = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mdst));
8323
8324 pagecopy((void *)src, (void *)dst);
8325 }
8326
8327 int unmapped_buf_allowed = 1;
8328
8329 void
8330 pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
8331 vm_offset_t b_offset, int xfersize)
8332 {
8333 void *a_cp, *b_cp;
8334 vm_page_t pages[2];
8335 vm_offset_t vaddr[2], a_pg_offset, b_pg_offset;
8336 int cnt;
8337 bool mapped;
8338
8339 while (xfersize > 0) {
8340 a_pg_offset = a_offset & PAGE_MASK;
8341 pages[0] = ma[a_offset >> PAGE_SHIFT];
8342 b_pg_offset = b_offset & PAGE_MASK;
8343 pages[1] = mb[b_offset >> PAGE_SHIFT];
8344 cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
8345 cnt = min(cnt, PAGE_SIZE - b_pg_offset);
8346 mapped = pmap_map_io_transient(pages, vaddr, 2, false);
8347 a_cp = (char *)vaddr[0] + a_pg_offset;
8348 b_cp = (char *)vaddr[1] + b_pg_offset;
8349 bcopy(a_cp, b_cp, cnt);
8350 if (__predict_false(mapped))
8351 pmap_unmap_io_transient(pages, vaddr, 2, false);
8352 a_offset += cnt;
8353 b_offset += cnt;
8354 xfersize -= cnt;
8355 }
8356 }
8357
8358 /*
8359 * Returns true if the pmap's pv is one of the first
8360 * 16 pvs linked to from this page. This count may
8361 * be changed upwards or downwards in the future; it
8362 * is only necessary that true be returned for a small
8363 * subset of pmaps for proper page aging.
8364 */
8365 bool
8366 pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
8367 {
8368 struct md_page *pvh;
8369 struct rwlock *lock;
8370 pv_entry_t pv;
8371 int loops = 0;
8372 bool rv;
8373
8374 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
8375 ("pmap_page_exists_quick: page %p is not managed", m));
8376 rv = false;
8377 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
8378 rw_rlock(lock);
8379 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
8380 if (PV_PMAP(pv) == pmap) {
8381 rv = true;
8382 break;
8383 }
8384 loops++;
8385 if (loops >= 16)
8386 break;
8387 }
8388 if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) {
8389 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
8390 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
8391 if (PV_PMAP(pv) == pmap) {
8392 rv = true;
8393 break;
8394 }
8395 loops++;
8396 if (loops >= 16)
8397 break;
8398 }
8399 }
8400 rw_runlock(lock);
8401 return (rv);
8402 }
8403
8404 /*
8405 * pmap_page_wired_mappings:
8406 *
8407 * Return the number of managed mappings to the given physical page
8408 * that are wired.
8409 */
8410 int
8411 pmap_page_wired_mappings(vm_page_t m)
8412 {
8413 struct rwlock *lock;
8414 struct md_page *pvh;
8415 pmap_t pmap;
8416 pt_entry_t *pte;
8417 pv_entry_t pv;
8418 int count, md_gen, pvh_gen;
8419
8420 if ((m->oflags & VPO_UNMANAGED) != 0)
8421 return (0);
8422 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
8423 rw_rlock(lock);
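	/*
	 * If a pmap lock cannot be acquired without blocking, drop the PV
	 * list lock first and revalidate the generation counts afterwards
	 * to detect concurrent changes to the PV lists.
	 */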
8424 restart:
8425 count = 0;
8426 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
8427 pmap = PV_PMAP(pv);
8428 if (!PMAP_TRYLOCK(pmap)) {
8429 md_gen = m->md.pv_gen;
8430 rw_runlock(lock);
8431 PMAP_LOCK(pmap);
8432 rw_rlock(lock);
8433 if (md_gen != m->md.pv_gen) {
8434 PMAP_UNLOCK(pmap);
8435 goto restart;
8436 }
8437 }
8438 pte = pmap_pte(pmap, pv->pv_va);
8439 if ((*pte & PG_W) != 0)
8440 count++;
8441 PMAP_UNLOCK(pmap);
8442 }
8443 if ((m->flags & PG_FICTITIOUS) == 0) {
8444 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
8445 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
8446 pmap = PV_PMAP(pv);
8447 if (!PMAP_TRYLOCK(pmap)) {
8448 md_gen = m->md.pv_gen;
8449 pvh_gen = pvh->pv_gen;
8450 rw_runlock(lock);
8451 PMAP_LOCK(pmap);
8452 rw_rlock(lock);
8453 if (md_gen != m->md.pv_gen ||
8454 pvh_gen != pvh->pv_gen) {
8455 PMAP_UNLOCK(pmap);
8456 goto restart;
8457 }
8458 }
8459 pte = pmap_pde(pmap, pv->pv_va);
8460 if ((*pte & PG_W) != 0)
8461 count++;
8462 PMAP_UNLOCK(pmap);
8463 }
8464 }
8465 rw_runlock(lock);
8466 return (count);
8467 }
8468
8469 /*
8470 * Returns true if the given page is mapped individually or as part of
8471 * a 2mpage. Otherwise, returns false.
8472 */
8473 bool
8474 pmap_page_is_mapped(vm_page_t m)
8475 {
8476 struct rwlock *lock;
8477 bool rv;
8478
8479 if ((m->oflags & VPO_UNMANAGED) != 0)
8480 return (false);
8481 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
8482 rw_rlock(lock);
8483 rv = !TAILQ_EMPTY(&m->md.pv_list) ||
8484 ((m->flags & PG_FICTITIOUS) == 0 &&
8485 !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list));
8486 rw_runlock(lock);
8487 return (rv);
8488 }
8489
8490 /*
8491 * Destroy all managed, non-wired mappings in the given user-space
8492 * pmap. This pmap cannot be active on any processor besides the
8493 * caller.
8494 *
8495 * This function cannot be applied to the kernel pmap. Moreover, it
8496 * is not intended for general use. It is only to be used during
8497 * process termination. Consequently, it can be implemented in ways
8498 * that make it faster than pmap_remove(). First, it can more quickly
8499 * destroy mappings by iterating over the pmap's collection of PV
8500 * entries, rather than searching the page table. Second, it doesn't
8501 * have to test and clear the page table entries atomically, because
8502 * no processor is currently accessing the user address space. In
8503 * particular, a page table entry's dirty bit won't change state once
8504 * this function starts.
8505 *
8506 * Although this function destroys all of the pmap's managed,
8507 * non-wired mappings, it can delay and batch the invalidation of TLB
8508 * entries without calling pmap_delayed_invl_start() and
8509 * pmap_delayed_invl_finish(). Because the pmap is not active on
8510 * any other processor, none of these TLB entries will ever be used
8511 * before their eventual invalidation. Consequently, there is no need
8512 * for either pmap_remove_all() or pmap_remove_write() to wait for
8513 * that eventual TLB invalidation.
8514 */
8515 void
8516 pmap_remove_pages(pmap_t pmap)
8517 {
8518 pd_entry_t ptepde;
8519 pt_entry_t *pte, tpte;
8520 pt_entry_t PG_M, PG_RW, PG_V;
8521 struct spglist free;
8522 struct pv_chunklist free_chunks[PMAP_MEMDOM];
8523 vm_page_t m, mpte, mt;
8524 pv_entry_t pv;
8525 struct md_page *pvh;
8526 struct pv_chunk *pc, *npc;
8527 struct rwlock *lock;
8528 int64_t bit;
8529 uint64_t inuse, bitmask;
8530 int allfree, field, i, idx;
8531 #ifdef PV_STATS
8532 int freed;
8533 #endif
8534 bool superpage;
8535 vm_paddr_t pa;
8536
8537 /*
8538 * Assert that the given pmap is only active on the current
8539 * CPU. Unfortunately, we cannot block another CPU from
8540 * activating the pmap while this function is executing.
8541 */
8542 KASSERT(pmap == PCPU_GET(curpmap), ("non-current pmap %p", pmap));
8543 #ifdef INVARIANTS
8544 {
8545 cpuset_t other_cpus;
8546
8547 other_cpus = all_cpus;
8548 critical_enter();
8549 CPU_CLR(PCPU_GET(cpuid), &other_cpus);
8550 CPU_AND(&other_cpus, &other_cpus, &pmap->pm_active);
8551 critical_exit();
8552 KASSERT(CPU_EMPTY(&other_cpus), ("pmap active %p", pmap));
8553 }
8554 #endif
8555
8556 lock = NULL;
8557 PG_M = pmap_modified_bit(pmap);
8558 PG_V = pmap_valid_bit(pmap);
8559 PG_RW = pmap_rw_bit(pmap);
8560
8561 for (i = 0; i < PMAP_MEMDOM; i++)
8562 TAILQ_INIT(&free_chunks[i]);
8563 SLIST_INIT(&free);
8564 PMAP_LOCK(pmap);
8565 TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
8566 allfree = 1;
8567 #ifdef PV_STATS
8568 freed = 0;
8569 #endif
8570 for (field = 0; field < _NPCM; field++) {
8571 inuse = ~pc->pc_map[field] & pc_freemask[field];
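			/*
			 * Each set bit in "inuse" identifies a live PV entry
			 * in this chunk; bsfq() extracts the lowest one on
			 * each pass through the loop below.
			 */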
8572 while (inuse != 0) {
8573 bit = bsfq(inuse);
8574 bitmask = 1UL << bit;
8575 idx = field * 64 + bit;
8576 pv = &pc->pc_pventry[idx];
8577 inuse &= ~bitmask;
8578
8579 pte = pmap_pdpe(pmap, pv->pv_va);
8580 ptepde = *pte;
8581 pte = pmap_pdpe_to_pde(pte, pv->pv_va);
8582 tpte = *pte;
8583 if ((tpte & (PG_PS | PG_V)) == PG_V) {
8584 superpage = false;
8585 ptepde = tpte;
8586 pte = (pt_entry_t *)PHYS_TO_DMAP(tpte &
8587 PG_FRAME);
8588 pte = &pte[pmap_pte_index(pv->pv_va)];
8589 tpte = *pte;
8590 } else {
8591 /*
8592 					 * Keep track explicitly of whether
8593 					 * 'tpte' is a superpage instead of
8594 					 * relying on PG_PS being set.
8595 *
8596 * This is because PG_PS is numerically
8597 * identical to PG_PTE_PAT and thus a
8598 * regular page could be mistaken for
8599 * a superpage.
8600 */
8601 superpage = true;
8602 }
8603
8604 if ((tpte & PG_V) == 0) {
8605 panic("bad pte va %lx pte %lx",
8606 pv->pv_va, tpte);
8607 }
8608
8609 /*
8610 * We cannot remove wired pages from a process' mapping at this time
8611 */
8612 if (tpte & PG_W) {
8613 allfree = 0;
8614 continue;
8615 }
8616
8617 /* Mark free */
8618 pc->pc_map[field] |= bitmask;
8619
8620 /*
8621 * Because this pmap is not active on other
8622 * processors, the dirty bit cannot have
8623 * changed state since we last loaded pte.
8624 */
8625 pte_clear(pte);
8626
8627 if (superpage)
8628 pa = tpte & PG_PS_FRAME;
8629 else
8630 pa = tpte & PG_FRAME;
8631
8632 m = PHYS_TO_VM_PAGE(pa);
8633 KASSERT(m->phys_addr == pa,
8634 ("vm_page_t %p phys_addr mismatch %016jx %016jx",
8635 m, (uintmax_t)m->phys_addr,
8636 (uintmax_t)tpte));
8637
8638 KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
8639 m < &vm_page_array[vm_page_array_size],
8640 ("pmap_remove_pages: bad tpte %#jx",
8641 (uintmax_t)tpte));
8642
8643 /*
8644 * Update the vm_page_t clean/reference bits.
8645 */
8646 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
8647 if (superpage) {
8648 for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
8649 vm_page_dirty(mt);
8650 } else
8651 vm_page_dirty(m);
8652 }
8653
8654 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m);
8655
8656 if (superpage) {
8657 pmap_resident_count_adj(pmap, -NBPDR / PAGE_SIZE);
8658 pvh = pa_to_pvh(tpte & PG_PS_FRAME);
8659 TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
8660 pvh->pv_gen++;
8661 if (TAILQ_EMPTY(&pvh->pv_list)) {
8662 for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
8663 if ((mt->a.flags & PGA_WRITEABLE) != 0 &&
8664 TAILQ_EMPTY(&mt->md.pv_list))
8665 vm_page_aflag_clear(mt, PGA_WRITEABLE);
8666 }
8667 mpte = pmap_remove_pt_page(pmap, pv->pv_va);
8668 if (mpte != NULL) {
8669 KASSERT(vm_page_any_valid(mpte),
8670 ("pmap_remove_pages: pte page not promoted"));
8671 pmap_pt_page_count_adj(pmap, -1);
8672 KASSERT(mpte->ref_count == NPTEPG,
8673 ("pmap_remove_pages: pte page reference count error"));
8674 mpte->ref_count = 0;
8675 pmap_add_delayed_free_list(mpte, &free, false);
8676 }
8677 } else {
8678 pmap_resident_count_adj(pmap, -1);
8679 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
8680 m->md.pv_gen++;
8681 if ((m->a.flags & PGA_WRITEABLE) != 0 &&
8682 TAILQ_EMPTY(&m->md.pv_list) &&
8683 (m->flags & PG_FICTITIOUS) == 0) {
8684 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
8685 if (TAILQ_EMPTY(&pvh->pv_list))
8686 vm_page_aflag_clear(m, PGA_WRITEABLE);
8687 }
8688 }
8689 pmap_unuse_pt(pmap, pv->pv_va, ptepde, &free);
8690 #ifdef PV_STATS
8691 freed++;
8692 #endif
8693 }
8694 }
8695 PV_STAT(counter_u64_add(pv_entry_frees, freed));
8696 PV_STAT(counter_u64_add(pv_entry_spare, freed));
8697 PV_STAT(counter_u64_add(pv_entry_count, -freed));
8698 if (allfree) {
8699 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
8700 TAILQ_INSERT_TAIL(&free_chunks[pc_to_domain(pc)], pc, pc_list);
8701 }
8702 }
8703 if (lock != NULL)
8704 rw_wunlock(lock);
8705 pmap_invalidate_all(pmap);
8706 pmap_pkru_deassign_all(pmap);
8707 free_pv_chunk_batch((struct pv_chunklist *)&free_chunks);
8708 PMAP_UNLOCK(pmap);
8709 vm_page_free_pages_toq(&free, true);
8710 }
8711
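/*
 * Test whether any mapping of the given page has the requested accessed
 * and/or modified attributes set.  Both 4KB and 2MB mappings are
 * examined, and the scan stops as soon as a matching mapping is found.
 */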
8712 static bool
8713 pmap_page_test_mappings(vm_page_t m, bool accessed, bool modified)
8714 {
8715 struct rwlock *lock;
8716 pv_entry_t pv;
8717 struct md_page *pvh;
8718 pt_entry_t *pte, mask;
8719 pt_entry_t PG_A, PG_M, PG_RW, PG_V;
8720 pmap_t pmap;
8721 int md_gen, pvh_gen;
8722 bool rv;
8723
8724 rv = false;
8725 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
8726 rw_rlock(lock);
8727 restart:
8728 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
8729 pmap = PV_PMAP(pv);
8730 if (!PMAP_TRYLOCK(pmap)) {
8731 md_gen = m->md.pv_gen;
8732 rw_runlock(lock);
8733 PMAP_LOCK(pmap);
8734 rw_rlock(lock);
8735 if (md_gen != m->md.pv_gen) {
8736 PMAP_UNLOCK(pmap);
8737 goto restart;
8738 }
8739 }
8740 pte = pmap_pte(pmap, pv->pv_va);
8741 mask = 0;
8742 if (modified) {
8743 PG_M = pmap_modified_bit(pmap);
8744 PG_RW = pmap_rw_bit(pmap);
8745 mask |= PG_RW | PG_M;
8746 }
8747 if (accessed) {
8748 PG_A = pmap_accessed_bit(pmap);
8749 PG_V = pmap_valid_bit(pmap);
8750 mask |= PG_V | PG_A;
8751 }
8752 rv = (*pte & mask) == mask;
8753 PMAP_UNLOCK(pmap);
8754 if (rv)
8755 goto out;
8756 }
8757 if ((m->flags & PG_FICTITIOUS) == 0) {
8758 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
8759 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
8760 pmap = PV_PMAP(pv);
8761 if (!PMAP_TRYLOCK(pmap)) {
8762 md_gen = m->md.pv_gen;
8763 pvh_gen = pvh->pv_gen;
8764 rw_runlock(lock);
8765 PMAP_LOCK(pmap);
8766 rw_rlock(lock);
8767 if (md_gen != m->md.pv_gen ||
8768 pvh_gen != pvh->pv_gen) {
8769 PMAP_UNLOCK(pmap);
8770 goto restart;
8771 }
8772 }
8773 pte = pmap_pde(pmap, pv->pv_va);
8774 mask = 0;
8775 if (modified) {
8776 PG_M = pmap_modified_bit(pmap);
8777 PG_RW = pmap_rw_bit(pmap);
8778 mask |= PG_RW | PG_M;
8779 }
8780 if (accessed) {
8781 PG_A = pmap_accessed_bit(pmap);
8782 PG_V = pmap_valid_bit(pmap);
8783 mask |= PG_V | PG_A;
8784 }
8785 rv = (*pte & mask) == mask;
8786 PMAP_UNLOCK(pmap);
8787 if (rv)
8788 goto out;
8789 }
8790 }
8791 out:
8792 rw_runlock(lock);
8793 return (rv);
8794 }
8795
8796 /*
8797 * pmap_is_modified:
8798 *
8799 * Return whether or not the specified physical page was modified
8800 * in any physical maps.
8801 */
8802 bool
8803 pmap_is_modified(vm_page_t m)
8804 {
8805
8806 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
8807 ("pmap_is_modified: page %p is not managed", m));
8808
8809 /*
8810 * If the page is not busied then this check is racy.
8811 */
8812 if (!pmap_page_is_write_mapped(m))
8813 return (false);
8814 return (pmap_page_test_mappings(m, false, true));
8815 }
8816
8817 /*
8818 * pmap_is_prefaultable:
8819 *
8820 * Return whether or not the specified virtual address is eligible
8821 * for prefault.
8822 */
8823 bool
8824 pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
8825 {
8826 pd_entry_t *pde;
8827 pt_entry_t *pte, PG_V;
8828 bool rv;
8829
8830 PG_V = pmap_valid_bit(pmap);
8831
8832 /*
8833 * Return true if and only if the PTE for the specified virtual
8834 * address is allocated but invalid.
8835 */
8836 rv = false;
8837 PMAP_LOCK(pmap);
8838 pde = pmap_pde(pmap, addr);
8839 if (pde != NULL && (*pde & (PG_PS | PG_V)) == PG_V) {
8840 pte = pmap_pde_to_pte(pde, addr);
8841 rv = (*pte & PG_V) == 0;
8842 }
8843 PMAP_UNLOCK(pmap);
8844 return (rv);
8845 }
8846
8847 /*
8848 * pmap_is_referenced:
8849 *
8850 * Return whether or not the specified physical page was referenced
8851 * in any physical maps.
8852 */
8853 bool
8854 pmap_is_referenced(vm_page_t m)
8855 {
8856
8857 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
8858 ("pmap_is_referenced: page %p is not managed", m));
8859 return (pmap_page_test_mappings(m, true, false));
8860 }
8861
8862 /*
8863 * Clear the write and modified bits in each of the given page's mappings.
8864 */
8865 void
8866 pmap_remove_write(vm_page_t m)
8867 {
8868 struct md_page *pvh;
8869 pmap_t pmap;
8870 struct rwlock *lock;
8871 pv_entry_t next_pv, pv;
8872 pd_entry_t *pde;
8873 pt_entry_t oldpte, *pte, PG_M, PG_RW;
8874 vm_offset_t va;
8875 int pvh_gen, md_gen;
8876
8877 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
8878 ("pmap_remove_write: page %p is not managed", m));
8879
8880 vm_page_assert_busied(m);
8881 if (!pmap_page_is_write_mapped(m))
8882 return;
8883
8884 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
8885 pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
8886 pa_to_pvh(VM_PAGE_TO_PHYS(m));
8887 rw_wlock(lock);
8888 retry:
8889 TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
8890 pmap = PV_PMAP(pv);
8891 if (!PMAP_TRYLOCK(pmap)) {
8892 pvh_gen = pvh->pv_gen;
8893 rw_wunlock(lock);
8894 PMAP_LOCK(pmap);
8895 rw_wlock(lock);
8896 if (pvh_gen != pvh->pv_gen) {
8897 PMAP_UNLOCK(pmap);
8898 goto retry;
8899 }
8900 }
8901 PG_RW = pmap_rw_bit(pmap);
8902 va = pv->pv_va;
8903 pde = pmap_pde(pmap, va);
8904 if ((*pde & PG_RW) != 0)
8905 (void)pmap_demote_pde_locked(pmap, pde, va, &lock);
8906 KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
8907 ("inconsistent pv lock %p %p for page %p",
8908 lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
8909 PMAP_UNLOCK(pmap);
8910 }
8911 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
8912 pmap = PV_PMAP(pv);
8913 if (!PMAP_TRYLOCK(pmap)) {
8914 pvh_gen = pvh->pv_gen;
8915 md_gen = m->md.pv_gen;
8916 rw_wunlock(lock);
8917 PMAP_LOCK(pmap);
8918 rw_wlock(lock);
8919 if (pvh_gen != pvh->pv_gen ||
8920 md_gen != m->md.pv_gen) {
8921 PMAP_UNLOCK(pmap);
8922 goto retry;
8923 }
8924 }
8925 PG_M = pmap_modified_bit(pmap);
8926 PG_RW = pmap_rw_bit(pmap);
8927 pde = pmap_pde(pmap, pv->pv_va);
8928 KASSERT((*pde & PG_PS) == 0,
8929 ("pmap_remove_write: found a 2mpage in page %p's pv list",
8930 m));
8931 pte = pmap_pde_to_pte(pde, pv->pv_va);
8932 oldpte = *pte;
8933 if (oldpte & PG_RW) {
8934 while (!atomic_fcmpset_long(pte, &oldpte, oldpte &
8935 ~(PG_RW | PG_M)))
8936 cpu_spinwait();
8937 if ((oldpte & PG_M) != 0)
8938 vm_page_dirty(m);
8939 pmap_invalidate_page(pmap, pv->pv_va);
8940 }
8941 PMAP_UNLOCK(pmap);
8942 }
8943 rw_wunlock(lock);
8944 vm_page_aflag_clear(m, PGA_WRITEABLE);
8945 pmap_delayed_invl_wait(m);
8946 }
8947
8948 /*
8949 * pmap_ts_referenced:
8950 *
8951 * Return a count of reference bits for a page, clearing those bits.
8952 * It is not necessary for every reference bit to be cleared, but it
8953 * is necessary that 0 only be returned when there are truly no
8954 * reference bits set.
8955 *
8956 * As an optimization, update the page's dirty field if a modified bit is
8957 * found while counting reference bits. This opportunistic update can be
8958 * performed at low cost and can eliminate the need for some future calls
8959 * to pmap_is_modified(). However, since this function stops after
8960 * finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some
8961 * dirty pages. Those dirty pages will only be detected by a future call
8962 * to pmap_is_modified().
8963 *
8964 * A DI block is not needed within this function, because
8965 * invalidations are performed before the PV list lock is
8966 * released.
8967 */
8968 int
8969 pmap_ts_referenced(vm_page_t m)
8970 {
8971 struct md_page *pvh;
8972 pv_entry_t pv, pvf;
8973 pmap_t pmap;
8974 struct rwlock *lock;
8975 pd_entry_t oldpde, *pde;
8976 pt_entry_t *pte, PG_A, PG_M, PG_RW;
8977 vm_offset_t va;
8978 vm_paddr_t pa;
8979 int cleared, md_gen, not_cleared, pvh_gen;
8980 struct spglist free;
8981 bool demoted;
8982
8983 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
8984 ("pmap_ts_referenced: page %p is not managed", m));
8985 SLIST_INIT(&free);
8986 cleared = 0;
8987 pa = VM_PAGE_TO_PHYS(m);
8988 lock = PHYS_TO_PV_LIST_LOCK(pa);
8989 pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : pa_to_pvh(pa);
8990 rw_wlock(lock);
8991 retry:
8992 not_cleared = 0;
8993 if ((pvf = TAILQ_FIRST(&pvh->pv_list)) == NULL)
8994 goto small_mappings;
8995 pv = pvf;
8996 do {
8997 if (pvf == NULL)
8998 pvf = pv;
8999 pmap = PV_PMAP(pv);
9000 if (!PMAP_TRYLOCK(pmap)) {
9001 pvh_gen = pvh->pv_gen;
9002 rw_wunlock(lock);
9003 PMAP_LOCK(pmap);
9004 rw_wlock(lock);
9005 if (pvh_gen != pvh->pv_gen) {
9006 PMAP_UNLOCK(pmap);
9007 goto retry;
9008 }
9009 }
9010 PG_A = pmap_accessed_bit(pmap);
9011 PG_M = pmap_modified_bit(pmap);
9012 PG_RW = pmap_rw_bit(pmap);
9013 va = pv->pv_va;
9014 pde = pmap_pde(pmap, pv->pv_va);
9015 oldpde = *pde;
9016 if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
9017 /*
9018 * Although "oldpde" is mapping a 2MB page, because
9019 * this function is called at a 4KB page granularity,
9020 * we only update the 4KB page under test.
9021 */
9022 vm_page_dirty(m);
9023 }
9024 if ((oldpde & PG_A) != 0) {
9025 /*
9026 * Since this reference bit is shared by 512 4KB
9027 * pages, it should not be cleared every time it is
9028 * tested. Apply a simple "hash" function on the
9029 * physical page number, the virtual superpage number,
9030 * and the pmap address to select one 4KB page out of
9031 * the 512 on which testing the reference bit will
9032 * result in clearing that reference bit. This
9033 * function is designed to avoid the selection of the
9034 * same 4KB page for every 2MB page mapping.
9035 *
9036 * On demotion, a mapping that hasn't been referenced
9037 * is simply destroyed. To avoid the possibility of a
9038 * subsequent page fault on a demoted wired mapping,
9039 * always leave its reference bit set. Moreover,
9040 * since the superpage is wired, the current state of
9041 * its reference bit won't affect page replacement.
9042 */
9043 if ((((pa >> PAGE_SHIFT) ^ (pv->pv_va >> PDRSHIFT) ^
9044 (uintptr_t)pmap) & (NPTEPG - 1)) == 0 &&
9045 (oldpde & PG_W) == 0) {
9046 if (safe_to_clear_referenced(pmap, oldpde)) {
9047 atomic_clear_long(pde, PG_A);
9048 pmap_invalidate_page(pmap, pv->pv_va);
9049 demoted = false;
9050 } else if (pmap_demote_pde_locked(pmap, pde,
9051 pv->pv_va, &lock)) {
9052 /*
9053 * Remove the mapping to a single page
9054 * so that a subsequent access may
9055 * repromote. Since the underlying
9056 * page table page is fully populated,
9057 * this removal never frees a page
9058 * table page.
9059 */
9060 demoted = true;
9061 va += VM_PAGE_TO_PHYS(m) - (oldpde &
9062 PG_PS_FRAME);
9063 pte = pmap_pde_to_pte(pde, va);
9064 pmap_remove_pte(pmap, pte, va, *pde,
9065 NULL, &lock);
9066 pmap_invalidate_page(pmap, va);
9067 } else
9068 demoted = true;
9069
9070 if (demoted) {
9071 /*
9072 * The superpage mapping was removed
9073 * entirely and therefore 'pv' is no
9074 * longer valid.
9075 */
9076 if (pvf == pv)
9077 pvf = NULL;
9078 pv = NULL;
9079 }
9080 cleared++;
9081 KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
9082 ("inconsistent pv lock %p %p for page %p",
9083 lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
9084 } else
9085 not_cleared++;
9086 }
9087 PMAP_UNLOCK(pmap);
9088 /* Rotate the PV list if it has more than one entry. */
9089 if (pv != NULL && TAILQ_NEXT(pv, pv_next) != NULL) {
9090 TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
9091 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
9092 pvh->pv_gen++;
9093 }
9094 if (cleared + not_cleared >= PMAP_TS_REFERENCED_MAX)
9095 goto out;
9096 } while ((pv = TAILQ_FIRST(&pvh->pv_list)) != pvf);
9097 small_mappings:
9098 if ((pvf = TAILQ_FIRST(&m->md.pv_list)) == NULL)
9099 goto out;
9100 pv = pvf;
9101 do {
9102 if (pvf == NULL)
9103 pvf = pv;
9104 pmap = PV_PMAP(pv);
9105 if (!PMAP_TRYLOCK(pmap)) {
9106 pvh_gen = pvh->pv_gen;
9107 md_gen = m->md.pv_gen;
9108 rw_wunlock(lock);
9109 PMAP_LOCK(pmap);
9110 rw_wlock(lock);
9111 if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
9112 PMAP_UNLOCK(pmap);
9113 goto retry;
9114 }
9115 }
9116 PG_A = pmap_accessed_bit(pmap);
9117 PG_M = pmap_modified_bit(pmap);
9118 PG_RW = pmap_rw_bit(pmap);
9119 pde = pmap_pde(pmap, pv->pv_va);
9120 KASSERT((*pde & PG_PS) == 0,
9121 ("pmap_ts_referenced: found a 2mpage in page %p's pv list",
9122 m));
9123 pte = pmap_pde_to_pte(pde, pv->pv_va);
9124 if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
9125 vm_page_dirty(m);
9126 if ((*pte & PG_A) != 0) {
9127 if (safe_to_clear_referenced(pmap, *pte)) {
9128 atomic_clear_long(pte, PG_A);
9129 pmap_invalidate_page(pmap, pv->pv_va);
9130 cleared++;
9131 } else if ((*pte & PG_W) == 0) {
9132 /*
9133 * Wired pages cannot be paged out so
9134 * doing accessed bit emulation for
9135 * them is wasted effort. We do the
9136 * hard work for unwired pages only.
9137 */
9138 pmap_remove_pte(pmap, pte, pv->pv_va,
9139 *pde, &free, &lock);
9140 pmap_invalidate_page(pmap, pv->pv_va);
9141 cleared++;
9142 if (pvf == pv)
9143 pvf = NULL;
9144 pv = NULL;
9145 KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
9146 ("inconsistent pv lock %p %p for page %p",
9147 lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
9148 } else
9149 not_cleared++;
9150 }
9151 PMAP_UNLOCK(pmap);
9152 /* Rotate the PV list if it has more than one entry. */
9153 if (pv != NULL && TAILQ_NEXT(pv, pv_next) != NULL) {
9154 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
9155 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
9156 m->md.pv_gen++;
9157 }
9158 } while ((pv = TAILQ_FIRST(&m->md.pv_list)) != pvf && cleared +
9159 not_cleared < PMAP_TS_REFERENCED_MAX);
9160 out:
9161 rw_wunlock(lock);
9162 vm_page_free_pages_toq(&free, true);
9163 return (cleared + not_cleared);
9164 }
9165
9166 /*
9167 * Apply the given advice to the specified range of addresses within the
9168 * given pmap. Depending on the advice, clear the referenced and/or
9169 * modified flags in each mapping and set the mapped page's dirty field.
9170 */
9171 void
9172 pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
9173 {
9174 struct rwlock *lock;
9175 pml4_entry_t *pml4e;
9176 pdp_entry_t *pdpe;
9177 pd_entry_t oldpde, *pde;
9178 pt_entry_t *pte, PG_A, PG_G, PG_M, PG_RW, PG_V;
9179 vm_offset_t va, va_next;
9180 vm_page_t m;
9181 bool anychanged;
9182
9183 if (advice != MADV_DONTNEED && advice != MADV_FREE)
9184 return;
9185
9186 /*
9187 * A/D bit emulation requires an alternate code path when clearing
9188 * the modified and accessed bits below. Since this function is
9189 * advisory in nature we skip it entirely for pmaps that require
9190 * A/D bit emulation.
9191 */
9192 if (pmap_emulate_ad_bits(pmap))
9193 return;
9194
9195 PG_A = pmap_accessed_bit(pmap);
9196 PG_G = pmap_global_bit(pmap);
9197 PG_M = pmap_modified_bit(pmap);
9198 PG_V = pmap_valid_bit(pmap);
9199 PG_RW = pmap_rw_bit(pmap);
9200 anychanged = false;
9201 pmap_delayed_invl_start();
9202 PMAP_LOCK(pmap);
9203 for (; sva < eva; sva = va_next) {
9204 pml4e = pmap_pml4e(pmap, sva);
9205 if (pml4e == NULL || (*pml4e & PG_V) == 0) {
9206 va_next = (sva + NBPML4) & ~PML4MASK;
9207 if (va_next < sva)
9208 va_next = eva;
9209 continue;
9210 }
9211
9212 va_next = (sva + NBPDP) & ~PDPMASK;
9213 if (va_next < sva)
9214 va_next = eva;
9215 pdpe = pmap_pml4e_to_pdpe(pml4e, sva);
9216 if ((*pdpe & PG_V) == 0)
9217 continue;
9218 if ((*pdpe & PG_PS) != 0)
9219 continue;
9220
9221 va_next = (sva + NBPDR) & ~PDRMASK;
9222 if (va_next < sva)
9223 va_next = eva;
9224 pde = pmap_pdpe_to_pde(pdpe, sva);
9225 oldpde = *pde;
9226 if ((oldpde & PG_V) == 0)
9227 continue;
9228 else if ((oldpde & PG_PS) != 0) {
9229 if ((oldpde & PG_MANAGED) == 0)
9230 continue;
9231 lock = NULL;
9232 if (!pmap_demote_pde_locked(pmap, pde, sva, &lock)) {
9233 if (lock != NULL)
9234 rw_wunlock(lock);
9235
9236 /*
9237 * The large page mapping was destroyed.
9238 */
9239 continue;
9240 }
9241
9242 /*
9243 * Unless the page mappings are wired, remove the
9244 * mapping to a single page so that a subsequent
9245 * access may repromote. Choosing the last page
9246 * within the address range [sva, min(va_next, eva))
9247 * generally results in more repromotions. Since the
9248 * underlying page table page is fully populated, this
9249 * removal never frees a page table page.
9250 */
9251 if ((oldpde & PG_W) == 0) {
9252 va = eva;
9253 if (va > va_next)
9254 va = va_next;
9255 va -= PAGE_SIZE;
9256 KASSERT(va >= sva,
9257 ("pmap_advise: no address gap"));
9258 pte = pmap_pde_to_pte(pde, va);
9259 KASSERT((*pte & PG_V) != 0,
9260 ("pmap_advise: invalid PTE"));
9261 pmap_remove_pte(pmap, pte, va, *pde, NULL,
9262 &lock);
9263 anychanged = true;
9264 }
9265 if (lock != NULL)
9266 rw_wunlock(lock);
9267 }
9268 if (va_next > eva)
9269 va_next = eva;
9270 va = va_next;
9271 for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
9272 sva += PAGE_SIZE) {
9273 if ((*pte & (PG_MANAGED | PG_V)) != (PG_MANAGED | PG_V))
9274 goto maybe_invlrng;
9275 else if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
9276 if (advice == MADV_DONTNEED) {
9277 /*
9278 * Future calls to pmap_is_modified()
9279 * can be avoided by making the page
9280 * dirty now.
9281 */
9282 m = PHYS_TO_VM_PAGE(*pte & PG_FRAME);
9283 vm_page_dirty(m);
9284 }
9285 atomic_clear_long(pte, PG_M | PG_A);
9286 } else if ((*pte & PG_A) != 0)
9287 atomic_clear_long(pte, PG_A);
9288 else
9289 goto maybe_invlrng;
9290
9291 if ((*pte & PG_G) != 0) {
9292 if (va == va_next)
9293 va = sva;
9294 } else
9295 anychanged = true;
9296 continue;
9297 maybe_invlrng:
9298 if (va != va_next) {
9299 pmap_invalidate_range(pmap, va, sva);
9300 va = va_next;
9301 }
9302 }
9303 if (va != va_next)
9304 pmap_invalidate_range(pmap, va, sva);
9305 }
9306 if (anychanged)
9307 pmap_invalidate_all(pmap);
9308 PMAP_UNLOCK(pmap);
9309 pmap_delayed_invl_finish();
9310 }
9311
9312 /*
9313 * Clear the modify bits on the specified physical page.
9314 */
9315 void
9316 pmap_clear_modify(vm_page_t m)
9317 {
9318 struct md_page *pvh;
9319 pmap_t pmap;
9320 pv_entry_t next_pv, pv;
9321 pd_entry_t oldpde, *pde;
9322 pt_entry_t *pte, PG_M, PG_RW;
9323 struct rwlock *lock;
9324 vm_offset_t va;
9325 int md_gen, pvh_gen;
9326
9327 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
9328 ("pmap_clear_modify: page %p is not managed", m));
9329 vm_page_assert_busied(m);
9330
9331 if (!pmap_page_is_write_mapped(m))
9332 return;
9333 pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
9334 pa_to_pvh(VM_PAGE_TO_PHYS(m));
9335 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
9336 rw_wlock(lock);
9337 restart:
9338 TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
9339 pmap = PV_PMAP(pv);
9340 if (!PMAP_TRYLOCK(pmap)) {
9341 pvh_gen = pvh->pv_gen;
9342 rw_wunlock(lock);
9343 PMAP_LOCK(pmap);
9344 rw_wlock(lock);
9345 if (pvh_gen != pvh->pv_gen) {
9346 PMAP_UNLOCK(pmap);
9347 goto restart;
9348 }
9349 }
9350 PG_M = pmap_modified_bit(pmap);
9351 PG_RW = pmap_rw_bit(pmap);
9352 va = pv->pv_va;
9353 pde = pmap_pde(pmap, va);
9354 oldpde = *pde;
9355 /* If oldpde has PG_RW set, then it also has PG_M set. */
9356 if ((oldpde & PG_RW) != 0 &&
9357 pmap_demote_pde_locked(pmap, pde, va, &lock) &&
9358 (oldpde & PG_W) == 0) {
9359 /*
9360 * Write protect the mapping to a single page so that
9361 * a subsequent write access may repromote.
9362 */
9363 va += VM_PAGE_TO_PHYS(m) - (oldpde & PG_PS_FRAME);
9364 pte = pmap_pde_to_pte(pde, va);
9365 atomic_clear_long(pte, PG_M | PG_RW);
9366 vm_page_dirty(m);
9367 pmap_invalidate_page(pmap, va);
9368 }
9369 PMAP_UNLOCK(pmap);
9370 }
9371 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
9372 pmap = PV_PMAP(pv);
9373 if (!PMAP_TRYLOCK(pmap)) {
9374 md_gen = m->md.pv_gen;
9375 pvh_gen = pvh->pv_gen;
9376 rw_wunlock(lock);
9377 PMAP_LOCK(pmap);
9378 rw_wlock(lock);
9379 if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
9380 PMAP_UNLOCK(pmap);
9381 goto restart;
9382 }
9383 }
9384 PG_M = pmap_modified_bit(pmap);
9385 PG_RW = pmap_rw_bit(pmap);
9386 pde = pmap_pde(pmap, pv->pv_va);
9387 KASSERT((*pde & PG_PS) == 0, ("pmap_clear_modify: found"
9388 " a 2mpage in page %p's pv list", m));
9389 pte = pmap_pde_to_pte(pde, pv->pv_va);
9390 if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
9391 atomic_clear_long(pte, PG_M);
9392 pmap_invalidate_page(pmap, pv->pv_va);
9393 }
9394 PMAP_UNLOCK(pmap);
9395 }
9396 rw_wunlock(lock);
9397 }
9398
9399 /*
9400 * Miscellaneous support routines follow
9401 */
9402
9403 /* Adjust the properties for a leaf page table entry. */
9404 static __inline void
9405 pmap_pte_props(pt_entry_t *pte, u_long bits, u_long mask)
9406 {
9407 u_long opte, npte;
9408
9409 opte = *(u_long *)pte;
9410 do {
9411 npte = opte & ~mask;
9412 npte |= bits;
9413 } while (npte != opte && !atomic_fcmpset_long((u_long *)pte, &opte,
9414 npte));
9415 }
9416
9417 /*
9418 * Map a set of physical memory pages into the kernel virtual
9419 * address space. Return a pointer to where it is mapped. This
9420 * routine is intended to be used for mapping device memory,
9421 * NOT real memory.
9422 */
9423 static void *
9424 pmap_mapdev_internal(vm_paddr_t pa, vm_size_t size, int mode, int flags)
9425 {
9426 struct pmap_preinit_mapping *ppim;
9427 vm_offset_t va, offset;
9428 vm_size_t tmpsize;
9429 int i;
9430
9431 offset = pa & PAGE_MASK;
9432 size = round_page(offset + size);
9433 pa = trunc_page(pa);
9434
9435 if (!pmap_initialized) {
9436 va = 0;
9437 for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
9438 ppim = pmap_preinit_mapping + i;
9439 if (ppim->va == 0) {
9440 ppim->pa = pa;
9441 ppim->sz = size;
9442 ppim->mode = mode;
9443 ppim->va = virtual_avail;
9444 virtual_avail += size;
9445 va = ppim->va;
9446 break;
9447 }
9448 }
9449 if (va == 0)
9450 panic("%s: too many preinit mappings", __func__);
9451 } else {
9452 /*
9453 * If we have a preinit mapping, reuse it.
9454 */
9455 for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
9456 ppim = pmap_preinit_mapping + i;
9457 if (ppim->pa == pa && ppim->sz == size &&
9458 (ppim->mode == mode ||
9459 (flags & MAPDEV_SETATTR) == 0))
9460 return ((void *)(ppim->va + offset));
9461 }
9462 /*
9463 * If the specified range of physical addresses fits within
9464 * the direct map window, use the direct map.
9465 */
9466 if (pa < dmaplimit && pa + size <= dmaplimit) {
9467 va = PHYS_TO_DMAP(pa);
9468 if ((flags & MAPDEV_SETATTR) != 0) {
9469 PMAP_LOCK(kernel_pmap);
9470 i = pmap_change_props_locked(va, size,
9471 PROT_NONE, mode, flags);
9472 PMAP_UNLOCK(kernel_pmap);
9473 } else
9474 i = 0;
9475 if (!i)
9476 return ((void *)(va + offset));
9477 }
9478 va = kva_alloc(size);
9479 if (va == 0)
9480 panic("%s: Couldn't allocate KVA", __func__);
9481 }
9482 for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE)
9483 pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode);
9484 pmap_invalidate_range(kernel_pmap, va, va + tmpsize);
9485 if ((flags & MAPDEV_FLUSHCACHE) != 0)
9486 pmap_invalidate_cache_range(va, va + tmpsize);
9487 return ((void *)(va + offset));
9488 }
9489
9490 void *
9491 pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
9492 {
9493
9494 return (pmap_mapdev_internal(pa, size, mode, MAPDEV_FLUSHCACHE |
9495 MAPDEV_SETATTR));
9496 }
9497
9498 void *
9499 pmap_mapdev(vm_paddr_t pa, vm_size_t size)
9500 {
9501
9502 return (pmap_mapdev_attr(pa, size, PAT_UNCACHEABLE));
9503 }
9504
9505 void *
9506 pmap_mapdev_pciecfg(vm_paddr_t pa, vm_size_t size)
9507 {
9508
9509 return (pmap_mapdev_internal(pa, size, PAT_UNCACHEABLE,
9510 MAPDEV_SETATTR));
9511 }
9512
9513 void *
9514 pmap_mapbios(vm_paddr_t pa, vm_size_t size)
9515 {
9516
9517 return (pmap_mapdev_internal(pa, size, PAT_WRITE_BACK,
9518 MAPDEV_FLUSHCACHE));
9519 }
9520
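/*
 * Undo a mapping created by pmap_mapdev() or pmap_mapbios().  Direct map
 * addresses require no work.  Preinit mappings are either retained for
 * later reuse or released, depending on whether pmap_init() has run;
 * other mappings are unmapped and their KVA freed.
 */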
9521 void
9522 pmap_unmapdev(void *p, vm_size_t size)
9523 {
9524 struct pmap_preinit_mapping *ppim;
9525 vm_offset_t offset, va;
9526 int i;
9527
9528 va = (vm_offset_t)p;
9529
9530 /* If pmap_mapdev() handed out a direct map address, there is nothing to undo. */
9531 if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS)
9532 return;
9533 offset = va & PAGE_MASK;
9534 size = round_page(offset + size);
9535 va = trunc_page(va);
9536 for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
9537 ppim = pmap_preinit_mapping + i;
9538 if (ppim->va == va && ppim->sz == size) {
9539 if (pmap_initialized)
9540 return;
9541 ppim->pa = 0;
9542 ppim->va = 0;
9543 ppim->sz = 0;
9544 ppim->mode = 0;
9545 if (va + size == virtual_avail)
9546 virtual_avail = va;
9547 return;
9548 }
9549 }
9550 if (pmap_initialized) {
9551 pmap_qremove(va, atop(size));
9552 kva_free(va, size);
9553 }
9554 }
9555
9556 /*
9557 * Tries to demote a 1GB page mapping.
9558 */
9559 static bool
9560 pmap_demote_pdpe(pmap_t pmap, pdp_entry_t *pdpe, vm_offset_t va)
9561 {
9562 pdp_entry_t newpdpe, oldpdpe;
9563 pd_entry_t *firstpde, newpde, *pde;
9564 pt_entry_t PG_A, PG_M, PG_RW, PG_V;
9565 vm_paddr_t pdpgpa;
9566 vm_page_t pdpg;
9567
9568 PG_A = pmap_accessed_bit(pmap);
9569 PG_M = pmap_modified_bit(pmap);
9570 PG_V = pmap_valid_bit(pmap);
9571 PG_RW = pmap_rw_bit(pmap);
9572
9573 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
9574 oldpdpe = *pdpe;
9575 KASSERT((oldpdpe & (PG_PS | PG_V)) == (PG_PS | PG_V),
9576 ("pmap_demote_pdpe: oldpdpe is missing PG_PS and/or PG_V"));
9577 pdpg = pmap_alloc_pt_page(pmap, va >> PDPSHIFT,
9578 VM_ALLOC_WIRED | VM_ALLOC_INTERRUPT);
9579 if (pdpg == NULL) {
9580 CTR2(KTR_PMAP, "pmap_demote_pdpe: failure for va %#lx"
9581 " in pmap %p", va, pmap);
9582 return (false);
9583 }
9584 pdpgpa = VM_PAGE_TO_PHYS(pdpg);
9585 firstpde = (pd_entry_t *)PHYS_TO_DMAP(pdpgpa);
9586 newpdpe = pdpgpa | PG_M | PG_A | (oldpdpe & PG_U) | PG_RW | PG_V;
9587 KASSERT((oldpdpe & PG_A) != 0,
9588 ("pmap_demote_pdpe: oldpdpe is missing PG_A"));
9589 KASSERT((oldpdpe & (PG_M | PG_RW)) != PG_RW,
9590 ("pmap_demote_pdpe: oldpdpe is missing PG_M"));
9591 newpde = oldpdpe;
9592
9593 /*
9594 * Initialize the page directory page.
9595 */
9596 for (pde = firstpde; pde < firstpde + NPDEPG; pde++) {
9597 *pde = newpde;
9598 newpde += NBPDR;
9599 }
9600
9601 /*
9602 * Demote the mapping.
9603 */
9604 *pdpe = newpdpe;
9605
9606 /*
9607 * Invalidate a stale recursive mapping of the page directory page.
9608 */
9609 pmap_invalidate_page(pmap, (vm_offset_t)vtopde(va));
9610
9611 counter_u64_add(pmap_pdpe_demotions, 1);
9612 CTR2(KTR_PMAP, "pmap_demote_pdpe: success for va %#lx"
9613 " in pmap %p", va, pmap);
9614 return (true);
9615 }
9616
9617 /*
9618 * Sets the memory attribute for the specified page.
9619 */
9620 void
9621 pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
9622 {
9623
9624 m->md.pat_mode = ma;
9625
9626 /*
9627 * If "m" is a normal page, update its direct mapping. This update
9628 * can be relied upon to perform any cache operations that are
9629 * required for data coherence.
9630 */
9631 if ((m->flags & PG_FICTITIOUS) == 0 &&
9632 pmap_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)), PAGE_SIZE,
9633 m->md.pat_mode))
9634 panic("memory attribute change on the direct map failed");
9635 }
9636
9637 void
9638 pmap_page_set_memattr_noflush(vm_page_t m, vm_memattr_t ma)
9639 {
9640 int error;
9641
9642 m->md.pat_mode = ma;
9643
9644 if ((m->flags & PG_FICTITIOUS) != 0)
9645 return;
9646 PMAP_LOCK(kernel_pmap);
9647 error = pmap_change_props_locked(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)),
9648 PAGE_SIZE, PROT_NONE, m->md.pat_mode, 0);
9649 PMAP_UNLOCK(kernel_pmap);
9650 if (error != 0)
9651 panic("memory attribute change on the direct map failed");
9652 }
9653
9654 /*
9655 * Changes the specified virtual address range's memory type to that given by
9656 * the parameter "mode". The specified virtual address range must be
9657 * completely contained within either the direct map or the kernel map. If
9658 * the virtual address range is contained within the kernel map, then the
9659 * memory type for each of the corresponding ranges of the direct map is also
9660 * changed. (The corresponding ranges of the direct map are those ranges that
9661 * map the same physical pages as the specified virtual address range.) These
9662 * changes to the direct map are necessary because Intel describes the
9663 * behavior of their processors as "undefined" if two or more mappings to the
9664 * same physical page have different memory types.
9665 *
9666 * Returns zero if the change completed successfully, and either EINVAL or
9667 * ENOMEM if the change failed. Specifically, EINVAL is returned if some part
9668 * of the virtual address range was not mapped, and ENOMEM is returned if
9669 * there was insufficient memory available to complete the change. In the
9670 * latter case, the memory type may have been changed on some part of the
9671 * virtual address range or the direct map.
9672 */
9673 int
9674 pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
9675 {
9676 int error;
9677
9678 PMAP_LOCK(kernel_pmap);
9679 error = pmap_change_props_locked(va, size, PROT_NONE, mode,
9680 MAPDEV_FLUSHCACHE);
9681 PMAP_UNLOCK(kernel_pmap);
9682 return (error);
9683 }
9684
9685 /*
9686 * Changes the specified virtual address range's protections to those
9687 * specified by "prot". Like pmap_change_attr(), protections for aliases
9688 * in the direct map are updated as well. Protections on aliasing mappings may
9689 * be a subset of the requested protections; for example, mappings in the direct
9690 * map are never executable.
9691 */
9692 int
9693 pmap_change_prot(vm_offset_t va, vm_size_t size, vm_prot_t prot)
9694 {
9695 int error;
9696
9697 /* Only supported within the kernel map. */
9698 if (va < VM_MIN_KERNEL_ADDRESS)
9699 return (EINVAL);
9700
9701 PMAP_LOCK(kernel_pmap);
9702 error = pmap_change_props_locked(va, size, prot, -1,
9703 MAPDEV_ASSERTVALID);
9704 PMAP_UNLOCK(kernel_pmap);
9705 return (error);
9706 }
9707
9708 static int
9709 pmap_change_props_locked(vm_offset_t va, vm_size_t size, vm_prot_t prot,
9710 int mode, int flags)
9711 {
9712 vm_offset_t base, offset, tmpva;
9713 vm_paddr_t pa_start, pa_end, pa_end1;
9714 pdp_entry_t *pdpe;
9715 pd_entry_t *pde, pde_bits, pde_mask;
9716 pt_entry_t *pte, pte_bits, pte_mask;
9717 int error;
9718 bool changed;
9719
9720 PMAP_LOCK_ASSERT(kernel_pmap, MA_OWNED);
9721 base = trunc_page(va);
9722 offset = va & PAGE_MASK;
9723 size = round_page(offset + size);
9724
9725 /*
9726 * Only supported on kernel virtual addresses, including the direct
9727 * map but excluding the recursive map.
9728 */
9729 if (base < DMAP_MIN_ADDRESS)
9730 return (EINVAL);
9731
9732 /*
9733 * Construct our flag sets and masks. "bits" is the subset of
9734 * "mask" that will be set in each modified PTE.
9735 *
9736 * Mappings in the direct map are never allowed to be executable.
9737 */
9738 pde_bits = pte_bits = 0;
9739 pde_mask = pte_mask = 0;
9740 if (mode != -1) {
9741 pde_bits |= pmap_cache_bits(kernel_pmap, mode, true);
9742 pde_mask |= X86_PG_PDE_CACHE;
9743 pte_bits |= pmap_cache_bits(kernel_pmap, mode, false);
9744 pte_mask |= X86_PG_PTE_CACHE;
9745 }
9746 if (prot != VM_PROT_NONE) {
9747 if ((prot & VM_PROT_WRITE) != 0) {
9748 pde_bits |= X86_PG_RW;
9749 pte_bits |= X86_PG_RW;
9750 }
9751 if ((prot & VM_PROT_EXECUTE) == 0 ||
9752 va < VM_MIN_KERNEL_ADDRESS) {
9753 pde_bits |= pg_nx;
9754 pte_bits |= pg_nx;
9755 }
9756 pde_mask |= X86_PG_RW | pg_nx;
9757 pte_mask |= X86_PG_RW | pg_nx;
9758 }
9759
9760 /*
9761 * Pages that aren't mapped aren't supported. Also break down 2MB pages
9762 * into 4KB pages if required.
9763 */
9764 for (tmpva = base; tmpva < base + size; ) {
9765 pdpe = pmap_pdpe(kernel_pmap, tmpva);
9766 if (pdpe == NULL || *pdpe == 0) {
9767 KASSERT((flags & MAPDEV_ASSERTVALID) == 0,
9768 ("%s: addr %#lx is not mapped", __func__, tmpva));
9769 return (EINVAL);
9770 }
9771 if (*pdpe & PG_PS) {
9772 /*
9773 * If the current 1GB page already has the required
9774 * properties, then we need not demote this page. Just
9775 * increment tmpva to the next 1GB page frame.
9776 */
9777 if ((*pdpe & pde_mask) == pde_bits) {
9778 tmpva = trunc_1gpage(tmpva) + NBPDP;
9779 continue;
9780 }
9781
9782 /*
9783 * If the current offset aligns with a 1GB page frame
9784 * and there is at least 1GB left within the range, then
9785 * we need not break down this page into 2MB pages.
9786 */
9787 if ((tmpva & PDPMASK) == 0 &&
9788 tmpva + PDPMASK < base + size) {
9789 tmpva += NBPDP;
9790 continue;
9791 }
9792 if (!pmap_demote_pdpe(kernel_pmap, pdpe, tmpva))
9793 return (ENOMEM);
9794 }
9795 pde = pmap_pdpe_to_pde(pdpe, tmpva);
9796 if (*pde == 0) {
9797 KASSERT((flags & MAPDEV_ASSERTVALID) == 0,
9798 ("%s: addr %#lx is not mapped", __func__, tmpva));
9799 return (EINVAL);
9800 }
9801 if (*pde & PG_PS) {
9802 /*
9803 * If the current 2MB page already has the required
9804 * properties, then we need not demote this page. Just
9805 * increment tmpva to the next 2MB page frame.
9806 */
9807 if ((*pde & pde_mask) == pde_bits) {
9808 tmpva = trunc_2mpage(tmpva) + NBPDR;
9809 continue;
9810 }
9811
9812 /*
9813 * If the current offset aligns with a 2MB page frame
9814 * and there is at least 2MB left within the range, then
9815 * we need not break down this page into 4KB pages.
9816 */
9817 if ((tmpva & PDRMASK) == 0 &&
9818 tmpva + PDRMASK < base + size) {
9819 tmpva += NBPDR;
9820 continue;
9821 }
9822 if (!pmap_demote_pde(kernel_pmap, pde, tmpva))
9823 return (ENOMEM);
9824 }
9825 pte = pmap_pde_to_pte(pde, tmpva);
9826 if (*pte == 0) {
9827 KASSERT((flags & MAPDEV_ASSERTVALID) == 0,
9828 ("%s: addr %#lx is not mapped", __func__, tmpva));
9829 return (EINVAL);
9830 }
9831 tmpva += PAGE_SIZE;
9832 }
9833 error = 0;
9834
9835 /*
9836 * Ok, all the pages exist, so run through them updating their
9837 * properties if required.
9838 */
9839 changed = false;
9840 pa_start = pa_end = 0;
9841 for (tmpva = base; tmpva < base + size; ) {
9842 pdpe = pmap_pdpe(kernel_pmap, tmpva);
9843 if (*pdpe & PG_PS) {
9844 if ((*pdpe & pde_mask) != pde_bits) {
9845 pmap_pte_props(pdpe, pde_bits, pde_mask);
9846 changed = true;
9847 }
9848 if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
9849 (*pdpe & PG_PS_FRAME) < dmaplimit) {
9850 if (pa_start == pa_end) {
9851 /* Start physical address run. */
9852 pa_start = *pdpe & PG_PS_FRAME;
9853 pa_end = pa_start + NBPDP;
9854 } else if (pa_end == (*pdpe & PG_PS_FRAME))
9855 pa_end += NBPDP;
9856 else {
9857 /* Run ended, update direct map. */
9858 error = pmap_change_props_locked(
9859 PHYS_TO_DMAP(pa_start),
9860 pa_end - pa_start, prot, mode,
9861 flags);
9862 if (error != 0)
9863 break;
9864 /* Start physical address run. */
9865 pa_start = *pdpe & PG_PS_FRAME;
9866 pa_end = pa_start + NBPDP;
9867 }
9868 }
9869 tmpva = trunc_1gpage(tmpva) + NBPDP;
9870 continue;
9871 }
9872 pde = pmap_pdpe_to_pde(pdpe, tmpva);
9873 if (*pde & PG_PS) {
9874 if ((*pde & pde_mask) != pde_bits) {
9875 pmap_pte_props(pde, pde_bits, pde_mask);
9876 changed = true;
9877 }
9878 if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
9879 (*pde & PG_PS_FRAME) < dmaplimit) {
9880 if (pa_start == pa_end) {
9881 /* Start physical address run. */
9882 pa_start = *pde & PG_PS_FRAME;
9883 pa_end = pa_start + NBPDR;
9884 } else if (pa_end == (*pde & PG_PS_FRAME))
9885 pa_end += NBPDR;
9886 else {
9887 /* Run ended, update direct map. */
9888 error = pmap_change_props_locked(
9889 PHYS_TO_DMAP(pa_start),
9890 pa_end - pa_start, prot, mode,
9891 flags);
9892 if (error != 0)
9893 break;
9894 /* Start physical address run. */
9895 pa_start = *pde & PG_PS_FRAME;
9896 pa_end = pa_start + NBPDR;
9897 }
9898 }
9899 tmpva = trunc_2mpage(tmpva) + NBPDR;
9900 } else {
9901 pte = pmap_pde_to_pte(pde, tmpva);
9902 if ((*pte & pte_mask) != pte_bits) {
9903 pmap_pte_props(pte, pte_bits, pte_mask);
9904 changed = true;
9905 }
9906 if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
9907 (*pte & PG_FRAME) < dmaplimit) {
9908 if (pa_start == pa_end) {
9909 /* Start physical address run. */
9910 pa_start = *pte & PG_FRAME;
9911 pa_end = pa_start + PAGE_SIZE;
9912 } else if (pa_end == (*pte & PG_FRAME))
9913 pa_end += PAGE_SIZE;
9914 else {
9915 /* Run ended, update direct map. */
9916 error = pmap_change_props_locked(
9917 PHYS_TO_DMAP(pa_start),
9918 pa_end - pa_start, prot, mode,
9919 flags);
9920 if (error != 0)
9921 break;
9922 /* Start physical address run. */
9923 pa_start = *pte & PG_FRAME;
9924 pa_end = pa_start + PAGE_SIZE;
9925 }
9926 }
9927 tmpva += PAGE_SIZE;
9928 }
9929 }
9930 if (error == 0 && pa_start != pa_end && pa_start < dmaplimit) {
9931 pa_end1 = MIN(pa_end, dmaplimit);
9932 if (pa_start != pa_end1)
9933 error = pmap_change_props_locked(PHYS_TO_DMAP(pa_start),
9934 pa_end1 - pa_start, prot, mode, flags);
9935 }
9936
9937 /*
9938 * Flush the CPU caches if required, so that no stale data remains
9939 * cached for the range whose attributes were changed.
9940 */
9941 if (changed) {
9942 pmap_invalidate_range(kernel_pmap, base, tmpva);
9943 if ((flags & MAPDEV_FLUSHCACHE) != 0)
9944 pmap_invalidate_cache_range(base, tmpva);
9945 }
9946 return (error);
9947 }
9948
9949 /*
9950 * Demotes any mapping within the direct map region that covers more than the
9951 * specified range of physical addresses. This range's size must be a power
9952 * of two and its starting address must be a multiple of its size. Since the
9953 * demotion does not change any attributes of the mapping, a TLB invalidation
9954 * is not mandatory. The caller may, however, request a TLB invalidation.
9955 */
9956 void
9957 pmap_demote_DMAP(vm_paddr_t base, vm_size_t len, bool invalidate)
9958 {
9959 pdp_entry_t *pdpe;
9960 pd_entry_t *pde;
9961 vm_offset_t va;
9962 bool changed;
9963
9964 if (len == 0)
9965 return;
9966 KASSERT(powerof2(len), ("pmap_demote_DMAP: len is not a power of 2"));
9967 KASSERT((base & (len - 1)) == 0,
9968 ("pmap_demote_DMAP: base is not a multiple of len"));
9969 if (len < NBPDP && base < dmaplimit) {
9970 va = PHYS_TO_DMAP(base);
9971 changed = false;
9972 PMAP_LOCK(kernel_pmap);
9973 pdpe = pmap_pdpe(kernel_pmap, va);
9974 if ((*pdpe & X86_PG_V) == 0)
9975 panic("pmap_demote_DMAP: invalid PDPE");
9976 if ((*pdpe & PG_PS) != 0) {
9977 if (!pmap_demote_pdpe(kernel_pmap, pdpe, va))
9978 panic("pmap_demote_DMAP: PDPE failed");
9979 changed = true;
9980 }
9981 if (len < NBPDR) {
9982 pde = pmap_pdpe_to_pde(pdpe, va);
9983 if ((*pde & X86_PG_V) == 0)
9984 panic("pmap_demote_DMAP: invalid PDE");
9985 if ((*pde & PG_PS) != 0) {
9986 if (!pmap_demote_pde(kernel_pmap, pde, va))
9987 panic("pmap_demote_DMAP: PDE failed");
9988 changed = true;
9989 }
9990 }
9991 if (changed && invalidate)
9992 pmap_invalidate_page(kernel_pmap, va);
9993 PMAP_UNLOCK(kernel_pmap);
9994 }
9995 }
9996
9997 /*
9998 * Perform the pmap work for mincore(2). If the page is not both referenced and
9999 * modified by this pmap, returns its physical address so that the caller can
10000 * find other mappings.
10001 */
10002 int
10003 pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap)
10004 {
10005 pdp_entry_t *pdpe;
10006 pd_entry_t *pdep;
10007 pt_entry_t pte, PG_A, PG_M, PG_RW, PG_V;
10008 vm_paddr_t pa;
10009 int val;
10010
10011 PG_A = pmap_accessed_bit(pmap);
10012 PG_M = pmap_modified_bit(pmap);
10013 PG_V = pmap_valid_bit(pmap);
10014 PG_RW = pmap_rw_bit(pmap);
10015
10016 PMAP_LOCK(pmap);
10017 pte = 0;
10018 pa = 0;
10019 val = 0;
10020 pdpe = pmap_pdpe(pmap, addr);
10021 if (pdpe == NULL)
10022 goto out;
10023 if ((*pdpe & PG_V) != 0) {
10024 if ((*pdpe & PG_PS) != 0) {
10025 pte = *pdpe;
10026 pa = ((pte & PG_PS_PDP_FRAME) | (addr & PDPMASK)) &
10027 PG_FRAME;
10028 val = MINCORE_PSIND(2);
10029 } else {
10030 pdep = pmap_pde(pmap, addr);
10031 if (pdep != NULL && (*pdep & PG_V) != 0) {
10032 if ((*pdep & PG_PS) != 0) {
10033 pte = *pdep;
10034 /* Compute the physical address of the 4KB page. */
10035 pa = ((pte & PG_PS_FRAME) | (addr &
10036 PDRMASK)) & PG_FRAME;
10037 val = MINCORE_PSIND(1);
10038 } else {
10039 pte = *pmap_pde_to_pte(pdep, addr);
10040 pa = pte & PG_FRAME;
10041 val = 0;
10042 }
10043 }
10044 }
10045 }
10046 if ((pte & PG_V) != 0) {
10047 val |= MINCORE_INCORE;
10048 if ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
10049 val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
10050 if ((pte & PG_A) != 0)
10051 val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
10052 }
10053 if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
10054 (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) &&
10055 (pte & (PG_MANAGED | PG_V)) == (PG_MANAGED | PG_V)) {
10056 *pap = pa;
10057 }
10058 out:
10059 PMAP_UNLOCK(pmap);
10060 return (val);
10061 }
10062
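/*
 * Allocate a PCID for the pmap on the current CPU.  Returns CR3_PCID_SAVE
 * if the pmap's existing PCID for this CPU is still valid (its generation
 * matches the CPU's), in which case cached TLB entries may be reused.
 * Otherwise a new PCID is assigned and 0 is returned, forcing a TLB flush
 * on activation.  When the per-CPU PCID space is exhausted, the generation
 * counter is bumped, which implicitly invalidates all PCIDs previously
 * handed out on this CPU.
 */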
10063 static uint64_t
10064 pmap_pcid_alloc(pmap_t pmap, struct pmap_pcid *pcidp)
10065 {
10066 uint32_t gen, new_gen, pcid_next;
10067
10068 CRITICAL_ASSERT(curthread);
10069 gen = PCPU_GET(pcid_gen);
10070 if (pcidp->pm_pcid == PMAP_PCID_KERN)
10071 return (pti ? 0 : CR3_PCID_SAVE);
10072 if (pcidp->pm_gen == gen)
10073 return (CR3_PCID_SAVE);
10074 pcid_next = PCPU_GET(pcid_next);
10075 KASSERT((!pti && pcid_next <= PMAP_PCID_OVERMAX) ||
10076 (pti && pcid_next <= PMAP_PCID_OVERMAX_KERN),
10077 ("cpu %d pcid_next %#x", PCPU_GET(cpuid), pcid_next));
10078 if ((!pti && pcid_next == PMAP_PCID_OVERMAX) ||
10079 (pti && pcid_next == PMAP_PCID_OVERMAX_KERN)) {
10080 new_gen = gen + 1;
10081 if (new_gen == 0)
10082 new_gen = 1;
10083 PCPU_SET(pcid_gen, new_gen);
10084 pcid_next = PMAP_PCID_KERN + 1;
10085 } else {
10086 new_gen = gen;
10087 }
10088 pcidp->pm_pcid = pcid_next;
10089 pcidp->pm_gen = new_gen;
10090 PCPU_SET(pcid_next, pcid_next + 1);
10091 return (0);
10092 }
10093
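/*
 * Allocate a PCID as above, asserting that the result lies within the
 * valid range and that only the kernel pmap receives the kernel PCID.
 */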
10094 static uint64_t
10095 pmap_pcid_alloc_checked(pmap_t pmap, struct pmap_pcid *pcidp)
10096 {
10097 uint64_t cached;
10098
10099 cached = pmap_pcid_alloc(pmap, pcidp);
10100 KASSERT(pcidp->pm_pcid < PMAP_PCID_OVERMAX,
10101 ("pmap %p cpu %d pcid %#x", pmap, PCPU_GET(cpuid), pcidp->pm_pcid));
10102 KASSERT(pcidp->pm_pcid != PMAP_PCID_KERN || pmap == kernel_pmap,
10103 ("non-kernel pmap pmap %p cpu %d pcid %#x",
10104 pmap, PCPU_GET(cpuid), pcidp->pm_pcid));
10105 return (cached);
10106 }
10107
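/*
 * Point the TSS rsp0 at the PTI trampoline stack when the pmap has a user
 * page table, or at the thread's kernel stack base otherwise.
 */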
10108 static void
10109 pmap_activate_sw_pti_post(struct thread *td, pmap_t pmap)
10110 {
10111
10112 PCPU_GET(tssp)->tss_rsp0 = pmap->pm_ucr3 != PMAP_NO_CR3 ?
10113 PCPU_GET(pti_rsp0) : (uintptr_t)td->td_md.md_stack_base;
10114 }
10115
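/*
 * Activate the pmap on a CPU that uses both PCID and PTI: allocate (or
 * reuse) a PCID, reload %cr3 only if the top-level page table changed,
 * and compute the kernel and user %cr3 values that the PTI trampoline
 * loads on kernel entry and exit.
 */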
10116 static void
10117 pmap_activate_sw_pcid_pti(struct thread *td, pmap_t pmap, u_int cpuid)
10118 {
10119 pmap_t old_pmap;
10120 struct pmap_pcid *pcidp, *old_pcidp;
10121 uint64_t cached, cr3, kcr3, ucr3;
10122
10123 KASSERT((read_rflags() & PSL_I) == 0,
10124 ("PCID needs interrupts disabled in pmap_activate_sw()"));
10125
10126 /* See the comment in pmap_invalidate_page_pcid(). */
10127 if (PCPU_GET(ucr3_load_mask) != PMAP_UCR3_NOMASK) {
10128 PCPU_SET(ucr3_load_mask, PMAP_UCR3_NOMASK);
10129 old_pmap = PCPU_GET(curpmap);
10130 MPASS(old_pmap->pm_ucr3 != PMAP_NO_CR3);
10131 old_pcidp = zpcpu_get_cpu(old_pmap->pm_pcidp, cpuid);
10132 old_pcidp->pm_gen = 0;
10133 }
10134
10135 pcidp = zpcpu_get_cpu(pmap->pm_pcidp, cpuid);
10136 cached = pmap_pcid_alloc_checked(pmap, pcidp);
10137 cr3 = rcr3();
10138 if ((cr3 & ~CR3_PCID_MASK) != pmap->pm_cr3)
10139 load_cr3(pmap->pm_cr3 | pcidp->pm_pcid);
10140 PCPU_SET(curpmap, pmap);
10141 kcr3 = pmap->pm_cr3 | pcidp->pm_pcid;
10142 ucr3 = pmap->pm_ucr3 | pcidp->pm_pcid | PMAP_PCID_USER_PT;
10143
10144 if (!cached && pmap->pm_ucr3 != PMAP_NO_CR3)
10145 PCPU_SET(ucr3_load_mask, ~CR3_PCID_SAVE);
10146
10147 PCPU_SET(kcr3, kcr3 | CR3_PCID_SAVE);
10148 PCPU_SET(ucr3, ucr3 | CR3_PCID_SAVE);
10149 if (cached)
10150 counter_u64_add(pcid_save_cnt, 1);
10151
10152 pmap_activate_sw_pti_post(td, pmap);
10153 }
10154
10155 static void
10156 pmap_activate_sw_pcid_nopti(struct thread *td __unused, pmap_t pmap,
10157 u_int cpuid)
10158 {
10159 struct pmap_pcid *pcidp;
10160 uint64_t cached, cr3;
10161
10162 KASSERT((read_rflags() & PSL_I) == 0,
10163 ("PCID needs interrupts disabled in pmap_activate_sw()"));
10164
10165 pcidp = zpcpu_get_cpu(pmap->pm_pcidp, cpuid);
10166 cached = pmap_pcid_alloc_checked(pmap, pcidp);
10167 cr3 = rcr3();
10168 if (!cached || (cr3 & ~CR3_PCID_MASK) != pmap->pm_cr3)
10169 load_cr3(pmap->pm_cr3 | pcidp->pm_pcid | cached);
10170 PCPU_SET(curpmap, pmap);
10171 if (cached)
10172 counter_u64_add(pcid_save_cnt, 1);
10173 }
10174
10175 static void
10176 pmap_activate_sw_nopcid_nopti(struct thread *td __unused, pmap_t pmap,
10177 u_int cpuid __unused)
10178 {
10179
10180 load_cr3(pmap->pm_cr3);
10181 PCPU_SET(curpmap, pmap);
10182 }
10183
10184 static void
10185 pmap_activate_sw_nopcid_pti(struct thread *td, pmap_t pmap,
10186 u_int cpuid __unused)
10187 {
10188
10189 pmap_activate_sw_nopcid_nopti(td, pmap, cpuid);
10190 PCPU_SET(kcr3, pmap->pm_cr3);
10191 PCPU_SET(ucr3, pmap->pm_ucr3);
10192 pmap_activate_sw_pti_post(td, pmap);
10193 }
10194
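/*
 * Resolve, once at boot, which of the activation routines above to use,
 * based on whether PCID and PTI are enabled.
 */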
10195 DEFINE_IFUNC(static, void, pmap_activate_sw_mode, (struct thread *, pmap_t,
10196 u_int))
10197 {
10198
10199 if (pmap_pcid_enabled && pti)
10200 return (pmap_activate_sw_pcid_pti);
10201 else if (pmap_pcid_enabled && !pti)
10202 return (pmap_activate_sw_pcid_nopti);
10203 else if (!pmap_pcid_enabled && pti)
10204 return (pmap_activate_sw_nopcid_pti);
10205 else /* if (!pmap_pcid_enabled && !pti) */
10206 return (pmap_activate_sw_nopcid_nopti);
10207 }
10208
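/*
 * Switch the current CPU to the given thread's pmap: mark the CPU active
 * in the new pmap, load the new %cr3 via the mode-specific routine
 * selected above, and clear the CPU from the old pmap's active set.
 * Callers are expected to run with interrupts disabled, either from
 * cpu_switch() or from pmap_activate(); see the comment in the latter.
 */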
10209 void
10210 pmap_activate_sw(struct thread *td)
10211 {
10212 pmap_t oldpmap, pmap;
10213 u_int cpuid;
10214
10215 oldpmap = PCPU_GET(curpmap);
10216 pmap = vmspace_pmap(td->td_proc->p_vmspace);
10217 if (oldpmap == pmap) {
10218 if (cpu_vendor_id != CPU_VENDOR_INTEL)
10219 mfence();
10220 return;
10221 }
10222 cpuid = PCPU_GET(cpuid);
10223 #ifdef SMP
10224 CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
10225 #else
10226 CPU_SET(cpuid, &pmap->pm_active);
10227 #endif
10228 pmap_activate_sw_mode(td, pmap, cpuid);
10229 #ifdef SMP
10230 CPU_CLR_ATOMIC(cpuid, &oldpmap->pm_active);
10231 #else
10232 CPU_CLR(cpuid, &oldpmap->pm_active);
10233 #endif
10234 }
10235
10236 void
10237 pmap_activate(struct thread *td)
10238 {
10239 /*
10240 * invltlb_{invpcid,}_pcid_handler() is used to handle an
10241 * invalidate_all IPI, which checks for curpmap ==
10242 * smp_tlb_pmap. The below sequence of operations has a
10243 * window where %CR3 is loaded with the new pmap's PML4
10244 * address, but the curpmap value has not yet been updated.
10245 * This causes the invltlb IPI handler, which is called
10246 * between the updates, to execute as a NOP, which leaves
10247 * stale TLB entries.
10248 *
10249 * Note that the most common use of pmap_activate_sw(), from
10250 * a context switch, is immune to this race, because
10251 * interrupts are disabled (while the thread lock is owned),
10252 * so the IPI is delayed until after curpmap is updated. Protect
10253 * other callers in a similar way, by disabling interrupts
10254 * around the %cr3 register reload and curpmap assignment.
10255 */
10256 spinlock_enter();
10257 pmap_activate_sw(td);
10258 spinlock_exit();
10259 }
10260
10261 void
10262 pmap_activate_boot(pmap_t pmap)
10263 {
10264 uint64_t kcr3;
10265 u_int cpuid;
10266
10267 /*
10268 * kernel_pmap must never be deactivated, and we ensure that
10269 * by never activating it at all.
10270 */
10271 MPASS(pmap != kernel_pmap);
10272
10273 cpuid = PCPU_GET(cpuid);
10274 #ifdef SMP
10275 CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
10276 #else
10277 CPU_SET(cpuid, &pmap->pm_active);
10278 #endif
10279 PCPU_SET(curpmap, pmap);
10280 if (pti) {
10281 kcr3 = pmap->pm_cr3;
10282 if (pmap_pcid_enabled)
10283 kcr3 |= pmap_get_pcid(pmap) | CR3_PCID_SAVE;
10284 } else {
10285 kcr3 = PMAP_NO_CR3;
10286 }
10287 PCPU_SET(kcr3, kcr3);
10288 PCPU_SET(ucr3, PMAP_NO_CR3);
10289 }
10290
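/*
 * Copy the set of CPUs on which the pmap is currently active into *res.
 */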
10291 void
10292 pmap_active_cpus(pmap_t pmap, cpuset_t *res)
10293 {
10294 *res = pmap->pm_active;
10295 }
10296
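/*
 * No-op on amd64: the hardware keeps instruction caches coherent with
 * data stores, so no explicit synchronization is required here.
 */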
10297 void
10298 pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
10299 {
10300 }
10301
10302 /*
10303 * Increase the starting virtual address of the given mapping if a
10304 * different alignment might result in more superpage mappings.
10305 */
10306 void
10307 pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
10308 vm_offset_t *addr, vm_size_t size)
10309 {
10310 vm_offset_t superpage_offset;
10311
10312 if (size < NBPDR)
10313 return;
10314 if (object != NULL && (object->flags & OBJ_COLORED) != 0)
10315 offset += ptoa(object->pg_color);
10316 superpage_offset = offset & PDRMASK;
10317 if (size - ((NBPDR - superpage_offset) & PDRMASK) < NBPDR ||
10318 (*addr & PDRMASK) == superpage_offset)
10319 return;
10320 if ((*addr & PDRMASK) < superpage_offset)
10321 *addr = (*addr & ~PDRMASK) + superpage_offset;
10322 else
10323 *addr = ((*addr + PDRMASK) & ~PDRMASK) + superpage_offset;
10324 }
10325
10326 #ifdef INVARIANTS
10327 static unsigned long num_dirty_emulations;
10328 SYSCTL_ULONG(_vm_pmap, OID_AUTO, num_dirty_emulations, CTLFLAG_RW,
10329 &num_dirty_emulations, 0, NULL);
10330
10331 static unsigned long num_accessed_emulations;
10332 SYSCTL_ULONG(_vm_pmap, OID_AUTO, num_accessed_emulations, CTLFLAG_RW,
10333 &num_accessed_emulations, 0, NULL);
10334
10335 static unsigned long num_superpage_accessed_emulations;
10336 SYSCTL_ULONG(_vm_pmap, OID_AUTO, num_superpage_accessed_emulations, CTLFLAG_RW,
10337 &num_superpage_accessed_emulations, 0, NULL);
10338
10339 static unsigned long ad_emulation_superpage_promotions;
10340 SYSCTL_ULONG(_vm_pmap, OID_AUTO, ad_emulation_superpage_promotions, CTLFLAG_RW,
10341 &ad_emulation_superpage_promotions, 0, NULL);
10342 #endif /* INVARIANTS */
10343
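/*
 * Emulate the referenced/modified bits for pmaps that lack hardware A/D
 * bit support (e.g., EPT without A/D bits).  Called on a page fault of
 * type "ftype"; returns 0 if the fault was resolved by setting PG_A (and
 * PG_M for write faults) in the relevant entry, or -1 if the fault must
 * be handled by the full VM fault path.
 */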
10344 int
10345 pmap_emulate_accessed_dirty(pmap_t pmap, vm_offset_t va, int ftype)
10346 {
10347 int rv;
10348 struct rwlock *lock;
10349 #if VM_NRESERVLEVEL > 0
10350 vm_page_t m, mpte;
10351 #endif
10352 pd_entry_t *pde;
10353 pt_entry_t *pte, PG_A, PG_M, PG_RW, PG_V;
10354
10355 KASSERT(ftype == VM_PROT_READ || ftype == VM_PROT_WRITE,
10356 ("pmap_emulate_accessed_dirty: invalid fault type %d", ftype));
10357
10358 if (!pmap_emulate_ad_bits(pmap))
10359 return (-1);
10360
10361 PG_A = pmap_accessed_bit(pmap);
10362 PG_M = pmap_modified_bit(pmap);
10363 PG_V = pmap_valid_bit(pmap);
10364 PG_RW = pmap_rw_bit(pmap);
10365
10366 rv = -1;
10367 lock = NULL;
10368 PMAP_LOCK(pmap);
10369
10370 pde = pmap_pde(pmap, va);
10371 if (pde == NULL || (*pde & PG_V) == 0)
10372 goto done;
10373
10374 if ((*pde & PG_PS) != 0) {
10375 if (ftype == VM_PROT_READ) {
10376 #ifdef INVARIANTS
10377 atomic_add_long(&num_superpage_accessed_emulations, 1);
10378 #endif
10379 *pde |= PG_A;
10380 rv = 0;
10381 }
10382 goto done;
10383 }
10384
10385 pte = pmap_pde_to_pte(pde, va);
10386 if ((*pte & PG_V) == 0)
10387 goto done;
10388
10389 if (ftype == VM_PROT_WRITE) {
10390 if ((*pte & PG_RW) == 0)
10391 goto done;
10392 /*
10393 * Set the modified and accessed bits simultaneously.
10394 *
10395 * Intel EPT PTEs that do software emulation of A/D bits map
10396 * PG_A and PG_M to EPT_PG_READ and EPT_PG_WRITE respectively.
10397 * An EPT misconfiguration is triggered if the PTE is writable
10398 * but not readable (WR=10). This is avoided by setting PG_A
10399 * and PG_M simultaneously.
10400 */
10401 *pte |= PG_M | PG_A;
10402 } else {
10403 *pte |= PG_A;
10404 }
10405
10406 #if VM_NRESERVLEVEL > 0
10407 /* try to promote the mapping */
10408 if (va < VM_MAXUSER_ADDRESS)
10409 mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
10410 else
10411 mpte = NULL;
10412
10413 m = PHYS_TO_VM_PAGE(*pte & PG_FRAME);
10414
10415 if ((mpte == NULL || mpte->ref_count == NPTEPG) &&
10416 (m->flags & PG_FICTITIOUS) == 0 &&
10417 vm_reserv_level_iffullpop(m) == 0 &&
10418 pmap_promote_pde(pmap, pde, va, mpte, &lock)) {
10419 #ifdef INVARIANTS
10420 atomic_add_long(&ad_emulation_superpage_promotions, 1);
10421 #endif
10422 }
10423 #endif
10424
10425 #ifdef INVARIANTS
10426 if (ftype == VM_PROT_WRITE)
10427 atomic_add_long(&num_dirty_emulations, 1);
10428 else
10429 atomic_add_long(&num_accessed_emulations, 1);
10430 #endif
10431 rv = 0; /* success */
10432 done:
10433 if (lock != NULL)
10434 rw_wunlock(lock);
10435 PMAP_UNLOCK(pmap);
10436 return (rv);
10437 }
10438
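/*
 * Copy the page table entries that translate "va" into "ptr", one per
 * paging level, stopping at the first invalid or leaf entry.  "*num" is
 * set to the number of entries returned.
 */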
10439 void
10440 pmap_get_mapping(pmap_t pmap, vm_offset_t va, uint64_t *ptr, int *num)
10441 {
10442 pml4_entry_t *pml4;
10443 pdp_entry_t *pdp;
10444 pd_entry_t *pde;
10445 pt_entry_t *pte, PG_V;
10446 int idx;
10447
10448 idx = 0;
10449 PG_V = pmap_valid_bit(pmap);
10450 PMAP_LOCK(pmap);
10451
10452 pml4 = pmap_pml4e(pmap, va);
10453 if (pml4 == NULL)
10454 goto done;
10455 ptr[idx++] = *pml4;
10456 if ((*pml4 & PG_V) == 0)
10457 goto done;
10458
10459 pdp = pmap_pml4e_to_pdpe(pml4, va);
10460 ptr[idx++] = *pdp;
10461 if ((*pdp & PG_V) == 0 || (*pdp & PG_PS) != 0)
10462 goto done;
10463
10464 pde = pmap_pdpe_to_pde(pdp, va);
10465 ptr[idx++] = *pde;
10466 if ((*pde & PG_V) == 0 || (*pde & PG_PS) != 0)
10467 goto done;
10468
10469 pte = pmap_pde_to_pte(pde, va);
10470 ptr[idx++] = *pte;
10471
10472 done:
10473 PMAP_UNLOCK(pmap);
10474 *num = idx;
10475 }
10476
10477 /**
10478 * Get the kernel virtual address of a set of physical pages. If there are
10479 * physical addresses not covered by the DMAP perform a transient mapping
10480 * that will be removed when calling pmap_unmap_io_transient.
10481 *
10482 * \param page The pages the caller wishes to obtain the virtual
10483 * address on the kernel memory map.
10484 * \param vaddr On return contains the kernel virtual memory address
10485 * of the pages passed in the page parameter.
10486 * \param count Number of pages passed in.
10487 * \param can_fault true if the thread using the mapped pages can take
10488 * page faults, false otherwise.
10489 *
10490 * \returns true if the caller must call pmap_unmap_io_transient when
10491 * finished or false otherwise.
10492 *
10493 */
10494 bool
10495 pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
10496 bool can_fault)
10497 {
10498 vm_paddr_t paddr;
10499 bool needs_mapping;
10500 pt_entry_t *pte;
10501 int cache_bits, error __unused, i;
10502
10503 /*
10504 * Allocate any KVA space that we need; this is done in a separate
10505 * loop to avoid calling vmem_alloc() while pinned.
10506 */
10507 needs_mapping = false;
10508 for (i = 0; i < count; i++) {
10509 paddr = VM_PAGE_TO_PHYS(page[i]);
10510 if (__predict_false(paddr >= dmaplimit)) {
10511 error = vmem_alloc(kernel_arena, PAGE_SIZE,
10512 M_BESTFIT | M_WAITOK, &vaddr[i]);
10513 KASSERT(error == 0, ("vmem_alloc failed: %d", error));
10514 needs_mapping = true;
10515 } else {
10516 vaddr[i] = PHYS_TO_DMAP(paddr);
10517 }
10518 }
10519
10520 /* Exit early if everything is covered by the DMAP */
10521 if (!needs_mapping)
10522 return (false);
10523
10524 /*
10525 * NB: The sequence of updating a page table followed by accesses
10526 * to the corresponding pages used in the !DMAP case is subject to
10527 * the situation described in the "AMD64 Architecture Programmer's
10528 * Manual Volume 2: System Programming" rev. 3.23, "7.3.1 Special
10529 * Coherency Considerations". Therefore, issuing the INVLPG right
10530 * after modifying the PTE bits is crucial.
10531 */
10532 if (!can_fault)
10533 sched_pin();
10534 for (i = 0; i < count; i++) {
10535 paddr = VM_PAGE_TO_PHYS(page[i]);
10536 if (paddr >= dmaplimit) {
10537 if (can_fault) {
10538 /*
10539 				 * Slow path: since we can get page faults
10540 				 * while mappings are active, don't pin the
10541 				 * thread to the CPU; instead, add a global
10542 				 * mapping visible to all CPUs.
10543 */
10544 pmap_qenter(vaddr[i], &page[i], 1);
10545 } else {
10546 pte = vtopte(vaddr[i]);
10547 cache_bits = pmap_cache_bits(kernel_pmap,
10548 page[i]->md.pat_mode, false);
10549 pte_store(pte, paddr | X86_PG_RW | X86_PG_V |
10550 cache_bits);
10551 pmap_invlpg(kernel_pmap, vaddr[i]);
10552 }
10553 }
10554 }
10555
10556 return (needs_mapping);
10557 }
10558
10559 void
10560 pmap_unmap_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
10561 bool can_fault)
10562 {
10563 vm_paddr_t paddr;
10564 int i;
10565
10566 if (!can_fault)
10567 sched_unpin();
10568 for (i = 0; i < count; i++) {
10569 paddr = VM_PAGE_TO_PHYS(page[i]);
10570 if (paddr >= dmaplimit) {
10571 if (can_fault)
10572 pmap_qremove(vaddr[i], 1);
10573 vmem_free(kernel_arena, vaddr[i], PAGE_SIZE);
10574 }
10575 }
10576 }
10577
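/*
 * Establish a transient mapping of a single page for callers that cannot
 * sleep.  Pages covered by the direct map are returned via their DMAP
 * address; otherwise the page is mapped at the dedicated "qframe" KVA while
 * holding a spin mutex, and the caller must release the mapping with
 * pmap_quick_remove_page().
 */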
10578 vm_offset_t
10579 pmap_quick_enter_page(vm_page_t m)
10580 {
10581 vm_paddr_t paddr;
10582
10583 paddr = VM_PAGE_TO_PHYS(m);
10584 if (paddr < dmaplimit)
10585 return (PHYS_TO_DMAP(paddr));
10586 mtx_lock_spin(&qframe_mtx);
10587 KASSERT(*vtopte(qframe) == 0, ("qframe busy"));
10588
10589 /*
10590 * Since qframe is exclusively mapped by us, and we do not set
10591 * PG_G, we can use INVLPG here.
10592 */
10593 invlpg(qframe);
10594
10595 pte_store(vtopte(qframe), paddr | X86_PG_RW | X86_PG_V | X86_PG_A |
10596 X86_PG_M | pmap_cache_bits(kernel_pmap, m->md.pat_mode, false));
10597 return (qframe);
10598 }
10599
10600 void
10601 pmap_quick_remove_page(vm_offset_t addr)
10602 {
10603
10604 if (addr != qframe)
10605 return;
10606 pte_store(vtopte(qframe), 0);
10607 mtx_unlock_spin(&qframe_mtx);
10608 }
10609
10610 /*
10611 * Pdp pages from the large map are managed differently from either
10612 * kernel or user page table pages. They are permanently allocated at
10613 * initialization time, and their reference count is permanently set to
10614 * zero. The pml4 entries pointing to those pages are copied into
10615 * each allocated pmap.
10616 *
10617 * In contrast, pd and pt pages are managed like user page table
10618 * pages. They are dynamically allocated, and their reference count
10619 * represents the number of valid entries within the page.
10620 */
10621 static vm_page_t
10622 pmap_large_map_getptp_unlocked(void)
10623 {
10624 return (pmap_alloc_pt_page(kernel_pmap, 0, VM_ALLOC_ZERO));
10625 }
10626
10627 static vm_page_t
10628 pmap_large_map_getptp(void)
10629 {
10630 vm_page_t m;
10631
10632 PMAP_LOCK_ASSERT(kernel_pmap, MA_OWNED);
10633 m = pmap_large_map_getptp_unlocked();
10634 if (m == NULL) {
10635 PMAP_UNLOCK(kernel_pmap);
10636 vm_wait(NULL);
10637 PMAP_LOCK(kernel_pmap);
10638 /* Callers retry. */
10639 }
10640 return (m);
10641 }
10642
10643 static pdp_entry_t *
10644 pmap_large_map_pdpe(vm_offset_t va)
10645 {
10646 vm_pindex_t pml4_idx;
10647 vm_paddr_t mphys;
10648
10649 pml4_idx = pmap_pml4e_index(va);
10650 KASSERT(LMSPML4I <= pml4_idx && pml4_idx < LMSPML4I + lm_ents,
10651 ("pmap_large_map_pdpe: va %#jx out of range idx %#jx LMSPML4I "
10652 "%#jx lm_ents %d",
10653 (uintmax_t)va, (uintmax_t)pml4_idx, LMSPML4I, lm_ents));
10654 KASSERT((kernel_pml4[pml4_idx] & X86_PG_V) != 0,
10655 ("pmap_large_map_pdpe: invalid pml4 for va %#jx idx %#jx "
10656 "LMSPML4I %#jx lm_ents %d",
10657 (uintmax_t)va, (uintmax_t)pml4_idx, LMSPML4I, lm_ents));
10658 mphys = kernel_pml4[pml4_idx] & PG_FRAME;
10659 return ((pdp_entry_t *)PHYS_TO_DMAP(mphys) + pmap_pdpe_index(va));
10660 }
10661
10662 static pd_entry_t *
10663 pmap_large_map_pde(vm_offset_t va)
10664 {
10665 pdp_entry_t *pdpe;
10666 vm_page_t m;
10667 vm_paddr_t mphys;
10668
10669 retry:
10670 pdpe = pmap_large_map_pdpe(va);
10671 if (*pdpe == 0) {
10672 m = pmap_large_map_getptp();
10673 if (m == NULL)
10674 goto retry;
10675 mphys = VM_PAGE_TO_PHYS(m);
10676 *pdpe = mphys | X86_PG_A | X86_PG_RW | X86_PG_V | pg_nx;
10677 } else {
10678 MPASS((*pdpe & X86_PG_PS) == 0);
10679 mphys = *pdpe & PG_FRAME;
10680 }
10681 return ((pd_entry_t *)PHYS_TO_DMAP(mphys) + pmap_pde_index(va));
10682 }
10683
10684 static pt_entry_t *
10685 pmap_large_map_pte(vm_offset_t va)
10686 {
10687 pd_entry_t *pde;
10688 vm_page_t m;
10689 vm_paddr_t mphys;
10690
10691 retry:
10692 pde = pmap_large_map_pde(va);
10693 if (*pde == 0) {
10694 m = pmap_large_map_getptp();
10695 if (m == NULL)
10696 goto retry;
10697 mphys = VM_PAGE_TO_PHYS(m);
10698 *pde = mphys | X86_PG_A | X86_PG_RW | X86_PG_V | pg_nx;
10699 PHYS_TO_VM_PAGE(DMAP_TO_PHYS((uintptr_t)pde))->ref_count++;
10700 } else {
10701 MPASS((*pde & X86_PG_PS) == 0);
10702 mphys = *pde & PG_FRAME;
10703 }
10704 return ((pt_entry_t *)PHYS_TO_DMAP(mphys) + pmap_pte_index(va));
10705 }
10706
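/*
 * Translate a large map virtual address into the physical address backing
 * it, handling 1GB, 2MB, and 4KB mappings.  The mapping must be valid.
 */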
10707 static vm_paddr_t
10708 pmap_large_map_kextract(vm_offset_t va)
10709 {
10710 pdp_entry_t *pdpe, pdp;
10711 pd_entry_t *pde, pd;
10712 pt_entry_t *pte, pt;
10713
10714 KASSERT(PMAP_ADDRESS_IN_LARGEMAP(va),
10715 ("not largemap range %#lx", (u_long)va));
10716 pdpe = pmap_large_map_pdpe(va);
10717 pdp = *pdpe;
10718 KASSERT((pdp & X86_PG_V) != 0,
10719 ("invalid pdp va %#lx pdpe %#lx pdp %#lx", va,
10720 (u_long)pdpe, pdp));
10721 if ((pdp & X86_PG_PS) != 0) {
10722 KASSERT((amd_feature & AMDID_PAGE1GB) != 0,
10723 ("no 1G pages, va %#lx pdpe %#lx pdp %#lx", va,
10724 (u_long)pdpe, pdp));
10725 return ((pdp & PG_PS_PDP_FRAME) | (va & PDPMASK));
10726 }
10727 pde = pmap_pdpe_to_pde(pdpe, va);
10728 pd = *pde;
10729 KASSERT((pd & X86_PG_V) != 0,
10730 ("invalid pd va %#lx pde %#lx pd %#lx", va, (u_long)pde, pd));
10731 if ((pd & X86_PG_PS) != 0)
10732 return ((pd & PG_PS_FRAME) | (va & PDRMASK));
10733 pte = pmap_pde_to_pte(pde, va);
10734 pt = *pte;
10735 KASSERT((pt & X86_PG_V) != 0,
10736 ("invalid pte va %#lx pte %#lx pt %#lx", va, (u_long)pte, pt));
10737 return ((pt & PG_FRAME) | (va & PAGE_MASK));
10738 }
10739
10740 static int
10741 pmap_large_map_getva(vm_size_t len, vm_offset_t align, vm_offset_t phase,
10742 vmem_addr_t *vmem_res)
10743 {
10744
10745 /*
10746 * Large mappings are all but static. Consequently, there
10747 * is no point in waiting for an earlier allocation to be
10748 * freed.
10749 */
10750 return (vmem_xalloc(large_vmem, len, align, phase, 0, VMEM_ADDR_MIN,
10751 VMEM_ADDR_MAX, M_NOWAIT | M_BESTFIT, vmem_res));
10752 }
10753
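/*
 * Map the physical range [spa, spa + len) into kernel virtual address space
 * with the requested memory attribute.  If the range is covered by the
 * direct map, the DMAP address is reused after adjusting its attributes;
 * otherwise KVA is allocated from the large map, preferring 1GB and 2MB
 * mappings when the physical range and the chosen alignment allow it.
 *
 * A minimal sketch of a consumer (the names "dev_pa" and "dev_len" are
 * illustrative, not part of any existing interface):
 *
 *	void *va;
 *	if (pmap_large_map(dev_pa, dev_len, &va, VM_MEMATTR_WRITE_BACK) == 0) {
 *		... access the memory through "va" ...
 *		pmap_large_map_wb(va, dev_len);	   (write back dirty lines)
 *		pmap_large_unmap(va, dev_len);
 *	}
 */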
10754 int
10755 pmap_large_map(vm_paddr_t spa, vm_size_t len, void **addr,
10756 vm_memattr_t mattr)
10757 {
10758 pdp_entry_t *pdpe;
10759 pd_entry_t *pde;
10760 pt_entry_t *pte;
10761 vm_offset_t va, inc;
10762 vmem_addr_t vmem_res;
10763 vm_paddr_t pa;
10764 int error;
10765
10766 if (len == 0 || spa + len < spa)
10767 return (EINVAL);
10768
10769 /* See if DMAP can serve. */
10770 if (spa + len <= dmaplimit) {
10771 va = PHYS_TO_DMAP(spa);
10772 *addr = (void *)va;
10773 return (pmap_change_attr(va, len, mattr));
10774 }
10775
10776 /*
10777 	 * No, allocate KVA.  Fit the address with the best possible
10778 	 * alignment for superpages.  Fall back to a smaller alignment
10779 	 * if that fails.
10780 */
10781 error = ENOMEM;
10782 if ((amd_feature & AMDID_PAGE1GB) != 0 && rounddown2(spa + len,
10783 NBPDP) >= roundup2(spa, NBPDP) + NBPDP)
10784 error = pmap_large_map_getva(len, NBPDP, spa & PDPMASK,
10785 &vmem_res);
10786 if (error != 0 && rounddown2(spa + len, NBPDR) >= roundup2(spa,
10787 NBPDR) + NBPDR)
10788 error = pmap_large_map_getva(len, NBPDR, spa & PDRMASK,
10789 &vmem_res);
10790 if (error != 0)
10791 error = pmap_large_map_getva(len, PAGE_SIZE, 0, &vmem_res);
10792 if (error != 0)
10793 return (error);
10794
10795 /*
10796 	 * Fill the page table.  PG_M is not pre-set; we scan the modified
10797 	 * bits in the page table to minimize flushing.  There is no need to
10798 	 * invalidate the TLB, since we only update invalid entries.
10799 */
10800 PMAP_LOCK(kernel_pmap);
10801 for (pa = spa, va = vmem_res; len > 0; pa += inc, va += inc,
10802 len -= inc) {
10803 if ((amd_feature & AMDID_PAGE1GB) != 0 && len >= NBPDP &&
10804 (pa & PDPMASK) == 0 && (va & PDPMASK) == 0) {
10805 pdpe = pmap_large_map_pdpe(va);
10806 MPASS(*pdpe == 0);
10807 *pdpe = pa | pg_g | X86_PG_PS | X86_PG_RW |
10808 X86_PG_V | X86_PG_A | pg_nx |
10809 pmap_cache_bits(kernel_pmap, mattr, true);
10810 inc = NBPDP;
10811 } else if (len >= NBPDR && (pa & PDRMASK) == 0 &&
10812 (va & PDRMASK) == 0) {
10813 pde = pmap_large_map_pde(va);
10814 MPASS(*pde == 0);
10815 *pde = pa | pg_g | X86_PG_PS | X86_PG_RW |
10816 X86_PG_V | X86_PG_A | pg_nx |
10817 pmap_cache_bits(kernel_pmap, mattr, true);
10818 PHYS_TO_VM_PAGE(DMAP_TO_PHYS((uintptr_t)pde))->
10819 ref_count++;
10820 inc = NBPDR;
10821 } else {
10822 pte = pmap_large_map_pte(va);
10823 MPASS(*pte == 0);
10824 *pte = pa | pg_g | X86_PG_RW | X86_PG_V |
10825 X86_PG_A | pg_nx | pmap_cache_bits(kernel_pmap,
10826 mattr, false);
10827 PHYS_TO_VM_PAGE(DMAP_TO_PHYS((uintptr_t)pte))->
10828 ref_count++;
10829 inc = PAGE_SIZE;
10830 }
10831 }
10832 PMAP_UNLOCK(kernel_pmap);
10833 MPASS(len == 0);
10834
10835 *addr = (void *)vmem_res;
10836 return (0);
10837 }
10838
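/*
 * Tear down a range established by pmap_large_map().  Ranges that were
 * served from the direct map are left alone.  Page table pages whose
 * reference counts drop to zero are freed, the TLB is invalidated for the
 * whole range, and the KVA is returned to large_vmem.
 */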
10839 void
10840 pmap_large_unmap(void *svaa, vm_size_t len)
10841 {
10842 vm_offset_t sva, va;
10843 vm_size_t inc;
10844 pdp_entry_t *pdpe, pdp;
10845 pd_entry_t *pde, pd;
10846 pt_entry_t *pte;
10847 vm_page_t m;
10848 struct spglist spgf;
10849
10850 sva = (vm_offset_t)svaa;
10851 if (len == 0 || sva + len < sva || (sva >= DMAP_MIN_ADDRESS &&
10852 sva + len <= DMAP_MIN_ADDRESS + dmaplimit))
10853 return;
10854
10855 SLIST_INIT(&spgf);
10856 KASSERT(PMAP_ADDRESS_IN_LARGEMAP(sva) &&
10857 PMAP_ADDRESS_IN_LARGEMAP(sva + len - 1),
10858 ("not largemap range %#lx %#lx", (u_long)svaa, (u_long)svaa + len));
10859 PMAP_LOCK(kernel_pmap);
10860 for (va = sva; va < sva + len; va += inc) {
10861 pdpe = pmap_large_map_pdpe(va);
10862 pdp = *pdpe;
10863 KASSERT((pdp & X86_PG_V) != 0,
10864 ("invalid pdp va %#lx pdpe %#lx pdp %#lx", va,
10865 (u_long)pdpe, pdp));
10866 if ((pdp & X86_PG_PS) != 0) {
10867 KASSERT((amd_feature & AMDID_PAGE1GB) != 0,
10868 ("no 1G pages, va %#lx pdpe %#lx pdp %#lx", va,
10869 (u_long)pdpe, pdp));
10870 KASSERT((va & PDPMASK) == 0,
10871 ("PDPMASK bit set, va %#lx pdpe %#lx pdp %#lx", va,
10872 (u_long)pdpe, pdp));
10873 KASSERT(va + NBPDP <= sva + len,
10874 ("unmap covers partial 1GB page, sva %#lx va %#lx "
10875 "pdpe %#lx pdp %#lx len %#lx", sva, va,
10876 (u_long)pdpe, pdp, len));
10877 *pdpe = 0;
10878 inc = NBPDP;
10879 continue;
10880 }
10881 pde = pmap_pdpe_to_pde(pdpe, va);
10882 pd = *pde;
10883 KASSERT((pd & X86_PG_V) != 0,
10884 ("invalid pd va %#lx pde %#lx pd %#lx", va,
10885 (u_long)pde, pd));
10886 if ((pd & X86_PG_PS) != 0) {
10887 KASSERT((va & PDRMASK) == 0,
10888 ("PDRMASK bit set, va %#lx pde %#lx pd %#lx", va,
10889 (u_long)pde, pd));
10890 KASSERT(va + NBPDR <= sva + len,
10891 ("unmap covers partial 2MB page, sva %#lx va %#lx "
10892 "pde %#lx pd %#lx len %#lx", sva, va, (u_long)pde,
10893 pd, len));
10894 pde_store(pde, 0);
10895 inc = NBPDR;
10896 m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pde));
10897 m->ref_count--;
10898 if (m->ref_count == 0) {
10899 *pdpe = 0;
10900 SLIST_INSERT_HEAD(&spgf, m, plinks.s.ss);
10901 }
10902 continue;
10903 }
10904 pte = pmap_pde_to_pte(pde, va);
10905 KASSERT((*pte & X86_PG_V) != 0,
10906 ("invalid pte va %#lx pte %#lx pt %#lx", va,
10907 (u_long)pte, *pte));
10908 pte_clear(pte);
10909 inc = PAGE_SIZE;
10910 m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pte));
10911 m->ref_count--;
10912 if (m->ref_count == 0) {
10913 *pde = 0;
10914 SLIST_INSERT_HEAD(&spgf, m, plinks.s.ss);
10915 m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pde));
10916 m->ref_count--;
10917 if (m->ref_count == 0) {
10918 *pdpe = 0;
10919 SLIST_INSERT_HEAD(&spgf, m, plinks.s.ss);
10920 }
10921 }
10922 }
10923 pmap_invalidate_range(kernel_pmap, sva, sva + len);
10924 PMAP_UNLOCK(kernel_pmap);
10925 vm_page_free_pages_toq(&spgf, false);
10926 vmem_free(large_vmem, sva, len);
10927 }
10928
10929 static void
10930 pmap_large_map_wb_fence_mfence(void)
10931 {
10932
10933 mfence();
10934 }
10935
10936 static void
10937 pmap_large_map_wb_fence_atomic(void)
10938 {
10939
10940 atomic_thread_fence_seq_cst();
10941 }
10942
10943 static void
10944 pmap_large_map_wb_fence_nop(void)
10945 {
10946 }
10947
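/*
 * Choose the fence that brackets the flush loop in pmap_large_map_wb():
 * MFENCE on non-Intel CPUs, a seq_cst atomic fence on Intel CPUs that lack
 * both CLWB and CLFLUSHOPT, and no fence at all otherwise.
 */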
10948 DEFINE_IFUNC(static, void, pmap_large_map_wb_fence, (void))
10949 {
10950
10951 if (cpu_vendor_id != CPU_VENDOR_INTEL)
10952 return (pmap_large_map_wb_fence_mfence);
10953 else if ((cpu_stdext_feature & (CPUID_STDEXT_CLWB |
10954 CPUID_STDEXT_CLFLUSHOPT)) == 0)
10955 return (pmap_large_map_wb_fence_atomic);
10956 else
10957 		/* clflush is strongly ordered enough */
10958 return (pmap_large_map_wb_fence_nop);
10959 }
10960
10961 static void
10962 pmap_large_map_flush_range_clwb(vm_offset_t va, vm_size_t len)
10963 {
10964
10965 for (; len > 0; len -= cpu_clflush_line_size,
10966 va += cpu_clflush_line_size)
10967 clwb(va);
10968 }
10969
10970 static void
10971 pmap_large_map_flush_range_clflushopt(vm_offset_t va, vm_size_t len)
10972 {
10973
10974 for (; len > 0; len -= cpu_clflush_line_size,
10975 va += cpu_clflush_line_size)
10976 clflushopt(va);
10977 }
10978
10979 static void
10980 pmap_large_map_flush_range_clflush(vm_offset_t va, vm_size_t len)
10981 {
10982
10983 for (; len > 0; len -= cpu_clflush_line_size,
10984 va += cpu_clflush_line_size)
10985 clflush(va);
10986 }
10987
10988 static void
10989 pmap_large_map_flush_range_nop(vm_offset_t sva __unused, vm_size_t len __unused)
10990 {
10991 }
10992
10993 DEFINE_IFUNC(static, void, pmap_large_map_flush_range, (vm_offset_t, vm_size_t))
10994 {
10995
10996 if ((cpu_stdext_feature & CPUID_STDEXT_CLWB) != 0)
10997 return (pmap_large_map_flush_range_clwb);
10998 else if ((cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0)
10999 return (pmap_large_map_flush_range_clflushopt);
11000 else if ((cpu_feature & CPUID_CLFSH) != 0)
11001 return (pmap_large_map_flush_range_clflush);
11002 else
11003 return (pmap_large_map_flush_range_nop);
11004 }
11005
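/*
 * Write back dirty cache lines for every mapping in [sva, eva) of the large
 * map.  PG_M on a leaf entry records that a write-back is needed, and
 * X86_PG_AVAIL1 serves as a busy marker so that concurrent callers do not
 * flush the same range twice; see the comments in the loop below for the
 * hand-off protocol.
 */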
11006 static void
11007 pmap_large_map_wb_large(vm_offset_t sva, vm_offset_t eva)
11008 {
11009 volatile u_long *pe;
11010 u_long p;
11011 vm_offset_t va;
11012 vm_size_t inc;
11013 bool seen_other;
11014
11015 for (va = sva; va < eva; va += inc) {
11016 inc = 0;
11017 if ((amd_feature & AMDID_PAGE1GB) != 0) {
11018 pe = (volatile u_long *)pmap_large_map_pdpe(va);
11019 p = *pe;
11020 if ((p & X86_PG_PS) != 0)
11021 inc = NBPDP;
11022 }
11023 if (inc == 0) {
11024 pe = (volatile u_long *)pmap_large_map_pde(va);
11025 p = *pe;
11026 if ((p & X86_PG_PS) != 0)
11027 inc = NBPDR;
11028 }
11029 if (inc == 0) {
11030 pe = (volatile u_long *)pmap_large_map_pte(va);
11031 p = *pe;
11032 inc = PAGE_SIZE;
11033 }
11034 seen_other = false;
11035 for (;;) {
11036 if ((p & X86_PG_AVAIL1) != 0) {
11037 /*
11038 * Spin-wait for the end of a parallel
11039 * write-back.
11040 */
11041 cpu_spinwait();
11042 p = *pe;
11043
11044 /*
11045 				 * If we saw another write-back
11046 				 * occurring, we cannot rely on PG_M to
11047 				 * indicate the state of the cache.  The
11048 				 * PG_M bit is cleared before the
11049 				 * flush to avoid ignoring new writes,
11050 				 * and writes that are relevant to
11051 				 * us might happen afterwards.
11052 */
11053 seen_other = true;
11054 continue;
11055 }
11056
11057 if ((p & X86_PG_M) != 0 || seen_other) {
11058 if (!atomic_fcmpset_long(pe, &p,
11059 (p & ~X86_PG_M) | X86_PG_AVAIL1))
11060 /*
11061 * If we saw PG_M without
11062 * PG_AVAIL1, and then on the
11063 * next attempt we do not
11064 * observe either PG_M or
11065 * PG_AVAIL1, the other
11066 * write-back started after us
11067 * and finished before us. We
11068 * can rely on it doing our
11069 * work.
11070 */
11071 continue;
11072 pmap_large_map_flush_range(va, inc);
11073 atomic_clear_long(pe, X86_PG_AVAIL1);
11074 }
11075 break;
11076 }
11077 maybe_yield();
11078 }
11079 }
11080
11081 /*
11082 * Write-back cache lines for the given address range.
11083 *
11084 * Must be called only on the range or sub-range returned from
11085 * pmap_large_map(). Must not be called on the coalesced ranges.
11086 *
11087 * Does nothing on CPUs without CLWB, CLFLUSHOPT, or CLFLUSH
11088  * instruction support.
11089 */
11090 void
11091 pmap_large_map_wb(void *svap, vm_size_t len)
11092 {
11093 vm_offset_t eva, sva;
11094
11095 sva = (vm_offset_t)svap;
11096 eva = sva + len;
11097 pmap_large_map_wb_fence();
11098 if (sva >= DMAP_MIN_ADDRESS && eva <= DMAP_MIN_ADDRESS + dmaplimit) {
11099 pmap_large_map_flush_range(sva, len);
11100 } else {
11101 KASSERT(sva >= LARGEMAP_MIN_ADDRESS &&
11102 eva <= LARGEMAP_MIN_ADDRESS + lm_ents * NBPML4,
11103 ("pmap_large_map_wb: not largemap %#lx %#lx", sva, len));
11104 pmap_large_map_wb_large(sva, eva);
11105 }
11106 pmap_large_map_wb_fence();
11107 }
11108
11109 static vm_page_t
11110 pmap_pti_alloc_page(void)
11111 {
11112 vm_page_t m;
11113
11114 VM_OBJECT_ASSERT_WLOCKED(pti_obj);
11115 m = vm_page_grab(pti_obj, pti_pg_idx++, VM_ALLOC_WIRED | VM_ALLOC_ZERO);
11116 return (m);
11117 }
11118
11119 static bool
11120 pmap_pti_free_page(vm_page_t m)
11121 {
11122 if (!vm_page_unwire_noq(m))
11123 return (false);
11124 vm_page_xbusy_claim(m);
11125 vm_page_free_zero(m);
11126 return (true);
11127 }
11128
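/*
 * Build the skeleton of the PTI user-mode page table: allocate the user
 * pml4 page, pre-wire pdp pages covering the kernel address range, and map
 * the pieces of the kernel that must stay visible in user mode (per-CPU
 * data, the IDT, the special exception stacks, and the kernel text).
 */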
11129 static void
11130 pmap_pti_init(void)
11131 {
11132 vm_page_t pml4_pg;
11133 pdp_entry_t *pdpe;
11134 vm_offset_t va;
11135 int i;
11136
11137 if (!pti)
11138 return;
11139 pti_obj = vm_pager_allocate(OBJT_PHYS, NULL, 0, VM_PROT_ALL, 0, NULL);
11140 VM_OBJECT_WLOCK(pti_obj);
11141 pml4_pg = pmap_pti_alloc_page();
11142 pti_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pml4_pg));
11143 for (va = VM_MIN_KERNEL_ADDRESS; va <= VM_MAX_KERNEL_ADDRESS &&
11144 va >= VM_MIN_KERNEL_ADDRESS && va > NBPML4; va += NBPML4) {
11145 pdpe = pmap_pti_pdpe(va);
11146 pmap_pti_wire_pte(pdpe);
11147 }
11148 pmap_pti_add_kva_locked((vm_offset_t)&__pcpu[0],
11149 (vm_offset_t)&__pcpu[0] + sizeof(__pcpu[0]) * MAXCPU, false);
11150 pmap_pti_add_kva_locked((vm_offset_t)idt, (vm_offset_t)idt +
11151 sizeof(struct gate_descriptor) * NIDT, false);
11152 CPU_FOREACH(i) {
11153 /* Doublefault stack IST 1 */
11154 va = __pcpu[i].pc_common_tss.tss_ist1 + sizeof(struct nmi_pcpu);
11155 pmap_pti_add_kva_locked(va - DBLFAULT_STACK_SIZE, va, false);
11156 /* NMI stack IST 2 */
11157 va = __pcpu[i].pc_common_tss.tss_ist2 + sizeof(struct nmi_pcpu);
11158 pmap_pti_add_kva_locked(va - NMI_STACK_SIZE, va, false);
11159 /* MC# stack IST 3 */
11160 va = __pcpu[i].pc_common_tss.tss_ist3 +
11161 sizeof(struct nmi_pcpu);
11162 pmap_pti_add_kva_locked(va - MCE_STACK_SIZE, va, false);
11163 /* DB# stack IST 4 */
11164 va = __pcpu[i].pc_common_tss.tss_ist4 + sizeof(struct nmi_pcpu);
11165 pmap_pti_add_kva_locked(va - DBG_STACK_SIZE, va, false);
11166 }
11167 pmap_pti_add_kva_locked((vm_offset_t)KERNSTART, (vm_offset_t)etext,
11168 true);
11169 pti_finalized = true;
11170 VM_OBJECT_WUNLOCK(pti_obj);
11171 }
11172
11173 static void
11174 pmap_cpu_init(void *arg __unused)
11175 {
11176 CPU_COPY(&all_cpus, &kernel_pmap->pm_active);
11177 pmap_pti_init();
11178 }
11179 SYSINIT(pmap_cpu, SI_SUB_CPU + 1, SI_ORDER_ANY, pmap_cpu_init, NULL);
11180
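/*
 * Return a pointer to the PTI pdp entry for "va", allocating and installing
 * a new page at the pml4 level if necessary.  New pml4 entries may not be
 * created once pti_finalized has been set.
 */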
11181 static pdp_entry_t *
11182 pmap_pti_pdpe(vm_offset_t va)
11183 {
11184 pml4_entry_t *pml4e;
11185 pdp_entry_t *pdpe;
11186 vm_page_t m;
11187 vm_pindex_t pml4_idx;
11188 vm_paddr_t mphys;
11189
11190 VM_OBJECT_ASSERT_WLOCKED(pti_obj);
11191
11192 pml4_idx = pmap_pml4e_index(va);
11193 pml4e = &pti_pml4[pml4_idx];
11194 m = NULL;
11195 if (*pml4e == 0) {
11196 if (pti_finalized)
11197 panic("pml4 alloc after finalization\n");
11198 m = pmap_pti_alloc_page();
11199 if (*pml4e != 0) {
11200 pmap_pti_free_page(m);
11201 mphys = *pml4e & ~PAGE_MASK;
11202 } else {
11203 mphys = VM_PAGE_TO_PHYS(m);
11204 *pml4e = mphys | X86_PG_RW | X86_PG_V;
11205 }
11206 } else {
11207 mphys = *pml4e & ~PAGE_MASK;
11208 }
11209 pdpe = (pdp_entry_t *)PHYS_TO_DMAP(mphys) + pmap_pdpe_index(va);
11210 return (pdpe);
11211 }
11212
11213 static void
11214 pmap_pti_wire_pte(void *pte)
11215 {
11216 vm_page_t m;
11217
11218 VM_OBJECT_ASSERT_WLOCKED(pti_obj);
11219 m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((uintptr_t)pte));
11220 m->ref_count++;
11221 }
11222
11223 static void
11224 pmap_pti_unwire_pde(void *pde, bool only_ref)
11225 {
11226 vm_page_t m;
11227
11228 VM_OBJECT_ASSERT_WLOCKED(pti_obj);
11229 m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((uintptr_t)pde));
11230 MPASS(only_ref || m->ref_count > 1);
11231 pmap_pti_free_page(m);
11232 }
11233
11234 static void
11235 pmap_pti_unwire_pte(void *pte, vm_offset_t va)
11236 {
11237 vm_page_t m;
11238 pd_entry_t *pde;
11239
11240 VM_OBJECT_ASSERT_WLOCKED(pti_obj);
11241 m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((uintptr_t)pte));
11242 if (pmap_pti_free_page(m)) {
11243 pde = pmap_pti_pde(va);
11244 MPASS((*pde & (X86_PG_PS | X86_PG_V)) == X86_PG_V);
11245 *pde = 0;
11246 pmap_pti_unwire_pde(pde, false);
11247 }
11248 }
11249
11250 static pd_entry_t *
11251 pmap_pti_pde(vm_offset_t va)
11252 {
11253 pdp_entry_t *pdpe;
11254 pd_entry_t *pde;
11255 vm_page_t m;
11256 vm_pindex_t pd_idx;
11257 vm_paddr_t mphys;
11258
11259 VM_OBJECT_ASSERT_WLOCKED(pti_obj);
11260
11261 pdpe = pmap_pti_pdpe(va);
11262 if (*pdpe == 0) {
11263 m = pmap_pti_alloc_page();
11264 if (*pdpe != 0) {
11265 pmap_pti_free_page(m);
11266 MPASS((*pdpe & X86_PG_PS) == 0);
11267 mphys = *pdpe & ~PAGE_MASK;
11268 } else {
11269 mphys = VM_PAGE_TO_PHYS(m);
11270 *pdpe = mphys | X86_PG_RW | X86_PG_V;
11271 }
11272 } else {
11273 MPASS((*pdpe & X86_PG_PS) == 0);
11274 mphys = *pdpe & ~PAGE_MASK;
11275 }
11276
11277 pde = (pd_entry_t *)PHYS_TO_DMAP(mphys);
11278 pd_idx = pmap_pde_index(va);
11279 pde += pd_idx;
11280 return (pde);
11281 }
11282
11283 static pt_entry_t *
11284 pmap_pti_pte(vm_offset_t va, bool *unwire_pde)
11285 {
11286 pd_entry_t *pde;
11287 pt_entry_t *pte;
11288 vm_page_t m;
11289 vm_paddr_t mphys;
11290
11291 VM_OBJECT_ASSERT_WLOCKED(pti_obj);
11292
11293 pde = pmap_pti_pde(va);
11294 if (unwire_pde != NULL) {
11295 *unwire_pde = true;
11296 pmap_pti_wire_pte(pde);
11297 }
11298 if (*pde == 0) {
11299 m = pmap_pti_alloc_page();
11300 if (*pde != 0) {
11301 pmap_pti_free_page(m);
11302 MPASS((*pde & X86_PG_PS) == 0);
11303 mphys = *pde & ~(PAGE_MASK | pg_nx);
11304 } else {
11305 mphys = VM_PAGE_TO_PHYS(m);
11306 *pde = mphys | X86_PG_RW | X86_PG_V;
11307 if (unwire_pde != NULL)
11308 *unwire_pde = false;
11309 }
11310 } else {
11311 MPASS((*pde & X86_PG_PS) == 0);
11312 mphys = *pde & ~(PAGE_MASK | pg_nx);
11313 }
11314
11315 pte = (pt_entry_t *)PHYS_TO_DMAP(mphys);
11316 pte += pmap_pte_index(va);
11317
11318 return (pte);
11319 }
11320
11321 static void
11322 pmap_pti_add_kva_locked(vm_offset_t sva, vm_offset_t eva, bool exec)
11323 {
11324 vm_paddr_t pa;
11325 pd_entry_t *pde;
11326 pt_entry_t *pte, ptev;
11327 bool unwire_pde;
11328
11329 VM_OBJECT_ASSERT_WLOCKED(pti_obj);
11330
11331 sva = trunc_page(sva);
11332 MPASS(sva > VM_MAXUSER_ADDRESS);
11333 eva = round_page(eva);
11334 MPASS(sva < eva);
11335 for (; sva < eva; sva += PAGE_SIZE) {
11336 pte = pmap_pti_pte(sva, &unwire_pde);
11337 pa = pmap_kextract(sva);
11338 ptev = pa | X86_PG_RW | X86_PG_V | X86_PG_A | X86_PG_G |
11339 (exec ? 0 : pg_nx) | pmap_cache_bits(kernel_pmap,
11340 VM_MEMATTR_DEFAULT, false);
11341 if (*pte == 0) {
11342 pte_store(pte, ptev);
11343 pmap_pti_wire_pte(pte);
11344 } else {
11345 KASSERT(!pti_finalized,
11346 ("pti overlap after fin %#lx %#lx %#lx",
11347 sva, *pte, ptev));
11348 KASSERT(*pte == ptev,
11349 ("pti non-identical pte after fin %#lx %#lx %#lx",
11350 sva, *pte, ptev));
11351 }
11352 if (unwire_pde) {
11353 pde = pmap_pti_pde(sva);
11354 pmap_pti_unwire_pde(pde, true);
11355 }
11356 }
11357 }
11358
11359 void
11360 pmap_pti_add_kva(vm_offset_t sva, vm_offset_t eva, bool exec)
11361 {
11362
11363 if (!pti)
11364 return;
11365 VM_OBJECT_WLOCK(pti_obj);
11366 pmap_pti_add_kva_locked(sva, eva, exec);
11367 VM_OBJECT_WUNLOCK(pti_obj);
11368 }
11369
11370 void
11371 pmap_pti_remove_kva(vm_offset_t sva, vm_offset_t eva)
11372 {
11373 pt_entry_t *pte;
11374 vm_offset_t va;
11375
11376 if (!pti)
11377 return;
11378 sva = rounddown2(sva, PAGE_SIZE);
11379 MPASS(sva > VM_MAXUSER_ADDRESS);
11380 eva = roundup2(eva, PAGE_SIZE);
11381 MPASS(sva < eva);
11382 VM_OBJECT_WLOCK(pti_obj);
11383 for (va = sva; va < eva; va += PAGE_SIZE) {
11384 pte = pmap_pti_pte(va, NULL);
11385 KASSERT((*pte & X86_PG_V) != 0,
11386 ("invalid pte va %#lx pte %#lx pt %#lx", va,
11387 (u_long)pte, *pte));
11388 pte_clear(pte);
11389 pmap_pti_unwire_pte(pte, va);
11390 }
11391 pmap_invalidate_range(kernel_pmap, sva, eva);
11392 VM_OBJECT_WUNLOCK(pti_obj);
11393 }
11394
11395 static void *
11396 pkru_dup_range(void *ctx __unused, void *data)
11397 {
11398 struct pmap_pkru_range *node, *new_node;
11399
11400 new_node = uma_zalloc(pmap_pkru_ranges_zone, M_NOWAIT);
11401 if (new_node == NULL)
11402 return (NULL);
11403 node = data;
11404 memcpy(new_node, node, sizeof(*node));
11405 return (new_node);
11406 }
11407
11408 static void
11409 pkru_free_range(void *ctx __unused, void *node)
11410 {
11411
11412 uma_zfree(pmap_pkru_ranges_zone, node);
11413 }
11414
11415 static int
11416 pmap_pkru_assign(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, u_int keyidx,
11417 int flags)
11418 {
11419 struct pmap_pkru_range *ppr;
11420 int error;
11421
11422 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
11423 MPASS(pmap->pm_type == PT_X86);
11424 MPASS((cpu_stdext_feature2 & CPUID_STDEXT2_PKU) != 0);
11425 if ((flags & AMD64_PKRU_EXCL) != 0 &&
11426 !rangeset_check_empty(&pmap->pm_pkru, sva, eva))
11427 return (EBUSY);
11428 ppr = uma_zalloc(pmap_pkru_ranges_zone, M_NOWAIT);
11429 if (ppr == NULL)
11430 return (ENOMEM);
11431 ppr->pkru_keyidx = keyidx;
11432 ppr->pkru_flags = flags & AMD64_PKRU_PERSIST;
11433 error = rangeset_insert(&pmap->pm_pkru, sva, eva, ppr);
11434 if (error != 0)
11435 uma_zfree(pmap_pkru_ranges_zone, ppr);
11436 return (error);
11437 }
11438
11439 static int
11440 pmap_pkru_deassign(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
11441 {
11442
11443 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
11444 MPASS(pmap->pm_type == PT_X86);
11445 MPASS((cpu_stdext_feature2 & CPUID_STDEXT2_PKU) != 0);
11446 return (rangeset_remove(&pmap->pm_pkru, sva, eva));
11447 }
11448
11449 static void
11450 pmap_pkru_deassign_all(pmap_t pmap)
11451 {
11452
11453 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
11454 if (pmap->pm_type == PT_X86 &&
11455 (cpu_stdext_feature2 & CPUID_STDEXT2_PKU) != 0)
11456 rangeset_remove_all(&pmap->pm_pkru);
11457 }
11458
11459 /*
11460 * Returns true if the PKU setting is the same across the specified address
11461 * range, and false otherwise. When returning true, updates the referenced PTE
11462 * to reflect the PKU setting.
11463 */
11464 static bool
11465 pmap_pkru_same(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, pt_entry_t *pte)
11466 {
11467 struct pmap_pkru_range *next_ppr, *ppr;
11468 vm_offset_t va;
11469 u_int keyidx;
11470
11471 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
11472 KASSERT(pmap->pm_type != PT_X86 || (*pte & X86_PG_PKU_MASK) == 0,
11473 ("pte %p has unexpected PKU %ld", pte, *pte & X86_PG_PKU_MASK));
11474 if (pmap->pm_type != PT_X86 ||
11475 (cpu_stdext_feature2 & CPUID_STDEXT2_PKU) == 0 ||
11476 sva >= VM_MAXUSER_ADDRESS)
11477 return (true);
11478 MPASS(eva <= VM_MAXUSER_ADDRESS);
11479 ppr = rangeset_lookup(&pmap->pm_pkru, sva);
11480 if (ppr == NULL) {
11481 ppr = rangeset_next(&pmap->pm_pkru, sva);
11482 return (ppr == NULL ||
11483 ppr->pkru_rs_el.re_start >= eva);
11484 }
11485 keyidx = ppr->pkru_keyidx;
11486 while ((va = ppr->pkru_rs_el.re_end) < eva) {
11487 next_ppr = rangeset_next(&pmap->pm_pkru, va);
11488 if (next_ppr == NULL ||
11489 va != next_ppr->pkru_rs_el.re_start ||
11490 keyidx != next_ppr->pkru_keyidx)
11491 return (false);
11492 ppr = next_ppr;
11493 }
11494 *pte |= X86_PG_PKU(keyidx);
11495 return (true);
11496 }
11497
11498 static pt_entry_t
11499 pmap_pkru_get(pmap_t pmap, vm_offset_t va)
11500 {
11501 struct pmap_pkru_range *ppr;
11502
11503 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
11504 if (pmap->pm_type != PT_X86 ||
11505 (cpu_stdext_feature2 & CPUID_STDEXT2_PKU) == 0 ||
11506 va >= VM_MAXUSER_ADDRESS)
11507 return (0);
11508 ppr = rangeset_lookup(&pmap->pm_pkru, va);
11509 if (ppr != NULL)
11510 return (X86_PG_PKU(ppr->pkru_keyidx));
11511 return (0);
11512 }
11513
11514 static bool
11515 pred_pkru_on_remove(void *ctx __unused, void *r)
11516 {
11517 struct pmap_pkru_range *ppr;
11518
11519 ppr = r;
11520 return ((ppr->pkru_flags & AMD64_PKRU_PERSIST) == 0);
11521 }
11522
11523 static void
11524 pmap_pkru_on_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
11525 {
11526
11527 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
11528 if (pmap->pm_type == PT_X86 &&
11529 (cpu_stdext_feature2 & CPUID_STDEXT2_PKU) != 0) {
11530 rangeset_remove_pred(&pmap->pm_pkru, sva, eva,
11531 pred_pkru_on_remove);
11532 }
11533 }
11534
11535 static int
11536 pmap_pkru_copy(pmap_t dst_pmap, pmap_t src_pmap)
11537 {
11538
11539 PMAP_LOCK_ASSERT(dst_pmap, MA_OWNED);
11540 PMAP_LOCK_ASSERT(src_pmap, MA_OWNED);
11541 MPASS(dst_pmap->pm_type == PT_X86);
11542 MPASS(src_pmap->pm_type == PT_X86);
11543 MPASS((cpu_stdext_feature2 & CPUID_STDEXT2_PKU) != 0);
11544 if (src_pmap->pm_pkru.rs_data_ctx == NULL)
11545 return (0);
11546 return (rangeset_copy(&dst_pmap->pm_pkru, &src_pmap->pm_pkru));
11547 }
11548
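/*
 * Rewrite the protection-key index in the PKU bits of every valid mapping
 * in [sva, eva).  2MB mappings that only partially overlap the range are
 * demoted first, and the TLB is invalidated if any entry changed.
 */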
11549 static void
11550 pmap_pkru_update_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
11551 u_int keyidx)
11552 {
11553 pml4_entry_t *pml4e;
11554 pdp_entry_t *pdpe;
11555 pd_entry_t newpde, ptpaddr, *pde;
11556 pt_entry_t newpte, *ptep, pte;
11557 vm_offset_t va, va_next;
11558 bool changed;
11559
11560 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
11561 MPASS(pmap->pm_type == PT_X86);
11562 MPASS(keyidx <= PMAP_MAX_PKRU_IDX);
11563
11564 for (changed = false, va = sva; va < eva; va = va_next) {
11565 pml4e = pmap_pml4e(pmap, va);
11566 if (pml4e == NULL || (*pml4e & X86_PG_V) == 0) {
11567 va_next = (va + NBPML4) & ~PML4MASK;
11568 if (va_next < va)
11569 va_next = eva;
11570 continue;
11571 }
11572
11573 pdpe = pmap_pml4e_to_pdpe(pml4e, va);
11574 if ((*pdpe & X86_PG_V) == 0) {
11575 va_next = (va + NBPDP) & ~PDPMASK;
11576 if (va_next < va)
11577 va_next = eva;
11578 continue;
11579 }
11580
11581 va_next = (va + NBPDR) & ~PDRMASK;
11582 if (va_next < va)
11583 va_next = eva;
11584
11585 pde = pmap_pdpe_to_pde(pdpe, va);
11586 ptpaddr = *pde;
11587 if (ptpaddr == 0)
11588 continue;
11589
11590 MPASS((ptpaddr & X86_PG_V) != 0);
11591 if ((ptpaddr & PG_PS) != 0) {
11592 if (va + NBPDR == va_next && eva >= va_next) {
11593 newpde = (ptpaddr & ~X86_PG_PKU_MASK) |
11594 X86_PG_PKU(keyidx);
11595 if (newpde != ptpaddr) {
11596 *pde = newpde;
11597 changed = true;
11598 }
11599 continue;
11600 } else if (!pmap_demote_pde(pmap, pde, va)) {
11601 continue;
11602 }
11603 }
11604
11605 if (va_next > eva)
11606 va_next = eva;
11607
11608 for (ptep = pmap_pde_to_pte(pde, va); va != va_next;
11609 ptep++, va += PAGE_SIZE) {
11610 pte = *ptep;
11611 if ((pte & X86_PG_V) == 0)
11612 continue;
11613 newpte = (pte & ~X86_PG_PKU_MASK) | X86_PG_PKU(keyidx);
11614 if (newpte != pte) {
11615 *ptep = newpte;
11616 changed = true;
11617 }
11618 }
11619 }
11620 if (changed)
11621 pmap_invalidate_range(pmap, sva, eva);
11622 }
11623
11624 static int
11625 pmap_pkru_check_uargs(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
11626 u_int keyidx, int flags)
11627 {
11628
11629 if (pmap->pm_type != PT_X86 || keyidx > PMAP_MAX_PKRU_IDX ||
11630 (flags & ~(AMD64_PKRU_PERSIST | AMD64_PKRU_EXCL)) != 0)
11631 return (EINVAL);
11632 if (eva <= sva || eva > VM_MAXUSER_ADDRESS)
11633 return (EFAULT);
11634 if ((cpu_stdext_feature2 & CPUID_STDEXT2_PKU) == 0)
11635 return (ENOTSUP);
11636 return (0);
11637 }
11638
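/*
 * Assign protection key "keyidx" to the user address range [sva, eva) and
 * propagate it into the existing mappings, retrying on transient memory
 * shortage.  AMD64_PKRU_EXCL makes the call fail with EBUSY if the range
 * already has a key; AMD64_PKRU_PERSIST keeps the key across
 * pmap_pkru_on_remove().
 */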
11639 int
11640 pmap_pkru_set(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, u_int keyidx,
11641 int flags)
11642 {
11643 int error;
11644
11645 sva = trunc_page(sva);
11646 eva = round_page(eva);
11647 error = pmap_pkru_check_uargs(pmap, sva, eva, keyidx, flags);
11648 if (error != 0)
11649 return (error);
11650 for (;;) {
11651 PMAP_LOCK(pmap);
11652 error = pmap_pkru_assign(pmap, sva, eva, keyidx, flags);
11653 if (error == 0)
11654 pmap_pkru_update_range(pmap, sva, eva, keyidx);
11655 PMAP_UNLOCK(pmap);
11656 if (error != ENOMEM)
11657 break;
11658 vm_wait(NULL);
11659 }
11660 return (error);
11661 }
11662
11663 int
11664 pmap_pkru_clear(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
11665 {
11666 int error;
11667
11668 sva = trunc_page(sva);
11669 eva = round_page(eva);
11670 error = pmap_pkru_check_uargs(pmap, sva, eva, 0, 0);
11671 if (error != 0)
11672 return (error);
11673 for (;;) {
11674 PMAP_LOCK(pmap);
11675 error = pmap_pkru_deassign(pmap, sva, eva);
11676 if (error == 0)
11677 pmap_pkru_update_range(pmap, sva, eva, 0);
11678 PMAP_UNLOCK(pmap);
11679 if (error != ENOMEM)
11680 break;
11681 vm_wait(NULL);
11682 }
11683 return (error);
11684 }
11685
11686 #if defined(KASAN) || defined(KMSAN)
11687
11688 /*
11689 * Reserve enough memory to:
11690 * 1) allocate PDP pages for the shadow map(s),
11691  * 2) shadow the boot stack of KSTACK_PAGES pages, assuming that the
11692  *    kernel stack does not cross a 1GB boundary, so we need one or two
11693  *    PD pages, one or two PT pages, and KSTACK_PAGES shadow pages per
11694  *    shadow map.
11695 */
11696 #ifdef KASAN
11697 #define SAN_EARLY_PAGES \
11698 (NKASANPML4E + 2 + 2 + howmany(KSTACK_PAGES, KASAN_SHADOW_SCALE))
11699 #else
11700 #define SAN_EARLY_PAGES \
11701 (NKMSANSHADPML4E + NKMSANORIGPML4E + 2 * (2 + 2 + KSTACK_PAGES))
11702 #endif
11703
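/*
 * Hand out 4KB frames for the bootstrap shadow map from a statically
 * reserved buffer; panics once all SAN_EARLY_PAGES pages have been used.
 */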
11704 static uint64_t __nosanitizeaddress __nosanitizememory
11705 pmap_san_enter_early_alloc_4k(uint64_t pabase)
11706 {
11707 static uint8_t data[PAGE_SIZE * SAN_EARLY_PAGES] __aligned(PAGE_SIZE);
11708 static size_t offset = 0;
11709 uint64_t pa;
11710
11711 if (offset == sizeof(data)) {
11712 panic("%s: ran out of memory for the bootstrap shadow map",
11713 __func__);
11714 }
11715
11716 pa = pabase + ((vm_offset_t)&data[offset] - KERNSTART);
11717 offset += PAGE_SIZE;
11718 return (pa);
11719 }
11720
11721 /*
11722 * Map a shadow page, before the kernel has bootstrapped its page tables. This
11723 * is currently only used to shadow the temporary boot stack set up by locore.
11724 */
11725 static void __nosanitizeaddress __nosanitizememory
11726 pmap_san_enter_early(vm_offset_t va)
11727 {
11728 static bool first = true;
11729 pml4_entry_t *pml4e;
11730 pdp_entry_t *pdpe;
11731 pd_entry_t *pde;
11732 pt_entry_t *pte;
11733 uint64_t cr3, pa, base;
11734 int i;
11735
11736 base = amd64_loadaddr();
11737 cr3 = rcr3();
11738
11739 if (first) {
11740 /*
11741 		 * If this is the first call, we need to allocate new PML4Es for
11742 * the bootstrap shadow map(s). We don't know how the PML4 page
11743 * was initialized by the boot loader, so we can't simply test
11744 * whether the shadow map's PML4Es are zero.
11745 */
11746 first = false;
11747 #ifdef KASAN
11748 for (i = 0; i < NKASANPML4E; i++) {
11749 pa = pmap_san_enter_early_alloc_4k(base);
11750
11751 pml4e = (pml4_entry_t *)cr3 +
11752 pmap_pml4e_index(KASAN_MIN_ADDRESS + i * NBPML4);
11753 *pml4e = (pml4_entry_t)(pa | X86_PG_RW | X86_PG_V);
11754 }
11755 #else
11756 for (i = 0; i < NKMSANORIGPML4E; i++) {
11757 pa = pmap_san_enter_early_alloc_4k(base);
11758
11759 pml4e = (pml4_entry_t *)cr3 +
11760 pmap_pml4e_index(KMSAN_ORIG_MIN_ADDRESS +
11761 i * NBPML4);
11762 *pml4e = (pml4_entry_t)(pa | X86_PG_RW | X86_PG_V);
11763 }
11764 for (i = 0; i < NKMSANSHADPML4E; i++) {
11765 pa = pmap_san_enter_early_alloc_4k(base);
11766
11767 pml4e = (pml4_entry_t *)cr3 +
11768 pmap_pml4e_index(KMSAN_SHAD_MIN_ADDRESS +
11769 i * NBPML4);
11770 *pml4e = (pml4_entry_t)(pa | X86_PG_RW | X86_PG_V);
11771 }
11772 #endif
11773 }
11774 pml4e = (pml4_entry_t *)cr3 + pmap_pml4e_index(va);
11775 pdpe = (pdp_entry_t *)(*pml4e & PG_FRAME) + pmap_pdpe_index(va);
11776 if (*pdpe == 0) {
11777 pa = pmap_san_enter_early_alloc_4k(base);
11778 *pdpe = (pdp_entry_t)(pa | X86_PG_RW | X86_PG_V);
11779 }
11780 pde = (pd_entry_t *)(*pdpe & PG_FRAME) + pmap_pde_index(va);
11781 if (*pde == 0) {
11782 pa = pmap_san_enter_early_alloc_4k(base);
11783 *pde = (pd_entry_t)(pa | X86_PG_RW | X86_PG_V);
11784 }
11785 pte = (pt_entry_t *)(*pde & PG_FRAME) + pmap_pte_index(va);
11786 if (*pte != 0)
11787 panic("%s: PTE for %#lx is already initialized", __func__, va);
11788 pa = pmap_san_enter_early_alloc_4k(base);
11789 *pte = (pt_entry_t)(pa | X86_PG_A | X86_PG_M | X86_PG_RW | X86_PG_V);
11790 }
11791
11792 static vm_page_t
11793 pmap_san_enter_alloc_4k(void)
11794 {
11795 vm_page_t m;
11796
11797 m = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED |
11798 VM_ALLOC_ZERO);
11799 if (m == NULL)
11800 panic("%s: no memory to grow shadow map", __func__);
11801 return (m);
11802 }
11803
11804 static vm_page_t
11805 pmap_san_enter_alloc_2m(void)
11806 {
11807 return (vm_page_alloc_noobj_contig(VM_ALLOC_WIRED | VM_ALLOC_ZERO,
11808 NPTEPG, 0, ~0ul, NBPDR, 0, VM_MEMATTR_DEFAULT));
11809 }
11810
11811 /*
11812 * Grow a shadow map by at least one 4KB page at the specified address. Use 2MB
11813 * pages when possible.
11814 */
11815 void __nosanitizeaddress __nosanitizememory
11816 pmap_san_enter(vm_offset_t va)
11817 {
11818 pdp_entry_t *pdpe;
11819 pd_entry_t *pde;
11820 pt_entry_t *pte;
11821 vm_page_t m;
11822
11823 if (kernphys == 0) {
11824 /*
11825 * We're creating a temporary shadow map for the boot stack.
11826 */
11827 pmap_san_enter_early(va);
11828 return;
11829 }
11830
11831 mtx_assert(&kernel_map->system_mtx, MA_OWNED);
11832
11833 pdpe = pmap_pdpe(kernel_pmap, va);
11834 if ((*pdpe & X86_PG_V) == 0) {
11835 m = pmap_san_enter_alloc_4k();
11836 *pdpe = (pdp_entry_t)(VM_PAGE_TO_PHYS(m) | X86_PG_RW |
11837 X86_PG_V | pg_nx);
11838 }
11839 pde = pmap_pdpe_to_pde(pdpe, va);
11840 if ((*pde & X86_PG_V) == 0) {
11841 m = pmap_san_enter_alloc_2m();
11842 if (m != NULL) {
11843 *pde = (pd_entry_t)(VM_PAGE_TO_PHYS(m) | X86_PG_RW |
11844 X86_PG_PS | X86_PG_V | X86_PG_A | X86_PG_M | pg_nx);
11845 } else {
11846 m = pmap_san_enter_alloc_4k();
11847 *pde = (pd_entry_t)(VM_PAGE_TO_PHYS(m) | X86_PG_RW |
11848 X86_PG_V | pg_nx);
11849 }
11850 }
11851 if ((*pde & X86_PG_PS) != 0)
11852 return;
11853 pte = pmap_pde_to_pte(pde, va);
11854 if ((*pte & X86_PG_V) != 0)
11855 return;
11856 m = pmap_san_enter_alloc_4k();
11857 *pte = (pt_entry_t)(VM_PAGE_TO_PHYS(m) | X86_PG_RW | X86_PG_V |
11858 X86_PG_M | X86_PG_A | pg_nx);
11859 }
11860 #endif
11861
11862 /*
11863 * Track a range of the kernel's virtual address space that is contiguous
11864 * in various mapping attributes.
11865 */
11866 struct pmap_kernel_map_range {
11867 vm_offset_t sva;
11868 pt_entry_t attrs;
11869 int ptes;
11870 int pdes;
11871 int pdpes;
11872 };
11873
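/*
 * Emit one line describing the accumulated range, translating its PAT index
 * back into a cache mode name, and then reset the range to the sentinel
 * value so that a new run can be started.
 */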
11874 static void
11875 sysctl_kmaps_dump(struct sbuf *sb, struct pmap_kernel_map_range *range,
11876 vm_offset_t eva)
11877 {
11878 const char *mode;
11879 int i, pat_idx;
11880
11881 if (eva <= range->sva)
11882 return;
11883
11884 pat_idx = pmap_pat_index(kernel_pmap, range->attrs, true);
11885 for (i = 0; i < PAT_INDEX_SIZE; i++)
11886 if (pat_index[i] == pat_idx)
11887 break;
11888
11889 switch (i) {
11890 case PAT_WRITE_BACK:
11891 mode = "WB";
11892 break;
11893 case PAT_WRITE_THROUGH:
11894 mode = "WT";
11895 break;
11896 case PAT_UNCACHEABLE:
11897 mode = "UC";
11898 break;
11899 case PAT_UNCACHED:
11900 mode = "U-";
11901 break;
11902 case PAT_WRITE_PROTECTED:
11903 mode = "WP";
11904 break;
11905 case PAT_WRITE_COMBINING:
11906 mode = "WC";
11907 break;
11908 default:
11909 printf("%s: unknown PAT mode %#x for range 0x%016lx-0x%016lx\n",
11910 __func__, pat_idx, range->sva, eva);
11911 mode = "??";
11912 break;
11913 }
11914
11915 sbuf_printf(sb, "0x%016lx-0x%016lx r%c%c%c%c %s %d %d %d\n",
11916 range->sva, eva,
11917 (range->attrs & X86_PG_RW) != 0 ? 'w' : '-',
11918 (range->attrs & pg_nx) != 0 ? '-' : 'x',
11919 (range->attrs & X86_PG_U) != 0 ? 'u' : 's',
11920 (range->attrs & X86_PG_G) != 0 ? 'g' : '-',
11921 mode, range->pdpes, range->pdes, range->ptes);
11922
11923 /* Reset to sentinel value. */
11924 range->sva = la57 ? KV5ADDR(NPML5EPG - 1, NPML4EPG - 1, NPDPEPG - 1,
11925 NPDEPG - 1, NPTEPG - 1) : KV4ADDR(NPML4EPG - 1, NPDPEPG - 1,
11926 NPDEPG - 1, NPTEPG - 1);
11927 }
11928
11929 /*
11930 * Determine whether the attributes specified by a page table entry match those
11931 * being tracked by the current range. This is not quite as simple as a direct
11932 * flag comparison since some PAT modes have multiple representations.
11933 */
11934 static bool
11935 sysctl_kmaps_match(struct pmap_kernel_map_range *range, pt_entry_t attrs)
11936 {
11937 pt_entry_t diff, mask;
11938
11939 mask = X86_PG_G | X86_PG_RW | X86_PG_U | X86_PG_PDE_CACHE | pg_nx;
11940 diff = (range->attrs ^ attrs) & mask;
11941 if (diff == 0)
11942 return (true);
11943 if ((diff & ~X86_PG_PDE_PAT) == 0 &&
11944 pmap_pat_index(kernel_pmap, range->attrs, true) ==
11945 pmap_pat_index(kernel_pmap, attrs, true))
11946 return (true);
11947 return (false);
11948 }
11949
11950 static void
11951 sysctl_kmaps_reinit(struct pmap_kernel_map_range *range, vm_offset_t va,
11952 pt_entry_t attrs)
11953 {
11954
11955 memset(range, 0, sizeof(*range));
11956 range->sva = va;
11957 range->attrs = attrs;
11958 }
11959
11960 /*
11961 * Given a leaf PTE, derive the mapping's attributes. If they do not match
11962 * those of the current run, dump the address range and its attributes, and
11963 * begin a new run.
11964 */
11965 static void
11966 sysctl_kmaps_check(struct sbuf *sb, struct pmap_kernel_map_range *range,
11967 vm_offset_t va, pml4_entry_t pml4e, pdp_entry_t pdpe, pd_entry_t pde,
11968 pt_entry_t pte)
11969 {
11970 pt_entry_t attrs;
11971
11972 attrs = pml4e & (X86_PG_RW | X86_PG_U | pg_nx);
11973
11974 attrs |= pdpe & pg_nx;
11975 attrs &= pg_nx | (pdpe & (X86_PG_RW | X86_PG_U));
11976 if ((pdpe & PG_PS) != 0) {
11977 attrs |= pdpe & (X86_PG_G | X86_PG_PDE_CACHE);
11978 } else if (pde != 0) {
11979 attrs |= pde & pg_nx;
11980 attrs &= pg_nx | (pde & (X86_PG_RW | X86_PG_U));
11981 }
11982 if ((pde & PG_PS) != 0) {
11983 attrs |= pde & (X86_PG_G | X86_PG_PDE_CACHE);
11984 } else if (pte != 0) {
11985 attrs |= pte & pg_nx;
11986 attrs &= pg_nx | (pte & (X86_PG_RW | X86_PG_U));
11987 attrs |= pte & (X86_PG_G | X86_PG_PTE_CACHE);
11988
11989 /* Canonicalize by always using the PDE PAT bit. */
11990 if ((attrs & X86_PG_PTE_PAT) != 0)
11991 attrs ^= X86_PG_PDE_PAT | X86_PG_PTE_PAT;
11992 }
11993
11994 if (range->sva > va || !sysctl_kmaps_match(range, attrs)) {
11995 sysctl_kmaps_dump(sb, range, va);
11996 sysctl_kmaps_reinit(range, va, attrs);
11997 }
11998 }
11999
12000 static int
12001 sysctl_kmaps(SYSCTL_HANDLER_ARGS)
12002 {
12003 struct pmap_kernel_map_range range;
12004 struct sbuf sbuf, *sb;
12005 pml4_entry_t pml4e;
12006 pdp_entry_t *pdp, pdpe;
12007 pd_entry_t *pd, pde;
12008 pt_entry_t *pt, pte;
12009 vm_offset_t sva;
12010 vm_paddr_t pa;
12011 int error, i, j, k, l;
12012
12013 error = sysctl_wire_old_buffer(req, 0);
12014 if (error != 0)
12015 return (error);
12016 sb = &sbuf;
12017 sbuf_new_for_sysctl(sb, NULL, PAGE_SIZE, req);
12018
12019 /* Sentinel value. */
12020 range.sva = la57 ? KV5ADDR(NPML5EPG - 1, NPML4EPG - 1, NPDPEPG - 1,
12021 NPDEPG - 1, NPTEPG - 1) : KV4ADDR(NPML4EPG - 1, NPDPEPG - 1,
12022 NPDEPG - 1, NPTEPG - 1);
12023
12024 /*
12025 * Iterate over the kernel page tables without holding the kernel pmap
12026 * lock. Outside of the large map, kernel page table pages are never
12027 * freed, so at worst we will observe inconsistencies in the output.
12028 * Within the large map, ensure that PDP and PD page addresses are
12029 * valid before descending.
12030 */
12031 for (sva = 0, i = pmap_pml4e_index(sva); i < NPML4EPG; i++) {
12032 switch (i) {
12033 case PML4PML4I:
12034 sbuf_printf(sb, "\nRecursive map:\n");
12035 break;
12036 case DMPML4I:
12037 sbuf_printf(sb, "\nDirect map:\n");
12038 break;
12039 #ifdef KASAN
12040 case KASANPML4I:
12041 sbuf_printf(sb, "\nKASAN shadow map:\n");
12042 break;
12043 #endif
12044 #ifdef KMSAN
12045 case KMSANSHADPML4I:
12046 sbuf_printf(sb, "\nKMSAN shadow map:\n");
12047 break;
12048 case KMSANORIGPML4I:
12049 sbuf_printf(sb, "\nKMSAN origin map:\n");
12050 break;
12051 #endif
12052 case KPML4BASE:
12053 sbuf_printf(sb, "\nKernel map:\n");
12054 break;
12055 case LMSPML4I:
12056 sbuf_printf(sb, "\nLarge map:\n");
12057 break;
12058 }
12059
12060 /* Convert to canonical form. */
12061 if (sva == 1ul << 47)
12062 sva |= -1ul << 48;
12063
12064 restart:
12065 pml4e = kernel_pml4[i];
12066 if ((pml4e & X86_PG_V) == 0) {
12067 sva = rounddown2(sva, NBPML4);
12068 sysctl_kmaps_dump(sb, &range, sva);
12069 sva += NBPML4;
12070 continue;
12071 }
12072 pa = pml4e & PG_FRAME;
12073 pdp = (pdp_entry_t *)PHYS_TO_DMAP(pa);
12074
12075 for (j = pmap_pdpe_index(sva); j < NPDPEPG; j++) {
12076 pdpe = pdp[j];
12077 if ((pdpe & X86_PG_V) == 0) {
12078 sva = rounddown2(sva, NBPDP);
12079 sysctl_kmaps_dump(sb, &range, sva);
12080 sva += NBPDP;
12081 continue;
12082 }
12083 pa = pdpe & PG_FRAME;
12084 if ((pdpe & PG_PS) != 0) {
12085 sva = rounddown2(sva, NBPDP);
12086 sysctl_kmaps_check(sb, &range, sva, pml4e, pdpe,
12087 0, 0);
12088 range.pdpes++;
12089 sva += NBPDP;
12090 continue;
12091 }
12092 if (PMAP_ADDRESS_IN_LARGEMAP(sva) &&
12093 vm_phys_paddr_to_vm_page(pa) == NULL) {
12094 /*
12095 * Page table pages for the large map may be
12096 * freed. Validate the next-level address
12097 * before descending.
12098 */
12099 goto restart;
12100 }
12101 pd = (pd_entry_t *)PHYS_TO_DMAP(pa);
12102
12103 for (k = pmap_pde_index(sva); k < NPDEPG; k++) {
12104 pde = pd[k];
12105 if ((pde & X86_PG_V) == 0) {
12106 sva = rounddown2(sva, NBPDR);
12107 sysctl_kmaps_dump(sb, &range, sva);
12108 sva += NBPDR;
12109 continue;
12110 }
12111 pa = pde & PG_FRAME;
12112 if ((pde & PG_PS) != 0) {
12113 sva = rounddown2(sva, NBPDR);
12114 sysctl_kmaps_check(sb, &range, sva,
12115 pml4e, pdpe, pde, 0);
12116 range.pdes++;
12117 sva += NBPDR;
12118 continue;
12119 }
12120 if (PMAP_ADDRESS_IN_LARGEMAP(sva) &&
12121 vm_phys_paddr_to_vm_page(pa) == NULL) {
12122 /*
12123 * Page table pages for the large map
12124 * may be freed. Validate the
12125 * next-level address before descending.
12126 */
12127 goto restart;
12128 }
12129 pt = (pt_entry_t *)PHYS_TO_DMAP(pa);
12130
12131 for (l = pmap_pte_index(sva); l < NPTEPG; l++,
12132 sva += PAGE_SIZE) {
12133 pte = pt[l];
12134 if ((pte & X86_PG_V) == 0) {
12135 sysctl_kmaps_dump(sb, &range,
12136 sva);
12137 continue;
12138 }
12139 sysctl_kmaps_check(sb, &range, sva,
12140 pml4e, pdpe, pde, pte);
12141 range.ptes++;
12142 }
12143 }
12144 }
12145 }
12146
12147 error = sbuf_finish(sb);
12148 sbuf_delete(sb);
12149 return (error);
12150 }
12151 SYSCTL_OID(_vm_pmap, OID_AUTO, kernel_maps,
12152 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE | CTLFLAG_SKIP,
12153 NULL, 0, sysctl_kmaps, "A",
12154 "Dump kernel address layout");
12155
12156 #ifdef DDB
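/*
 * "show pte <addr>": walk the page tables of the current pmap (or of the
 * thread being examined by the debugger) and print the raw entry at each
 * paging level for the given virtual address.
 */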
12157 DB_SHOW_COMMAND(pte, pmap_print_pte)
12158 {
12159 pmap_t pmap;
12160 pml5_entry_t *pml5;
12161 pml4_entry_t *pml4;
12162 pdp_entry_t *pdp;
12163 pd_entry_t *pde;
12164 pt_entry_t *pte, PG_V;
12165 vm_offset_t va;
12166
12167 if (!have_addr) {
12168 db_printf("show pte addr\n");
12169 return;
12170 }
12171 va = (vm_offset_t)addr;
12172
12173 if (kdb_thread != NULL)
12174 pmap = vmspace_pmap(kdb_thread->td_proc->p_vmspace);
12175 else
12176 pmap = PCPU_GET(curpmap);
12177
12178 PG_V = pmap_valid_bit(pmap);
12179 db_printf("VA 0x%016lx", va);
12180
12181 if (pmap_is_la57(pmap)) {
12182 pml5 = pmap_pml5e(pmap, va);
12183 db_printf(" pml5e 0x%016lx", *pml5);
12184 if ((*pml5 & PG_V) == 0) {
12185 db_printf("\n");
12186 return;
12187 }
12188 pml4 = pmap_pml5e_to_pml4e(pml5, va);
12189 } else {
12190 pml4 = pmap_pml4e(pmap, va);
12191 }
12192 db_printf(" pml4e 0x%016lx", *pml4);
12193 if ((*pml4 & PG_V) == 0) {
12194 db_printf("\n");
12195 return;
12196 }
12197 pdp = pmap_pml4e_to_pdpe(pml4, va);
12198 db_printf(" pdpe 0x%016lx", *pdp);
12199 if ((*pdp & PG_V) == 0 || (*pdp & PG_PS) != 0) {
12200 db_printf("\n");
12201 return;
12202 }
12203 pde = pmap_pdpe_to_pde(pdp, va);
12204 db_printf(" pde 0x%016lx", *pde);
12205 if ((*pde & PG_V) == 0 || (*pde & PG_PS) != 0) {
12206 db_printf("\n");
12207 return;
12208 }
12209 pte = pmap_pde_to_pte(pde, va);
12210 db_printf(" pte 0x%016lx\n", *pte);
12211 }
12212
12213 DB_SHOW_COMMAND(phys2dmap, pmap_phys2dmap)
12214 {
12215 vm_paddr_t a;
12216
12217 if (have_addr) {
12218 a = (vm_paddr_t)addr;
12219 db_printf("0x%jx\n", (uintmax_t)PHYS_TO_DMAP(a));
12220 } else {
12221 db_printf("show phys2dmap addr\n");
12222 }
12223 }
12224
12225 static void
12226 ptpages_show_page(int level, int idx, vm_page_t pg)
12227 {
12228 db_printf("l %d i %d pg %p phys %#lx ref %x\n",
12229 level, idx, pg, VM_PAGE_TO_PHYS(pg), pg->ref_count);
12230 }
12231
12232 static void
12233 ptpages_show_complain(int level, int idx, uint64_t pte)
12234 {
12235 db_printf("l %d i %d pte %#lx\n", level, idx, pte);
12236 }
12237
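/*
 * Print every page table page reachable from the given pml4 page, descending
 * through the pdp and pd levels and complaining about entries whose frames
 * are not backed by a vm_page.
 */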
12238 static void
12239 ptpages_show_pml4(vm_page_t pg4, int num_entries, uint64_t PG_V)
12240 {
12241 vm_page_t pg3, pg2, pg1;
12242 pml4_entry_t *pml4;
12243 pdp_entry_t *pdp;
12244 pd_entry_t *pd;
12245 int i4, i3, i2;
12246
12247 pml4 = (pml4_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pg4));
12248 for (i4 = 0; i4 < num_entries; i4++) {
12249 if ((pml4[i4] & PG_V) == 0)
12250 continue;
12251 pg3 = PHYS_TO_VM_PAGE(pml4[i4] & PG_FRAME);
12252 if (pg3 == NULL) {
12253 ptpages_show_complain(3, i4, pml4[i4]);
12254 continue;
12255 }
12256 ptpages_show_page(3, i4, pg3);
12257 pdp = (pdp_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pg3));
12258 for (i3 = 0; i3 < NPDPEPG; i3++) {
12259 if ((pdp[i3] & PG_V) == 0)
12260 continue;
12261 pg2 = PHYS_TO_VM_PAGE(pdp[i3] & PG_FRAME);
12262 			if (pg2 == NULL) {
12263 ptpages_show_complain(2, i3, pdp[i3]);
12264 continue;
12265 }
12266 ptpages_show_page(2, i3, pg2);
12267 pd = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pg2));
12268 for (i2 = 0; i2 < NPDEPG; i2++) {
12269 if ((pd[i2] & PG_V) == 0)
12270 continue;
12271 pg1 = PHYS_TO_VM_PAGE(pd[i2] & PG_FRAME);
12272 if (pg1 == NULL) {
12273 ptpages_show_complain(1, i2, pd[i2]);
12274 continue;
12275 }
12276 ptpages_show_page(1, i2, pg1);
12277 }
12278 }
12279 }
12280 }
12281
12282 DB_SHOW_COMMAND(ptpages, pmap_ptpages)
12283 {
12284 pmap_t pmap;
12285 vm_page_t pg;
12286 pml5_entry_t *pml5;
12287 uint64_t PG_V;
12288 int i5;
12289
12290 if (have_addr)
12291 pmap = (pmap_t)addr;
12292 else
12293 pmap = PCPU_GET(curpmap);
12294
12295 PG_V = pmap_valid_bit(pmap);
12296
12297 if (pmap_is_la57(pmap)) {
12298 pml5 = pmap->pm_pmltop;
12299 for (i5 = 0; i5 < NUPML5E; i5++) {
12300 if ((pml5[i5] & PG_V) == 0)
12301 continue;
12302 pg = PHYS_TO_VM_PAGE(pml5[i5] & PG_FRAME);
12303 if (pg == NULL) {
12304 ptpages_show_complain(4, i5, pml5[i5]);
12305 continue;
12306 }
12307 ptpages_show_page(4, i5, pg);
12308 ptpages_show_pml4(pg, NPML4EPG, PG_V);
12309 }
12310 } else {
12311 ptpages_show_pml4(PHYS_TO_VM_PAGE(DMAP_TO_PHYS(
12312 (vm_offset_t)pmap->pm_pmltop)), NUP4ML4E, PG_V);
12313 }
12314 }
12315 #endif
12316