1 /* $NetBSD: pmap.c,v 1.75 2023/02/26 07:13:55 skrll Exp $ */
2
3 /*-
4 * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center and by Chris G. Demetriou.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 /*
34 * Copyright (c) 1992, 1993
35 * The Regents of the University of California. All rights reserved.
36 *
37 * This code is derived from software contributed to Berkeley by
38 * the Systems Programming Group of the University of Utah Computer
39 * Science Department and Ralph Campbell.
40 *
41 * Redistribution and use in source and binary forms, with or without
42 * modification, are permitted provided that the following conditions
43 * are met:
44 * 1. Redistributions of source code must retain the above copyright
45 * notice, this list of conditions and the following disclaimer.
46 * 2. Redistributions in binary form must reproduce the above copyright
47 * notice, this list of conditions and the following disclaimer in the
48 * documentation and/or other materials provided with the distribution.
49 * 3. Neither the name of the University nor the names of its contributors
50 * may be used to endorse or promote products derived from this software
51 * without specific prior written permission.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
54 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
55 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
56 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
57 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63 * SUCH DAMAGE.
64 *
65 * @(#)pmap.c 8.4 (Berkeley) 1/26/94
66 */
67
68 #include <sys/cdefs.h>
69
70 __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.75 2023/02/26 07:13:55 skrll Exp $");
71
72 /*
73 * Manages physical address maps.
74 *
75 * In addition to hardware address maps, this
76 * module is called upon to provide software-use-only
77 * maps which may or may not be stored in the same
78 * form as hardware maps. These pseudo-maps are
79 * used to store intermediate results from copy
80 * operations to and from address spaces.
81 *
82 * Since the information managed by this module is
83 * also stored by the logical address mapping module,
84 * this module may throw away valid virtual-to-physical
85 * mappings at almost any time. However, invalidations
86 * of virtual-to-physical mappings must be done as
87 * requested.
88 *
89 * In order to cope with hardware architectures which
90 * make virtual-to-physical map invalidates expensive,
91 * this module may delay invalidate or reduce-protection
92 * operations until such time as they are actually
93 * necessary. This module is given full information as
94 * to which processors are currently using which maps,
95 * and to when physical maps must be made correct.
96 */
97
98 #include "opt_ddb.h"
99 #include "opt_efi.h"
100 #include "opt_modular.h"
101 #include "opt_multiprocessor.h"
102 #include "opt_sysv.h"
103 #include "opt_uvmhist.h"
104
105 #define __PMAP_PRIVATE
106
107 #include <sys/param.h>
108
109 #include <sys/asan.h>
110 #include <sys/atomic.h>
111 #include <sys/buf.h>
112 #include <sys/cpu.h>
113 #include <sys/mutex.h>
114 #include <sys/pool.h>
115
116 #include <uvm/uvm.h>
117 #include <uvm/uvm_physseg.h>
118 #include <uvm/pmap/pmap_pvt.h>
119
120 #if defined(MULTIPROCESSOR) && defined(PMAP_VIRTUAL_CACHE_ALIASES) \
121 && !defined(PMAP_NO_PV_UNCACHED)
122 #error PMAP_VIRTUAL_CACHE_ALIASES with MULTIPROCESSOR requires \
123 PMAP_NO_PV_UNCACHED to be defined
124 #endif
125
126 #if defined(PMAP_PV_TRACK_ONLY_STUBS)
127 #undef __HAVE_PMAP_PV_TRACK
128 #endif
129
130 PMAP_COUNTER(remove_kernel_calls, "remove kernel calls");
131 PMAP_COUNTER(remove_kernel_pages, "kernel pages unmapped");
132 PMAP_COUNTER(remove_user_calls, "remove user calls");
133 PMAP_COUNTER(remove_user_pages, "user pages unmapped");
134 PMAP_COUNTER(remove_flushes, "remove cache flushes");
135 PMAP_COUNTER(remove_tlb_ops, "remove tlb ops");
136 PMAP_COUNTER(remove_pvfirst, "remove pv first");
137 PMAP_COUNTER(remove_pvsearch, "remove pv search");
138
139 PMAP_COUNTER(prefer_requests, "prefer requests");
140 PMAP_COUNTER(prefer_adjustments, "prefer adjustments");
141
142 PMAP_COUNTER(idlezeroed_pages, "pages idle zeroed");
143
144 PMAP_COUNTER(kenter_pa, "kernel fast mapped pages");
145 PMAP_COUNTER(kenter_pa_bad, "kernel fast mapped pages (bad color)");
146 PMAP_COUNTER(kenter_pa_unmanaged, "kernel fast mapped unmanaged pages");
147 PMAP_COUNTER(kremove_pages, "kernel fast unmapped pages");
148
149 PMAP_COUNTER(page_cache_evictions, "pages changed to uncacheable");
150 PMAP_COUNTER(page_cache_restorations, "pages changed to cacheable");
151
152 PMAP_COUNTER(kernel_mappings_bad, "kernel pages mapped (bad color)");
153 PMAP_COUNTER(user_mappings_bad, "user pages mapped (bad color)");
154 PMAP_COUNTER(kernel_mappings, "kernel pages mapped");
155 PMAP_COUNTER(user_mappings, "user pages mapped");
156 PMAP_COUNTER(user_mappings_changed, "user mapping changed");
157 PMAP_COUNTER(kernel_mappings_changed, "kernel mapping changed");
158 PMAP_COUNTER(uncached_mappings, "uncached pages mapped");
159 PMAP_COUNTER(unmanaged_mappings, "unmanaged pages mapped");
160 PMAP_COUNTER(pvtracked_mappings, "pv-tracked unmanaged pages mapped");
161 PMAP_COUNTER(efirt_mappings, "EFI RT pages mapped");
162 PMAP_COUNTER(managed_mappings, "managed pages mapped");
163 PMAP_COUNTER(mappings, "pages mapped");
164 PMAP_COUNTER(remappings, "pages remapped");
165 PMAP_COUNTER(unmappings, "pages unmapped");
166 PMAP_COUNTER(primary_mappings, "page initial mappings");
167 PMAP_COUNTER(primary_unmappings, "page final unmappings");
168 PMAP_COUNTER(tlb_hit, "page mapping");
169
170 PMAP_COUNTER(exec_mappings, "exec pages mapped");
171 PMAP_COUNTER(exec_synced_mappings, "exec pages synced");
172 PMAP_COUNTER(exec_synced_remove, "exec pages synced (PR)");
173 PMAP_COUNTER(exec_synced_clear_modify, "exec pages synced (CM)");
174 PMAP_COUNTER(exec_synced_page_protect, "exec pages synced (PP)");
175 PMAP_COUNTER(exec_synced_protect, "exec pages synced (P)");
176 PMAP_COUNTER(exec_uncached_page_protect, "exec pages uncached (PP)");
177 PMAP_COUNTER(exec_uncached_clear_modify, "exec pages uncached (CM)");
178 PMAP_COUNTER(exec_uncached_zero_page, "exec pages uncached (ZP)");
179 PMAP_COUNTER(exec_uncached_copy_page, "exec pages uncached (CP)");
180 PMAP_COUNTER(exec_uncached_remove, "exec pages uncached (PR)");
181
182 PMAP_COUNTER(create, "creates");
183 PMAP_COUNTER(reference, "references");
184 PMAP_COUNTER(dereference, "dereferences");
185 PMAP_COUNTER(destroy, "destroyed");
186 PMAP_COUNTER(activate, "activations");
187 PMAP_COUNTER(deactivate, "deactivations");
188 PMAP_COUNTER(update, "updates");
189 #ifdef MULTIPROCESSOR
190 PMAP_COUNTER(shootdown_ipis, "shootdown IPIs");
191 #endif
192 PMAP_COUNTER(unwire, "unwires");
193 PMAP_COUNTER(copy, "copies");
194 PMAP_COUNTER(clear_modify, "clear_modifies");
195 PMAP_COUNTER(protect, "protects");
196 PMAP_COUNTER(page_protect, "page_protects");
197
198 #define PMAP_ASID_RESERVED 0
199 CTASSERT(PMAP_ASID_RESERVED == 0);
200
201 #ifdef PMAP_HWPAGEWALKER
202 #ifndef PMAP_PDETAB_ALIGN
203 #define PMAP_PDETAB_ALIGN /* nothing */
204 #endif
205
206 #ifdef _LP64
207 pmap_pdetab_t pmap_kstart_pdetab PMAP_PDETAB_ALIGN; /* first mid-level pdetab for kernel */
208 #endif
209 pmap_pdetab_t pmap_kern_pdetab PMAP_PDETAB_ALIGN;
210 #endif
211
212 #if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
213 #ifndef PMAP_SEGTAB_ALIGN
214 #define PMAP_SEGTAB_ALIGN /* nothing */
215 #endif
216 #ifdef _LP64
217 pmap_segtab_t pmap_kstart_segtab PMAP_SEGTAB_ALIGN; /* first mid-level segtab for kernel */
218 #endif
219 pmap_segtab_t pmap_kern_segtab PMAP_SEGTAB_ALIGN = { /* top level segtab for kernel */
220 #ifdef _LP64
221 .seg_seg[(VM_MIN_KERNEL_ADDRESS >> XSEGSHIFT) & (NSEGPG - 1)] = &pmap_kstart_segtab,
222 #endif
223 };
224 #endif
225
226 struct pmap_kernel kernel_pmap_store = {
227 .kernel_pmap = {
228 .pm_refcnt = 1,
229 #ifdef PMAP_HWPAGEWALKER
230 .pm_pdetab = PMAP_INVALID_PDETAB_ADDRESS,
231 #endif
232 #if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
233 .pm_segtab = &pmap_kern_segtab,
234 #endif
235 .pm_minaddr = VM_MIN_KERNEL_ADDRESS,
236 .pm_maxaddr = VM_MAX_KERNEL_ADDRESS,
237 },
238 };
239
240 struct pmap * const kernel_pmap_ptr = &kernel_pmap_store.kernel_pmap;
241
242 #if defined(EFI_RUNTIME)
243 static struct pmap efirt_pmap;
244
245 pmap_t
246 pmap_efirt(void)
247 {
248 return &efirt_pmap;
249 }
250 #else
251 static inline pt_entry_t
252 pte_make_enter_efirt(paddr_t pa, vm_prot_t prot, u_int flags)
253 {
254 panic("not supported");
255 }
256 #endif
257
258 /* The current top of kernel VM - gets updated by pmap_growkernel */
259 vaddr_t pmap_curmaxkvaddr;
260
261 struct pmap_limits pmap_limits = { /* VA and PA limits */
262 .virtual_start = VM_MIN_KERNEL_ADDRESS,
263 .virtual_end = VM_MAX_KERNEL_ADDRESS,
264 };
265
266 #ifdef UVMHIST
267 static struct kern_history_ent pmapexechistbuf[10000];
268 static struct kern_history_ent pmaphistbuf[10000];
269 static struct kern_history_ent pmapxtabhistbuf[5000];
270 UVMHIST_DEFINE(pmapexechist) = UVMHIST_INITIALIZER(pmapexechist, pmapexechistbuf);
271 UVMHIST_DEFINE(pmaphist) = UVMHIST_INITIALIZER(pmaphist, pmaphistbuf);
272 UVMHIST_DEFINE(pmapxtabhist) = UVMHIST_INITIALIZER(pmapxtabhist, pmapxtabhistbuf);
273 #endif
274
275 /*
276 * The pools from which pmap structures and sub-structures are allocated.
277 */
278 struct pool pmap_pmap_pool;
279 struct pool pmap_pv_pool;
280
281 #ifndef PMAP_PV_LOWAT
282 #define PMAP_PV_LOWAT 16
283 #endif
284 int pmap_pv_lowat = PMAP_PV_LOWAT;
285
286 bool pmap_initialized = false;
287 #define PMAP_PAGE_COLOROK_P(a, b) \
288 ((((int)(a) ^ (int)(b)) & pmap_page_colormask) == 0)
289 u_int pmap_page_colormask;
290
291 #define PAGE_IS_MANAGED(pa) (pmap_initialized && uvm_pageismanaged(pa))
292
293 #define PMAP_IS_ACTIVE(pm) \
294 ((pm) == pmap_kernel() || \
295 (pm) == curlwp->l_proc->p_vmspace->vm_map.pmap)
296
297 /* Forward function declarations */
298 void pmap_page_remove(struct vm_page_md *);
299 static void pmap_pvlist_check(struct vm_page_md *);
300 void pmap_remove_pv(pmap_t, vaddr_t, struct vm_page *, bool);
301 void pmap_enter_pv(pmap_t, vaddr_t, paddr_t, struct vm_page_md *, pt_entry_t *, u_int);
302
303 /*
304 * PV table management functions.
305 */
306 void *pmap_pv_page_alloc(struct pool *, int);
307 void pmap_pv_page_free(struct pool *, void *);
308
309 struct pool_allocator pmap_pv_page_allocator = {
310 pmap_pv_page_alloc, pmap_pv_page_free, 0,
311 };
312
313 #define pmap_pv_alloc() pool_get(&pmap_pv_pool, PR_NOWAIT)
314 #define pmap_pv_free(pv) pool_put(&pmap_pv_pool, (pv))
315
316 #ifndef PMAP_NEED_TLB_MISS_LOCK
317
318 #if defined(PMAP_MD_NEED_TLB_MISS_LOCK) || defined(DEBUG)
319 #define PMAP_NEED_TLB_MISS_LOCK
320 #endif /* PMAP_MD_NEED_TLB_MISS_LOCK || DEBUG */
321
322 #endif /* PMAP_NEED_TLB_MISS_LOCK */
323
324 #ifdef PMAP_NEED_TLB_MISS_LOCK
325
326 #ifdef PMAP_MD_NEED_TLB_MISS_LOCK
327 #define pmap_tlb_miss_lock_init() __nothing /* MD code deals with this */
328 #define pmap_tlb_miss_lock_enter() pmap_md_tlb_miss_lock_enter()
329 #define pmap_tlb_miss_lock_exit() pmap_md_tlb_miss_lock_exit()
330 #else
331 kmutex_t pmap_tlb_miss_lock __cacheline_aligned;
332
333 static void
334 pmap_tlb_miss_lock_init(void)
335 {
336 mutex_init(&pmap_tlb_miss_lock, MUTEX_SPIN, IPL_HIGH);
337 }
338
339 static inline void
340 pmap_tlb_miss_lock_enter(void)
341 {
342 mutex_spin_enter(&pmap_tlb_miss_lock);
343 }
344
345 static inline void
346 pmap_tlb_miss_lock_exit(void)
347 {
348 mutex_spin_exit(&pmap_tlb_miss_lock);
349 }
350 #endif /* PMAP_MD_NEED_TLB_MISS_LOCK */
351
352 #else
353
354 #define pmap_tlb_miss_lock_init() __nothing
355 #define pmap_tlb_miss_lock_enter() __nothing
356 #define pmap_tlb_miss_lock_exit() __nothing
357
358 #endif /* PMAP_NEED_TLB_MISS_LOCK */
359
360 #ifndef MULTIPROCESSOR
361 kmutex_t pmap_pvlist_mutex __cacheline_aligned;
362 #endif
363
364 /*
365 * Debug functions.
366 */
367
368 #ifdef DEBUG
369 static inline void
370 pmap_asid_check(pmap_t pm, const char *func)
371 {
372 if (!PMAP_IS_ACTIVE(pm))
373 return;
374
375 struct pmap_asid_info * const pai = PMAP_PAI(pm, cpu_tlb_info(curcpu()));
376 tlb_asid_t asid = tlb_get_asid();
377 if (asid != pai->pai_asid)
378 panic("%s: inconsistency for active TLB update: %u <-> %u",
379 func, asid, pai->pai_asid);
380 }
381 #endif
382
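/*
 * pmap_addr_range_check:
 *
 * DEBUG-only sanity check that the range [sva, eva) lies within the
 * valid kernel or user address range for the given pmap.
 */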
383 static void
384 pmap_addr_range_check(pmap_t pmap, vaddr_t sva, vaddr_t eva, const char *func)
385 {
386 #ifdef DEBUG
387 if (pmap == pmap_kernel()) {
388 if (sva < VM_MIN_KERNEL_ADDRESS)
389 panic("%s: kva %#"PRIxVADDR" not in range",
390 func, sva);
391 if (eva >= pmap_limits.virtual_end)
392 panic("%s: kva %#"PRIxVADDR" not in range",
393 func, eva);
394 } else {
395 if (eva > VM_MAXUSER_ADDRESS)
396 panic("%s: uva %#"PRIxVADDR" not in range",
397 func, eva);
398 pmap_asid_check(pmap, func);
399 }
400 #endif
401 }
402
403 /*
404 * Misc. functions.
405 */
406
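/*
 * pmap_page_clear_attributes:
 *
 * Clear the given attribute bits in a page's MD attributes (using a
 * CAS loop on MULTIPROCESSOR kernels).  Returns true if any of the
 * requested bits were previously set.
 */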
407 bool
408 pmap_page_clear_attributes(struct vm_page_md *mdpg, u_int clear_attributes)
409 {
410 volatile unsigned long * const attrp = &mdpg->mdpg_attrs;
411
412 #ifdef MULTIPROCESSOR
413 for (;;) {
414 u_int old_attr = *attrp;
415 if ((old_attr & clear_attributes) == 0)
416 return false;
417 u_int new_attr = old_attr & ~clear_attributes;
418 if (old_attr == atomic_cas_ulong(attrp, old_attr, new_attr))
419 return true;
420 }
421 #else
422 unsigned long old_attr = *attrp;
423 if ((old_attr & clear_attributes) == 0)
424 return false;
425 *attrp &= ~clear_attributes;
426 return true;
427 #endif
428 }
429
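/*
 * pmap_page_set_attributes:
 *
 * Set the given attribute bits in a page's MD attributes (atomically
 * on MULTIPROCESSOR kernels).
 */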
430 void
431 pmap_page_set_attributes(struct vm_page_md *mdpg, u_int set_attributes)
432 {
433 #ifdef MULTIPROCESSOR
434 atomic_or_ulong(&mdpg->mdpg_attrs, set_attributes);
435 #else
436 mdpg->mdpg_attrs |= set_attributes;
437 #endif
438 }
439
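/*
 * pmap_page_syncicache:
 *
 * Synchronize the instruction cache for a page.  The set of CPUs that
 * may hold stale icache lines is computed from the page's pv list and
 * passed to the MD pmap_md_page_syncicache() hook.
 */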
440 static void
441 pmap_page_syncicache(struct vm_page *pg)
442 {
443 UVMHIST_FUNC(__func__);
444 UVMHIST_CALLED(pmaphist);
445 #ifndef MULTIPROCESSOR
446 struct pmap * const curpmap = curlwp->l_proc->p_vmspace->vm_map.pmap;
447 #endif
448 struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
449 pv_entry_t pv = &mdpg->mdpg_first;
450 kcpuset_t *onproc;
451 #ifdef MULTIPROCESSOR
452 kcpuset_create(&onproc, true);
453 KASSERT(onproc != NULL);
454 #else
455 onproc = NULL;
456 #endif
457 VM_PAGEMD_PVLIST_READLOCK(mdpg);
458 pmap_pvlist_check(mdpg);
459
460 UVMHIST_LOG(pmaphist, "pv %#jx pv_pmap %#jx", (uintptr_t)pv,
461 (uintptr_t)pv->pv_pmap, 0, 0);
462
463 if (pv->pv_pmap != NULL) {
464 for (; pv != NULL; pv = pv->pv_next) {
465 #ifdef MULTIPROCESSOR
466 UVMHIST_LOG(pmaphist, "pv %#jx pv_pmap %#jx",
467 (uintptr_t)pv, (uintptr_t)pv->pv_pmap, 0, 0);
468 kcpuset_merge(onproc, pv->pv_pmap->pm_onproc);
469 if (kcpuset_match(onproc, kcpuset_running)) {
470 break;
471 }
472 #else
473 if (pv->pv_pmap == curpmap) {
474 onproc = curcpu()->ci_kcpuset;
475 break;
476 }
477 #endif
478 }
479 }
480 pmap_pvlist_check(mdpg);
481 VM_PAGEMD_PVLIST_UNLOCK(mdpg);
482 kpreempt_disable();
483 pmap_md_page_syncicache(mdpg, onproc);
484 kpreempt_enable();
485 #ifdef MULTIPROCESSOR
486 kcpuset_destroy(onproc);
487 #endif
488 }
489
490 /*
491 * Define the initial bounds of the kernel virtual address space.
492 */
493 void
494 pmap_virtual_space(vaddr_t *vstartp, vaddr_t *vendp)
495 {
496 *vstartp = pmap_limits.virtual_start;
497 *vendp = pmap_limits.virtual_end;
498 }
499
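/*
 * pmap_growkernel:
 *
 * Grow the kernel virtual address space to cover maxkvaddr by
 * reserving PTE pages for each new segment, clamped to
 * VM_MAX_KERNEL_ADDRESS.  Returns the new top of kernel VA.
 */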
500 vaddr_t
501 pmap_growkernel(vaddr_t maxkvaddr)
502 {
503 UVMHIST_FUNC(__func__);
504 UVMHIST_CALLARGS(pmaphist, "maxkvaddr=%#jx (%#jx)", maxkvaddr,
505 pmap_curmaxkvaddr, 0, 0);
506
507 vaddr_t virtual_end = pmap_curmaxkvaddr;
508 maxkvaddr = pmap_round_seg(maxkvaddr) - 1;
509
510 /*
511 * Don't exceed VM_MAX_KERNEL_ADDRESS!
512 */
513 if (maxkvaddr == 0 || maxkvaddr > VM_MAX_KERNEL_ADDRESS)
514 maxkvaddr = VM_MAX_KERNEL_ADDRESS;
515
516 /*
517 * Reserve PTEs for the new KVA space.
518 */
519 for (; virtual_end < maxkvaddr; virtual_end += NBSEG) {
520 pmap_pte_reserve(pmap_kernel(), virtual_end, 0);
521 }
522
523 kasan_shadow_map((void *)pmap_curmaxkvaddr,
524 (size_t)(virtual_end - pmap_curmaxkvaddr));
525
526 /*
527 * Update new end.
528 */
529 pmap_curmaxkvaddr = virtual_end;
530
531 UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
532
533 return virtual_end;
534 }
535
536 /*
537 * Bootstrap memory allocator (alternative to vm_bootstrap_steal_memory()).
538 * This function allows for early dynamic memory allocation until the virtual
539 * memory system has been bootstrapped. After that point, either kmem_alloc
540 * or malloc should be used. This function works by stealing pages from the
541 * (to be) managed page pool, then implicitly mapping the pages (by using
542 * their direct mapped addresses) and zeroing them.
543 *
544 * It may be used once the physical memory segments have been pre-loaded
545 * into the vm_physmem[] array. Early memory allocation MUST use this
546 * interface! This cannot be used after vm_page_startup(), and will
547 * generate a panic if tried.
548 *
549 * Note that this memory will never be freed, and in essence it is wired
550 * down.
551 *
552 * We must adjust *vstartp and/or *vendp iff we use address space
553 * from the kernel virtual address range defined by pmap_virtual_space().
554 */
555 vaddr_t
556 pmap_steal_memory(vsize_t size, vaddr_t *vstartp, vaddr_t *vendp)
557 {
558 size_t npgs;
559 paddr_t pa;
560 vaddr_t va;
561
562 uvm_physseg_t maybe_bank = UVM_PHYSSEG_TYPE_INVALID;
563
564 size = round_page(size);
565 npgs = atop(size);
566
567 aprint_debug("%s: need %zu pages\n", __func__, npgs);
568
569 for (uvm_physseg_t bank = uvm_physseg_get_first();
570 uvm_physseg_valid_p(bank);
571 bank = uvm_physseg_get_next(bank)) {
572
573 if (uvm.page_init_done == true)
574 panic("pmap_steal_memory: called _after_ bootstrap");
575
576 aprint_debug("%s: seg %"PRIxPHYSSEG": %#"PRIxPADDR" %#"PRIxPADDR" %#"PRIxPADDR" %#"PRIxPADDR"\n",
577 __func__, bank,
578 uvm_physseg_get_avail_start(bank), uvm_physseg_get_start(bank),
579 uvm_physseg_get_avail_end(bank), uvm_physseg_get_end(bank));
580
581 if (uvm_physseg_get_avail_start(bank) != uvm_physseg_get_start(bank)
582 || uvm_physseg_get_avail_start(bank) >= uvm_physseg_get_avail_end(bank)) {
583 aprint_debug("%s: seg %"PRIxPHYSSEG": bad start\n", __func__, bank);
584 continue;
585 }
586
587 if (uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank) < npgs) {
588 aprint_debug("%s: seg %"PRIxPHYSSEG": too small for %zu pages\n",
589 __func__, bank, npgs);
590 continue;
591 }
592
593 if (!pmap_md_ok_to_steal_p(bank, npgs)) {
594 continue;
595 }
596
597 /*
598 * Always try to allocate from the segment with the least
599 * amount of space left.
600 */
601 #define VM_PHYSMEM_SPACE(b) ((uvm_physseg_get_avail_end(b)) - (uvm_physseg_get_avail_start(b)))
602 if (uvm_physseg_valid_p(maybe_bank) == false
603 || VM_PHYSMEM_SPACE(bank) < VM_PHYSMEM_SPACE(maybe_bank)) {
604 maybe_bank = bank;
605 }
606 }
607
608 if (uvm_physseg_valid_p(maybe_bank)) {
609 const uvm_physseg_t bank = maybe_bank;
610
611 /*
612 * There are enough pages here; steal them!
613 */
614 pa = ptoa(uvm_physseg_get_start(bank));
615 uvm_physseg_unplug(atop(pa), npgs);
616
617 aprint_debug("%s: seg %"PRIxPHYSSEG": %zu pages stolen (%#"PRIxPADDR" left)\n",
618 __func__, bank, npgs, VM_PHYSMEM_SPACE(bank));
619
620 va = pmap_md_map_poolpage(pa, size);
621 memset((void *)va, 0, size);
622 return va;
623 }
624
625 /*
626 * If we got here, there was no memory left.
627 */
628 panic("pmap_steal_memory: no memory to steal %zu pages", npgs);
629 }
630
631 /*
632 * Bootstrap the system enough to run with virtual memory.
633 * (Common routine called by machine-dependent bootstrap code.)
634 */
635 void
636 pmap_bootstrap_common(void)
637 {
638 UVMHIST_LINK_STATIC(pmapexechist);
639 UVMHIST_LINK_STATIC(pmaphist);
640 UVMHIST_LINK_STATIC(pmapxtabhist);
641
642 static const struct uvm_pagerops pmap_pager = {
643 /* nothing */
644 };
645
646 pmap_t pm = pmap_kernel();
647
648 rw_init(&pm->pm_obj_lock);
649 uvm_obj_init(&pm->pm_uobject, &pmap_pager, false, 1);
650 uvm_obj_setlock(&pm->pm_uobject, &pm->pm_obj_lock);
651
652 TAILQ_INIT(&pm->pm_ppg_list);
653
654 #if defined(PMAP_HWPAGEWALKER)
655 TAILQ_INIT(&pm->pm_pdetab_list);
656 #endif
657 #if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
658 TAILQ_INIT(&pm->pm_segtab_list);
659 #endif
660
661 #if defined(EFI_RUNTIME)
662
663 const pmap_t efipm = pmap_efirt();
664 struct pmap_asid_info * const efipai = PMAP_PAI(efipm, cpu_tlb_info(ci));
665
666 rw_init(&efipm->pm_obj_lock);
667 uvm_obj_init(&efipm->pm_uobject, &pmap_pager, false, 1);
668 uvm_obj_setlock(&efipm->pm_uobject, &efipm->pm_obj_lock);
669
670 efipai->pai_asid = KERNEL_PID;
671
672 TAILQ_INIT(&efipm->pm_ppg_list);
673
674 #if defined(PMAP_HWPAGEWALKER)
675 TAILQ_INIT(&efipm->pm_pdetab_list);
676 #endif
677 #if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
678 TAILQ_INIT(&efipm->pm_segtab_list);
679 #endif
680
681 #endif
682
683 /*
684 * Initialize the segtab lock.
685 */
686 mutex_init(&pmap_segtab_lock, MUTEX_DEFAULT, IPL_HIGH);
687
688 pmap_tlb_miss_lock_init();
689 }
690
691 /*
692 * Initialize the pmap module.
693 * Called by vm_init, to initialize any structures that the pmap
694 * system needs to map virtual memory.
695 */
696 void
697 pmap_init(void)
698 {
699 UVMHIST_FUNC(__func__);
700 UVMHIST_CALLED(pmaphist);
701
702 /*
703 * Set a low water mark on the pv_entry pool, so that we are
704 * more likely to have these around even in extreme memory
705 * starvation.
706 */
707 pool_setlowat(&pmap_pv_pool, pmap_pv_lowat);
708
709 /*
710 * Set the page colormask but allow pmap_md_init to override it.
711 */
712 pmap_page_colormask = ptoa(uvmexp.colormask);
713
714 pmap_md_init();
715
716 /*
717 * Now it is safe to enable pv entry recording.
718 */
719 pmap_initialized = true;
720 }
721
722 /*
723 * Create and return a physical map.
724 *
725 * If the size specified for the map
726 * is zero, the map is an actual physical
727 * map, and may be referenced by the
728 * hardware.
729 *
730 * If the size specified is non-zero,
731 * the map will be used in software only, and
732 * is bounded by that size.
733 */
734 pmap_t
735 pmap_create(void)
736 {
737 UVMHIST_FUNC(__func__);
738 UVMHIST_CALLED(pmaphist);
739 PMAP_COUNT(create);
740
741 static const struct uvm_pagerops pmap_pager = {
742 /* nothing */
743 };
744
745 pmap_t pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
746 memset(pmap, 0, PMAP_SIZE);
747
748 KASSERT(pmap->pm_pai[0].pai_link.le_prev == NULL);
749
750 pmap->pm_refcnt = 1;
751 pmap->pm_minaddr = VM_MIN_ADDRESS;
752 pmap->pm_maxaddr = VM_MAXUSER_ADDRESS;
753
754 rw_init(&pmap->pm_obj_lock);
755 uvm_obj_init(&pmap->pm_uobject, &pmap_pager, false, 1);
756 uvm_obj_setlock(&pmap->pm_uobject, &pmap->pm_obj_lock);
757
758 TAILQ_INIT(&pmap->pm_ppg_list);
759 #if defined(PMAP_HWPAGEWALKER)
760 TAILQ_INIT(&pmap->pm_pdetab_list);
761 #endif
762 #if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
763 TAILQ_INIT(&pmap->pm_segtab_list);
764 #endif
765
766 pmap_segtab_init(pmap);
767
768 #ifdef MULTIPROCESSOR
769 kcpuset_create(&pmap->pm_active, true);
770 kcpuset_create(&pmap->pm_onproc, true);
771 KASSERT(pmap->pm_active != NULL);
772 KASSERT(pmap->pm_onproc != NULL);
773 #endif
774
775 UVMHIST_LOG(pmaphist, " <-- done (pmap=%#jx)", (uintptr_t)pmap,
776 0, 0, 0);
777
778 return pmap;
779 }
780
781 /*
782 * Retire the given physical map from service.
783 * Should only be called if the map contains
784 * no valid mappings.
785 */
786 void
787 pmap_destroy(pmap_t pmap)
788 {
789 UVMHIST_FUNC(__func__);
790 UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx)", (uintptr_t)pmap, 0, 0, 0);
791 UVMHIST_CALLARGS(pmapxtabhist, "(pmap=%#jx)", (uintptr_t)pmap, 0, 0, 0);
792
793 membar_release();
794 if (atomic_dec_uint_nv(&pmap->pm_refcnt) > 0) {
795 PMAP_COUNT(dereference);
796 UVMHIST_LOG(pmaphist, " <-- done (deref)", 0, 0, 0, 0);
797 UVMHIST_LOG(pmapxtabhist, " <-- done (deref)", 0, 0, 0, 0);
798 return;
799 }
800 membar_acquire();
801
802 PMAP_COUNT(destroy);
803 KASSERT(pmap->pm_refcnt == 0);
804 kpreempt_disable();
805 pmap_tlb_miss_lock_enter();
806 pmap_tlb_asid_release_all(pmap);
807 pmap_tlb_miss_lock_exit();
808 pmap_segtab_destroy(pmap, NULL, 0);
809
810 KASSERT(TAILQ_EMPTY(&pmap->pm_ppg_list));
811
812 #ifdef _LP64
813 #if defined(PMAP_HWPAGEWALKER)
814 KASSERT(TAILQ_EMPTY(&pmap->pm_pdetab_list));
815 #endif
816 #if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
817 KASSERT(TAILQ_EMPTY(&pmap->pm_segtab_list));
818 #endif
819 #endif
820 KASSERT(pmap->pm_uobject.uo_npages == 0);
821
822 uvm_obj_destroy(&pmap->pm_uobject, false);
823 rw_destroy(&pmap->pm_obj_lock);
824
825 #ifdef MULTIPROCESSOR
826 kcpuset_destroy(pmap->pm_active);
827 kcpuset_destroy(pmap->pm_onproc);
828 pmap->pm_active = NULL;
829 pmap->pm_onproc = NULL;
830 #endif
831
832 pool_put(&pmap_pmap_pool, pmap);
833 kpreempt_enable();
834
835 UVMHIST_LOG(pmaphist, " <-- done (freed)", 0, 0, 0, 0);
836 UVMHIST_LOG(pmapxtabhist, " <-- done (freed)", 0, 0, 0, 0);
837 }
838
839 /*
840 * Add a reference to the specified pmap.
841 */
842 void
843 pmap_reference(pmap_t pmap)
844 {
845 UVMHIST_FUNC(__func__);
846 UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx)", (uintptr_t)pmap, 0, 0, 0);
847 PMAP_COUNT(reference);
848
849 if (pmap != NULL) {
850 atomic_inc_uint(&pmap->pm_refcnt);
851 }
852
853 UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
854 }
855
856 /*
857 * Make a new pmap (vmspace) active for the given process.
858 */
859 void
860 pmap_activate(struct lwp *l)
861 {
862 pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
863
864 UVMHIST_FUNC(__func__);
865 UVMHIST_CALLARGS(pmaphist, "(l=%#jx pmap=%#jx)", (uintptr_t)l,
866 (uintptr_t)pmap, 0, 0);
867 PMAP_COUNT(activate);
868
869 kpreempt_disable();
870 pmap_tlb_miss_lock_enter();
871 pmap_tlb_asid_acquire(pmap, l);
872 pmap_segtab_activate(pmap, l);
873 pmap_tlb_miss_lock_exit();
874 kpreempt_enable();
875
876 UVMHIST_LOG(pmaphist, " <-- done (%ju:%ju)", l->l_proc->p_pid,
877 l->l_lid, 0, 0);
878 }
879
880 /*
881 * Remove this page from all physical maps in which it resides.
882 * Reflects back modify bits to the pager.
883 */
884 void
885 pmap_page_remove(struct vm_page_md *mdpg)
886 {
887 kpreempt_disable();
888 VM_PAGEMD_PVLIST_LOCK(mdpg);
889 pmap_pvlist_check(mdpg);
890
891 struct vm_page * const pg =
892 VM_PAGEMD_VMPAGE_P(mdpg) ? VM_MD_TO_PAGE(mdpg) : NULL;
893
894 UVMHIST_FUNC(__func__);
895 if (pg) {
896 UVMHIST_CALLARGS(pmaphist, "mdpg %#jx pg %#jx (pa %#jx): "
897 "execpage cleared", (uintptr_t)mdpg, (uintptr_t)pg,
898 VM_PAGE_TO_PHYS(pg), 0);
899 } else {
900 UVMHIST_CALLARGS(pmaphist, "mdpg %#jx", (uintptr_t)mdpg, 0,
901 0, 0);
902 }
903
904 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
905 pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE | VM_PAGEMD_UNCACHED);
906 #else
907 pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE);
908 #endif
909 PMAP_COUNT(exec_uncached_remove);
910
911 pv_entry_t pv = &mdpg->mdpg_first;
912 if (pv->pv_pmap == NULL) {
913 VM_PAGEMD_PVLIST_UNLOCK(mdpg);
914 kpreempt_enable();
915 UVMHIST_LOG(pmaphist, " <-- done (empty)", 0, 0, 0, 0);
916 return;
917 }
918
919 pv_entry_t npv;
920 pv_entry_t pvp = NULL;
921
922 for (; pv != NULL; pv = npv) {
923 npv = pv->pv_next;
924 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
925 if (PV_ISKENTER_P(pv)) {
926 UVMHIST_LOG(pmaphist, " pv %#jx pmap %#jx va %#jx"
927 " skip", (uintptr_t)pv, (uintptr_t)pv->pv_pmap,
928 pv->pv_va, 0);
929
930 KASSERT(pv->pv_pmap == pmap_kernel());
931
932 /* Assume no more - it'll get fixed if there are */
933 pv->pv_next = NULL;
934
935 /*
936 * pvp is non-null when we already have a PV_KENTER
937 * pv in pvh_first; otherwise we haven't seen a
938 * PV_KENTER pv and we need to copy this one to
939 * pvh_first
940 */
941 if (pvp) {
942 /*
943 * The previous PV_KENTER pv needs to point to
944 * this PV_KENTER pv
945 */
946 pvp->pv_next = pv;
947 } else {
948 pv_entry_t fpv = &mdpg->mdpg_first;
949 *fpv = *pv;
950 KASSERT(fpv->pv_pmap == pmap_kernel());
951 }
952 pvp = pv;
953 continue;
954 }
955 #endif
956 const pmap_t pmap = pv->pv_pmap;
957 vaddr_t va = trunc_page(pv->pv_va);
958 pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
959 KASSERTMSG(ptep != NULL, "%#"PRIxVADDR " %#"PRIxVADDR, va,
960 pmap_limits.virtual_end);
961 pt_entry_t pte = *ptep;
962 UVMHIST_LOG(pmaphist, " pv %#jx pmap %#jx va %#jx"
963 " pte %#jx", (uintptr_t)pv, (uintptr_t)pmap, va,
964 pte_value(pte));
965 if (!pte_valid_p(pte))
966 continue;
967 const bool is_kernel_pmap_p = (pmap == pmap_kernel());
968 if (is_kernel_pmap_p) {
969 PMAP_COUNT(remove_kernel_pages);
970 } else {
971 PMAP_COUNT(remove_user_pages);
972 }
973 if (pte_wired_p(pte))
974 pmap->pm_stats.wired_count--;
975 pmap->pm_stats.resident_count--;
976
977 pmap_tlb_miss_lock_enter();
978 const pt_entry_t npte = pte_nv_entry(is_kernel_pmap_p);
979 pte_set(ptep, npte);
980 if (__predict_true(!(pmap->pm_flags & PMAP_DEFERRED_ACTIVATE))) {
981 /*
982 * Flush the TLB for the given address.
983 */
984 pmap_tlb_invalidate_addr(pmap, va);
985 }
986 pmap_tlb_miss_lock_exit();
987
988 /*
989 * non-null means this is a non-pvh_first pv, so we should
990 * free it.
991 */
992 if (pvp) {
993 KASSERT(pvp->pv_pmap == pmap_kernel());
994 KASSERT(pvp->pv_next == NULL);
995 pmap_pv_free(pv);
996 } else {
997 pv->pv_pmap = NULL;
998 pv->pv_next = NULL;
999 }
1000 }
1001
1002 pmap_pvlist_check(mdpg);
1003 VM_PAGEMD_PVLIST_UNLOCK(mdpg);
1004 kpreempt_enable();
1005
1006 UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
1007 }
1008
1009 #ifdef __HAVE_PMAP_PV_TRACK
1010 /*
1011 * pmap_pv_protect: change protection of an unmanaged pv-tracked page from
1012 * all pmaps that map it
1013 */
1014 void
1015 pmap_pv_protect(paddr_t pa, vm_prot_t prot)
1016 {
1017
1018 /* the only case is remove at the moment */
1019 KASSERT(prot == VM_PROT_NONE);
1020 struct pmap_page *pp;
1021
1022 pp = pmap_pv_tracked(pa);
1023 if (pp == NULL)
1024 panic("pmap_pv_protect: page not pv-tracked: 0x%"PRIxPADDR,
1025 pa);
1026
1027 struct vm_page_md *mdpg = PMAP_PAGE_TO_MD(pp);
1028 pmap_page_remove(mdpg);
1029 }
1030 #endif
1031
1032 /*
1033 * Make a previously active pmap (vmspace) inactive.
1034 */
1035 void
1036 pmap_deactivate(struct lwp *l)
1037 {
1038 pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
1039
1040 UVMHIST_FUNC(__func__);
1041 UVMHIST_CALLARGS(pmaphist, "(l=%#jx pmap=%#jx)", (uintptr_t)l,
1042 (uintptr_t)pmap, 0, 0);
1043 PMAP_COUNT(deactivate);
1044
1045 kpreempt_disable();
1046 KASSERT(l == curlwp || l->l_cpu == curlwp->l_cpu);
1047 pmap_tlb_miss_lock_enter();
1048 pmap_tlb_asid_deactivate(pmap);
1049 pmap_segtab_deactivate(pmap);
1050 pmap_tlb_miss_lock_exit();
1051 kpreempt_enable();
1052
1053 UVMHIST_LOG(pmaphist, " <-- done (%ju:%ju)", l->l_proc->p_pid,
1054 l->l_lid, 0, 0);
1055 }
1056
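/*
 * pmap_update:
 *
 * Process deferred pmap operations: handle any pending TLB shootdowns
 * and, if pmap_remove_all() deferred our reactivation, re-acquire an
 * ASID and reactivate the segtab for the current LWP.
 */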
1057 void
1058 pmap_update(struct pmap *pmap)
1059 {
1060 UVMHIST_FUNC(__func__);
1061 UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx)", (uintptr_t)pmap, 0, 0, 0);
1062 PMAP_COUNT(update);
1063
1064 kpreempt_disable();
1065 #if defined(MULTIPROCESSOR) && defined(PMAP_TLB_NEED_SHOOTDOWN)
1066 u_int pending = atomic_swap_uint(&pmap->pm_shootdown_pending, 0);
1067 if (pending && pmap_tlb_shootdown_bystanders(pmap))
1068 PMAP_COUNT(shootdown_ipis);
1069 #endif
1070 pmap_tlb_miss_lock_enter();
1071 #if defined(DEBUG) && !defined(MULTIPROCESSOR)
1072 pmap_tlb_check(pmap, pmap_md_tlb_check_entry);
1073 #endif /* DEBUG */
1074
1075 /*
1076 * If pmap_remove_all was called, we deactivated ourselves and nuked
1077 * our ASID. Now we have to reactivate ourselves.
1078 */
1079 if (__predict_false(pmap->pm_flags & PMAP_DEFERRED_ACTIVATE)) {
1080 pmap->pm_flags ^= PMAP_DEFERRED_ACTIVATE;
1081 pmap_tlb_asid_acquire(pmap, curlwp);
1082 pmap_segtab_activate(pmap, curlwp);
1083 }
1084 pmap_tlb_miss_lock_exit();
1085 kpreempt_enable();
1086
1087 UVMHIST_LOG(pmaphist, " <-- done (kernel=%jd)",
1088 (pmap == pmap_kernel() ? 1 : 0), 0, 0, 0);
1089 }
1090
1091 /*
1092 * Remove the given range of addresses from the specified map.
1093 *
1094 * It is assumed that the start and end are properly
1095 * rounded to the page size.
1096 */
1097
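/*
 * pmap_pte_remove:
 *
 * pmap_pte_process() callback that invalidates every valid PTE in the
 * range, updating pv lists, wired/resident counts and the TLB.
 */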
1098 static bool
1099 pmap_pte_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva, pt_entry_t *ptep,
1100 uintptr_t flags)
1101 {
1102 const pt_entry_t npte = flags;
1103 const bool is_kernel_pmap_p = (pmap == pmap_kernel());
1104
1105 UVMHIST_FUNC(__func__);
1106 UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx kernel=%jd va=%#jx..%#jx)",
1107 (uintptr_t)pmap, (is_kernel_pmap_p ? 1 : 0), sva, eva);
1108 UVMHIST_LOG(pmaphist, "ptep=%#jx, flags(npte)=%#jx)",
1109 (uintptr_t)ptep, flags, 0, 0);
1110
1111 KASSERT(kpreempt_disabled());
1112
1113 for (; sva < eva; sva += NBPG, ptep++) {
1114 const pt_entry_t pte = *ptep;
1115 if (!pte_valid_p(pte))
1116 continue;
1117 if (is_kernel_pmap_p) {
1118 PMAP_COUNT(remove_kernel_pages);
1119 } else {
1120 PMAP_COUNT(remove_user_pages);
1121 }
1122 if (pte_wired_p(pte))
1123 pmap->pm_stats.wired_count--;
1124 pmap->pm_stats.resident_count--;
1125 struct vm_page * const pg = PHYS_TO_VM_PAGE(pte_to_paddr(pte));
1126 if (__predict_true(pg != NULL)) {
1127 pmap_remove_pv(pmap, sva, pg, pte_modified_p(pte));
1128 }
1129 pmap_tlb_miss_lock_enter();
1130 pte_set(ptep, npte);
1131 if (__predict_true(!(pmap->pm_flags & PMAP_DEFERRED_ACTIVATE))) {
1132 /*
1133 * Flush the TLB for the given address.
1134 */
1135 pmap_tlb_invalidate_addr(pmap, sva);
1136 }
1137 pmap_tlb_miss_lock_exit();
1138 }
1139
1140 UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
1141
1142 return false;
1143 }
1144
1145 void
1146 pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
1147 {
1148 const bool is_kernel_pmap_p = (pmap == pmap_kernel());
1149 const pt_entry_t npte = pte_nv_entry(is_kernel_pmap_p);
1150
1151 UVMHIST_FUNC(__func__);
1152 UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx, va=%#jx..%#jx)",
1153 (uintptr_t)pmap, sva, eva, 0);
1154
1155 if (is_kernel_pmap_p) {
1156 PMAP_COUNT(remove_kernel_calls);
1157 } else {
1158 PMAP_COUNT(remove_user_calls);
1159 }
1160 #ifdef PMAP_FAULTINFO
1161 curpcb->pcb_faultinfo.pfi_faultaddr = 0;
1162 curpcb->pcb_faultinfo.pfi_repeats = 0;
1163 curpcb->pcb_faultinfo.pfi_faultptep = NULL;
1164 #endif
1165 kpreempt_disable();
1166 pmap_addr_range_check(pmap, sva, eva, __func__);
1167 pmap_pte_process(pmap, sva, eva, pmap_pte_remove, npte);
1168 kpreempt_enable();
1169
1170 UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
1171 }
1172
1173 /*
1174 * pmap_page_protect:
1175 *
1176 * Lower the permission for all mappings to a given page.
1177 */
1178 void
1179 pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
1180 {
1181 struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
1182 pv_entry_t pv;
1183 vaddr_t va;
1184
1185 UVMHIST_FUNC(__func__);
1186 UVMHIST_CALLARGS(pmaphist, "(pg=%#jx (pa %#jx) prot=%#jx)",
1187 (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), prot, 0);
1188 PMAP_COUNT(page_protect);
1189
1190 switch (prot) {
1191 case VM_PROT_READ | VM_PROT_WRITE:
1192 case VM_PROT_ALL:
1193 break;
1194
1195 /* copy_on_write */
1196 case VM_PROT_READ:
1197 case VM_PROT_READ | VM_PROT_EXECUTE:
1198 pv = &mdpg->mdpg_first;
1199 kpreempt_disable();
1200 VM_PAGEMD_PVLIST_READLOCK(mdpg);
1201 pmap_pvlist_check(mdpg);
1202 /*
1203 * Loop over all current mappings setting/clearing as
1204 * appropriate.
1205 */
1206 if (pv->pv_pmap != NULL) {
1207 while (pv != NULL) {
1208 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
1209 if (PV_ISKENTER_P(pv)) {
1210 pv = pv->pv_next;
1211 continue;
1212 }
1213 #endif
1214 const pmap_t pmap = pv->pv_pmap;
1215 va = trunc_page(pv->pv_va);
1216 const uintptr_t gen =
1217 VM_PAGEMD_PVLIST_UNLOCK(mdpg);
1218 pmap_protect(pmap, va, va + PAGE_SIZE, prot);
1219 KASSERT(pv->pv_pmap == pmap);
1220 pmap_update(pmap);
1221 if (gen != VM_PAGEMD_PVLIST_READLOCK(mdpg)) {
1222 pv = &mdpg->mdpg_first;
1223 } else {
1224 pv = pv->pv_next;
1225 }
1226 pmap_pvlist_check(mdpg);
1227 }
1228 }
1229 pmap_pvlist_check(mdpg);
1230 VM_PAGEMD_PVLIST_UNLOCK(mdpg);
1231 kpreempt_enable();
1232 break;
1233
1234 /* remove_all */
1235 default:
1236 pmap_page_remove(mdpg);
1237 }
1238
1239 UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
1240 }
1241
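/*
 * pmap_pte_protect:
 *
 * pmap_pte_process() callback that downgrades the protection of every
 * valid PTE in the range, syncing the icache for modified exec pages
 * before the downgrade.
 */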
1242 static bool
1243 pmap_pte_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, pt_entry_t *ptep,
1244 uintptr_t flags)
1245 {
1246 const vm_prot_t prot = (flags & VM_PROT_ALL);
1247
1248 UVMHIST_FUNC(__func__);
1249 UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx kernel=%jd va=%#jx..%#jx)",
1250 (uintptr_t)pmap, (pmap == pmap_kernel() ? 1 : 0), sva, eva);
1251 UVMHIST_LOG(pmaphist, "ptep=%#jx, flags(npte)=%#jx)",
1252 (uintptr_t)ptep, flags, 0, 0);
1253
1254 KASSERT(kpreempt_disabled());
1255 /*
1256 * Change protection on every valid mapping within this segment.
1257 */
1258 for (; sva < eva; sva += NBPG, ptep++) {
1259 pt_entry_t pte = *ptep;
1260 if (!pte_valid_p(pte))
1261 continue;
1262 struct vm_page * const pg = PHYS_TO_VM_PAGE(pte_to_paddr(pte));
1263 if (pg != NULL && pte_modified_p(pte)) {
1264 struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
1265 if (VM_PAGEMD_EXECPAGE_P(mdpg)) {
1266 KASSERT(!VM_PAGEMD_PVLIST_EMPTY_P(mdpg));
1267 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
1268 if (VM_PAGEMD_CACHED_P(mdpg)) {
1269 #endif
1270 UVMHIST_LOG(pmapexechist,
1271 "pg %#jx (pa %#jx): "
1272 "syncicached performed",
1273 (uintptr_t)pg, VM_PAGE_TO_PHYS(pg),
1274 0, 0);
1275 pmap_page_syncicache(pg);
1276 PMAP_COUNT(exec_synced_protect);
1277 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
1278 }
1279 #endif
1280 }
1281 }
1282 pte = pte_prot_downgrade(pte, prot);
1283 if (*ptep != pte) {
1284 pmap_tlb_miss_lock_enter();
1285 pte_set(ptep, pte);
1286 /*
1287 * Update the TLB if needed.
1288 */
1289 pmap_tlb_update_addr(pmap, sva, pte, PMAP_TLB_NEED_IPI);
1290 pmap_tlb_miss_lock_exit();
1291 }
1292 }
1293
1294 UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
1295
1296 return false;
1297 }
1298
1299 /*
1300 * Set the physical protection on the
1301 * specified range of this map as requested.
1302 */
1303 void
1304 pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
1305 {
1306 UVMHIST_FUNC(__func__);
1307 UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx, va=%#jx..%#jx, prot=%ju)",
1308 (uintptr_t)pmap, sva, eva, prot);
1309 PMAP_COUNT(protect);
1310
1311 if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1312 pmap_remove(pmap, sva, eva);
1313 UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
1314 return;
1315 }
1316
1317 /*
1318 * Change protection on every valid mapping within this segment.
1319 */
1320 kpreempt_disable();
1321 pmap_addr_range_check(pmap, sva, eva, __func__);
1322 pmap_pte_process(pmap, sva, eva, pmap_pte_protect, prot);
1323 kpreempt_enable();
1324
1325 UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
1326 }
1327
1328 #if defined(PMAP_VIRTUAL_CACHE_ALIASES) && !defined(PMAP_NO_PV_UNCACHED)
1329 /*
1330 * pmap_page_cache:
1331 *
1332 * Change all mappings of a managed page to cached/uncached.
1333 */
1334 void
1335 pmap_page_cache(struct vm_page_md *mdpg, bool cached)
1336 {
1337 #ifdef UVMHIST
1338 const bool vmpage_p = VM_PAGEMD_VMPAGE_P(mdpg);
1339 struct vm_page * const pg = vmpage_p ? VM_MD_TO_PAGE(mdpg) : NULL;
1340 #endif
1341
1342 UVMHIST_FUNC(__func__);
1343 UVMHIST_CALLARGS(pmaphist, "(mdpg=%#jx (pa %#jx) cached=%jd vmpage %jd)",
1344 (uintptr_t)mdpg, pg ? VM_PAGE_TO_PHYS(pg) : 0, cached, vmpage_p);
1345
1346 KASSERT(kpreempt_disabled());
1347 KASSERT(VM_PAGEMD_PVLIST_LOCKED_P(mdpg));
1348
1349 if (cached) {
1350 pmap_page_clear_attributes(mdpg, VM_PAGEMD_UNCACHED);
1351 PMAP_COUNT(page_cache_restorations);
1352 } else {
1353 pmap_page_set_attributes(mdpg, VM_PAGEMD_UNCACHED);
1354 PMAP_COUNT(page_cache_evictions);
1355 }
1356
1357 for (pv_entry_t pv = &mdpg->mdpg_first; pv != NULL; pv = pv->pv_next) {
1358 pmap_t pmap = pv->pv_pmap;
1359 vaddr_t va = trunc_page(pv->pv_va);
1360
1361 KASSERT(pmap != NULL);
1362 KASSERT(pmap != pmap_kernel() || !pmap_md_direct_mapped_vaddr_p(va));
1363 pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
1364 if (ptep == NULL)
1365 continue;
1366 pt_entry_t pte = *ptep;
1367 if (pte_valid_p(pte)) {
1368 pte = pte_cached_change(pte, cached);
1369 pmap_tlb_miss_lock_enter();
1370 pte_set(ptep, pte);
1371 pmap_tlb_update_addr(pmap, va, pte, PMAP_TLB_NEED_IPI);
1372 pmap_tlb_miss_lock_exit();
1373 }
1374 }
1375
1376 UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
1377 }
1378 #endif /* PMAP_VIRTUAL_CACHE_ALIASES && !PMAP_NO_PV_UNCACHED */
1379
1380 /*
1381 * Insert the given physical page (p) at
1382 * the specified virtual address (v) in the
1383 * target physical map with the protection requested.
1384 *
1385 * If specified, the page will be wired down, meaning
1386 * that the related pte can not be reclaimed.
1387 *
1388 * NB: This is the only routine which MAY NOT lazy-evaluate
1389 * or lose information. That is, this routine must actually
1390 * insert this page into the given map NOW.
1391 */
1392 int
1393 pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1394 {
1395 const bool wired = (flags & PMAP_WIRED) != 0;
1396 const bool is_kernel_pmap_p = (pmap == pmap_kernel());
1397 #if defined(EFI_RUNTIME)
1398 const bool is_efirt_pmap_p = (pmap == pmap_efirt());
1399 #else
1400 const bool is_efirt_pmap_p = false;
1401 #endif
1402 u_int update_flags = (flags & VM_PROT_ALL) != 0 ? PMAP_TLB_INSERT : 0;
1403 #ifdef UVMHIST
1404 struct kern_history * const histp =
1405 ((prot & VM_PROT_EXECUTE) ? &pmapexechist : &pmaphist);
1406 #endif
1407
1408 UVMHIST_FUNC(__func__);
1409 UVMHIST_CALLARGS(*histp, "(pmap=%#jx, va=%#jx, pa=%#jx",
1410 (uintptr_t)pmap, va, pa, 0);
1411 UVMHIST_LOG(*histp, "prot=%#jx flags=%#jx)", prot, flags, 0, 0);
1412
1413 const bool good_color = PMAP_PAGE_COLOROK_P(pa, va);
1414 if (is_kernel_pmap_p) {
1415 PMAP_COUNT(kernel_mappings);
1416 if (!good_color)
1417 PMAP_COUNT(kernel_mappings_bad);
1418 } else {
1419 PMAP_COUNT(user_mappings);
1420 if (!good_color)
1421 PMAP_COUNT(user_mappings_bad);
1422 }
1423 pmap_addr_range_check(pmap, va, va, __func__);
1424
1425 KASSERTMSG(prot & VM_PROT_READ, "no READ (%#x) in prot %#x",
1426 VM_PROT_READ, prot);
1427
1428 struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
1429 struct vm_page_md * const mdpg = (pg ? VM_PAGE_TO_MD(pg) : NULL);
1430
1431 struct vm_page_md *mdpp = NULL;
1432 #ifdef __HAVE_PMAP_PV_TRACK
1433 struct pmap_page *pp = pmap_pv_tracked(pa);
1434 mdpp = pp ? PMAP_PAGE_TO_MD(pp) : NULL;
1435 #endif
1436
1437 if (mdpg) {
1438 /* Set page referenced/modified status based on flags */
1439 if (flags & VM_PROT_WRITE) {
1440 pmap_page_set_attributes(mdpg, VM_PAGEMD_MODIFIED | VM_PAGEMD_REFERENCED);
1441 } else if (flags & VM_PROT_ALL) {
1442 pmap_page_set_attributes(mdpg, VM_PAGEMD_REFERENCED);
1443 }
1444
1445 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
1446 if (!VM_PAGEMD_CACHED_P(mdpg)) {
1447 flags |= PMAP_NOCACHE;
1448 PMAP_COUNT(uncached_mappings);
1449 }
1450 #endif
1451
1452 PMAP_COUNT(managed_mappings);
1453 } else if (mdpp) {
1454 #ifdef __HAVE_PMAP_PV_TRACK
1455 pmap_page_set_attributes(mdpg, VM_PAGEMD_REFERENCED);
1456
1457 PMAP_COUNT(pvtracked_mappings);
1458 #endif
1459 } else if (is_efirt_pmap_p) {
1460 PMAP_COUNT(efirt_mappings);
1461 } else {
1462 /*
1463 * Assumption: if it is not part of our managed memory
1464 * then it must be device memory which may be volatile.
1465 */
1466 if ((flags & PMAP_CACHE_MASK) == 0)
1467 flags |= PMAP_NOCACHE;
1468 PMAP_COUNT(unmanaged_mappings);
1469 }
1470
1471 KASSERTMSG(mdpg == NULL || mdpp == NULL || is_efirt_pmap_p,
1472 "mdpg %p mdpp %p efirt %s", mdpg, mdpp,
1473 is_efirt_pmap_p ? "true" : "false");
1474
1475 struct vm_page_md *md = (mdpg != NULL) ? mdpg : mdpp;
1476 pt_entry_t npte = is_efirt_pmap_p ?
1477 pte_make_enter_efirt(pa, prot, flags) :
1478 pte_make_enter(pa, md, prot, flags, is_kernel_pmap_p);
1479
1480 kpreempt_disable();
1481
1482 pt_entry_t * const ptep = pmap_pte_reserve(pmap, va, flags);
1483 if (__predict_false(ptep == NULL)) {
1484 kpreempt_enable();
1485 UVMHIST_LOG(*histp, " <-- ENOMEM", 0, 0, 0, 0);
1486 return ENOMEM;
1487 }
1488 const pt_entry_t opte = *ptep;
1489 const bool resident = pte_valid_p(opte);
1490 bool remap = false;
1491 if (resident) {
1492 if (pte_to_paddr(opte) != pa) {
1493 KASSERT(!is_kernel_pmap_p);
1494 const pt_entry_t rpte = pte_nv_entry(false);
1495
1496 pmap_addr_range_check(pmap, va, va + NBPG, __func__);
1497 pmap_pte_process(pmap, va, va + NBPG, pmap_pte_remove,
1498 rpte);
1499 PMAP_COUNT(user_mappings_changed);
1500 remap = true;
1501 }
1502 update_flags |= PMAP_TLB_NEED_IPI;
1503 }
1504
1505 if (!resident || remap) {
1506 pmap->pm_stats.resident_count++;
1507 }
1508
1509 /* Done after case that may sleep/return. */
1510 if (md)
1511 pmap_enter_pv(pmap, va, pa, md, &npte, 0);
1512
1513 /*
1514 * Now validate mapping with desired protection/wiring.
1515 */
1516 if (wired) {
1517 pmap->pm_stats.wired_count++;
1518 npte = pte_wire_entry(npte);
1519 }
1520
1521 UVMHIST_LOG(*histp, "new pte %#jx (pa %#jx)",
1522 pte_value(npte), pa, 0, 0);
1523
1524 KASSERT(pte_valid_p(npte));
1525
1526 pmap_tlb_miss_lock_enter();
1527 pte_set(ptep, npte);
1528 pmap_tlb_update_addr(pmap, va, npte, update_flags);
1529 pmap_tlb_miss_lock_exit();
1530 kpreempt_enable();
1531
1532 if (pg != NULL && (prot == (VM_PROT_READ | VM_PROT_EXECUTE))) {
1533 KASSERT(mdpg != NULL);
1534 PMAP_COUNT(exec_mappings);
1535 if (!VM_PAGEMD_EXECPAGE_P(mdpg) && pte_cached_p(npte)) {
1536 if (!pte_deferred_exec_p(npte)) {
1537 UVMHIST_LOG(*histp, "va=%#jx pg %#jx: "
1538 "immediate syncicache",
1539 va, (uintptr_t)pg, 0, 0);
1540 pmap_page_syncicache(pg);
1541 pmap_page_set_attributes(mdpg,
1542 VM_PAGEMD_EXECPAGE);
1543 PMAP_COUNT(exec_synced_mappings);
1544 } else {
1545 UVMHIST_LOG(*histp, "va=%#jx pg %#jx: defer "
1546 "syncicache: pte %#jx",
1547 va, (uintptr_t)pg, npte, 0);
1548 }
1549 } else {
1550 UVMHIST_LOG(*histp,
1551 "va=%#jx pg %#jx: no syncicache cached %jd",
1552 va, (uintptr_t)pg, pte_cached_p(npte), 0);
1553 }
1554 } else if (pg != NULL && (prot & VM_PROT_EXECUTE)) {
1555 KASSERT(mdpg != NULL);
1556 KASSERT(prot & VM_PROT_WRITE);
1557 PMAP_COUNT(exec_mappings);
1558 pmap_page_syncicache(pg);
1559 pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE);
1560 UVMHIST_LOG(*histp,
1561 "va=%#jx pg %#jx: immediate syncicache (writeable)",
1562 va, (uintptr_t)pg, 0, 0);
1563 }
1564
1565 UVMHIST_LOG(*histp, " <-- 0 (OK)", 0, 0, 0, 0);
1566 return 0;
1567 }
1568
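/*
 * pmap_kenter_pa:
 *
 * Enter an unmanaged ("fast") kernel mapping of pa at va.  No pv entry
 * is recorded unless virtual cache aliases have to be tracked, and the
 * TLB entry is loaded lazily by the next reference to the page.
 */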
1569 void
1570 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1571 {
1572 pmap_t pmap = pmap_kernel();
1573 struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
1574 struct vm_page_md * const mdpg = (pg ? VM_PAGE_TO_MD(pg) : NULL);
1575
1576 UVMHIST_FUNC(__func__);
1577 UVMHIST_CALLARGS(pmaphist, "(va=%#jx pa=%#jx prot=%ju, flags=%#jx)",
1578 va, pa, prot, flags);
1579 PMAP_COUNT(kenter_pa);
1580
1581 if (mdpg == NULL) {
1582 PMAP_COUNT(kenter_pa_unmanaged);
1583 if ((flags & PMAP_CACHE_MASK) == 0)
1584 flags |= PMAP_NOCACHE;
1585 } else {
1586 if ((flags & PMAP_NOCACHE) == 0 && !PMAP_PAGE_COLOROK_P(pa, va))
1587 PMAP_COUNT(kenter_pa_bad);
1588 }
1589
1590 pt_entry_t npte = pte_make_kenter_pa(pa, mdpg, prot, flags);
1591 kpreempt_disable();
1592 pt_entry_t * const ptep = pmap_pte_reserve(pmap, va, 0);
1593
1594 KASSERTMSG(ptep != NULL, "%#"PRIxVADDR " %#"PRIxVADDR, va,
1595 pmap_limits.virtual_end);
1596 KASSERT(!pte_valid_p(*ptep));
1597
1598 /*
1599 * No need to track non-managed pages or PMAP_KMPAGE pages for aliases
1600 */
1601 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
1602 if (pg != NULL && (flags & PMAP_KMPAGE) == 0
1603 && pmap_md_virtual_cache_aliasing_p()) {
1604 pmap_enter_pv(pmap, va, pa, mdpg, &npte, PV_KENTER);
1605 }
1606 #endif
1607
1608 /*
1609 * We have the option to force this mapping into the TLB but we
1610 * don't. Instead let the next reference to the page do it.
1611 */
1612 pmap_tlb_miss_lock_enter();
1613 pte_set(ptep, npte);
1614 pmap_tlb_update_addr(pmap_kernel(), va, npte, 0);
1615 pmap_tlb_miss_lock_exit();
1616 kpreempt_enable();
1617 #if DEBUG > 1
1618 for (u_int i = 0; i < PAGE_SIZE / sizeof(long); i++) {
1619 if (((long *)va)[i] != ((long *)pa)[i])
1620 panic("%s: contents (%lx) of va %#"PRIxVADDR
1621 " != contents (%lx) of pa %#"PRIxPADDR, __func__,
1622 ((long *)va)[i], va, ((long *)pa)[i], pa);
1623 }
1624 #endif
1625
1626 UVMHIST_LOG(pmaphist, " <-- done (ptep=%#jx)", (uintptr_t)ptep, 0, 0,
1627 0);
1628 }
1629
1630 /*
1631 * Remove the given range of addresses from the kernel map.
1632 *
1633 * It is assumed that the start and end are properly
1634 * rounded to the page size.
1635 */
1636
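/*
 * pmap_pte_kremove:
 *
 * pmap_pte_process() callback for pmap_kremove() that invalidates
 * every valid kernel PTE in the range and flushes it from the TLB.
 */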
1637 static bool
1638 pmap_pte_kremove(pmap_t pmap, vaddr_t sva, vaddr_t eva, pt_entry_t *ptep,
1639 uintptr_t flags)
1640 {
1641 const pt_entry_t new_pte = pte_nv_entry(true);
1642
1643 UVMHIST_FUNC(__func__);
1644 UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx, sva=%#jx eva=%#jx ptep=%#jx)",
1645 (uintptr_t)pmap, sva, eva, (uintptr_t)ptep);
1646
1647 KASSERT(kpreempt_disabled());
1648
1649 for (; sva < eva; sva += NBPG, ptep++) {
1650 pt_entry_t pte = *ptep;
1651 if (!pte_valid_p(pte))
1652 continue;
1653
1654 PMAP_COUNT(kremove_pages);
1655 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
1656 struct vm_page * const pg = PHYS_TO_VM_PAGE(pte_to_paddr(pte));
1657 if (pg != NULL && pmap_md_virtual_cache_aliasing_p()) {
1658 pmap_remove_pv(pmap, sva, pg, !pte_readonly_p(pte));
1659 }
1660 #endif
1661
1662 pmap_tlb_miss_lock_enter();
1663 pte_set(ptep, new_pte);
1664 pmap_tlb_invalidate_addr(pmap, sva);
1665 pmap_tlb_miss_lock_exit();
1666 }
1667
1668 UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
1669
1670 return false;
1671 }
1672
1673 void
1674 pmap_kremove(vaddr_t va, vsize_t len)
1675 {
1676 const vaddr_t sva = trunc_page(va);
1677 const vaddr_t eva = round_page(va + len);
1678
1679 UVMHIST_FUNC(__func__);
1680 UVMHIST_CALLARGS(pmaphist, "(va=%#jx len=%#jx)", va, len, 0, 0);
1681
1682 kpreempt_disable();
1683 pmap_pte_process(pmap_kernel(), sva, eva, pmap_pte_kremove, 0);
1684 kpreempt_enable();
1685
1686 UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
1687 }
1688
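/*
 * pmap_remove_all:
 *
 * Prepare to remove all mappings from a user pmap: deactivate it and
 * release its ASIDs so the subsequent removals can skip per-address
 * TLB invalidations.  Reactivation is deferred to the next
 * pmap_update().
 */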
1689 bool
1690 pmap_remove_all(struct pmap *pmap)
1691 {
1692 UVMHIST_FUNC(__func__);
1693 UVMHIST_CALLARGS(pmaphist, "(pm=%#jx)", (uintptr_t)pmap, 0, 0, 0);
1694
1695 KASSERT(pmap != pmap_kernel());
1696
1697 kpreempt_disable();
1698 /*
1699 * Free all of our ASIDs which means we can skip doing all the
1700 * tlb_invalidate_addrs().
1701 */
1702 pmap_tlb_miss_lock_enter();
1703 #ifdef MULTIPROCESSOR
1704 // This should be the last CPU with this pmap onproc
1705 KASSERT(!kcpuset_isotherset(pmap->pm_onproc, cpu_index(curcpu())));
1706 if (kcpuset_isset(pmap->pm_onproc, cpu_index(curcpu())))
1707 #endif
1708 pmap_tlb_asid_deactivate(pmap);
1709 #ifdef MULTIPROCESSOR
1710 KASSERT(kcpuset_iszero(pmap->pm_onproc));
1711 #endif
1712 pmap_tlb_asid_release_all(pmap);
1713 pmap_tlb_miss_lock_exit();
1714 pmap->pm_flags |= PMAP_DEFERRED_ACTIVATE;
1715
1716 #ifdef PMAP_FAULTINFO
1717 curpcb->pcb_faultinfo.pfi_faultaddr = 0;
1718 curpcb->pcb_faultinfo.pfi_repeats = 0;
1719 curpcb->pcb_faultinfo.pfi_faultptep = NULL;
1720 #endif
1721 kpreempt_enable();
1722
1723 UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
1724 return false;
1725 }
1726
1727 /*
1728 * Routine: pmap_unwire
1729 * Function: Clear the wired attribute for a map/virtual-address
1730 * pair.
1731 * In/out conditions:
1732 * The mapping must already exist in the pmap.
1733 */
1734 void
1735 pmap_unwire(pmap_t pmap, vaddr_t va)
1736 {
1737 UVMHIST_FUNC(__func__);
1738 UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx, va=%#jx)", (uintptr_t)pmap, va,
1739 0, 0);
1740 PMAP_COUNT(unwire);
1741
1742 /*
1743 * Don't need to flush the TLB since PG_WIRED is only in software.
1744 */
1745 kpreempt_disable();
1746 pmap_addr_range_check(pmap, va, va, __func__);
1747 pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
1748 KASSERTMSG(ptep != NULL, "pmap %p va %#"PRIxVADDR" invalid STE",
1749 pmap, va);
1750 pt_entry_t pte = *ptep;
1751 KASSERTMSG(pte_valid_p(pte),
1752 "pmap %p va %#" PRIxVADDR " invalid PTE %#" PRIxPTE " @ %p",
1753 pmap, va, pte_value(pte), ptep);
1754
1755 if (pte_wired_p(pte)) {
1756 pmap_tlb_miss_lock_enter();
1757 pte_set(ptep, pte_unwire_entry(pte));
1758 pmap_tlb_miss_lock_exit();
1759 pmap->pm_stats.wired_count--;
1760 }
1761 #ifdef DIAGNOSTIC
1762 else {
1763 printf("%s: wiring for pmap %p va %#"PRIxVADDR" unchanged!\n",
1764 __func__, pmap, va);
1765 }
1766 #endif
1767 kpreempt_enable();
1768
1769 UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
1770 }
1771
1772 /*
1773 * Routine: pmap_extract
1774 * Function:
1775 * Extract the physical page address associated
1776 * with the given map/virtual_address pair.
1777 */
1778 bool
1779 pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
1780 {
1781 paddr_t pa;
1782
1783 if (pmap == pmap_kernel()) {
1784 if (pmap_md_direct_mapped_vaddr_p(va)) {
1785 pa = pmap_md_direct_mapped_vaddr_to_paddr(va);
1786 goto done;
1787 }
1788 if (pmap_md_io_vaddr_p(va))
1789 panic("pmap_extract: io address %#"PRIxVADDR"", va);
1790
1791 if (va >= pmap_limits.virtual_end)
1792 panic("%s: illegal kernel mapped address %#"PRIxVADDR,
1793 __func__, va);
1794 }
1795 kpreempt_disable();
1796 const pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
1797 if (ptep == NULL || !pte_valid_p(*ptep)) {
1798 kpreempt_enable();
1799 return false;
1800 }
1801 pa = pte_to_paddr(*ptep) | (va & PGOFSET);
1802 kpreempt_enable();
1803 done:
1804 if (pap != NULL) {
1805 *pap = pa;
1806 }
1807 return true;
1808 }
1809
1810 /*
1811 * Copy the range specified by src_addr/len
1812 * from the source map to the range dst_addr/len
1813 * in the destination map.
1814 *
1815 * This routine is only advisory and need not do anything.
1816 */
1817 void
1818 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr, vsize_t len,
1819 vaddr_t src_addr)
1820 {
1821 UVMHIST_FUNC(__func__);
1822 UVMHIST_CALLED(pmaphist);
1823 PMAP_COUNT(copy);
1824 }
1825
1826 /*
1827 * pmap_clear_reference:
1828 *
1829 * Clear the reference bit on the specified physical page.
1830 */
1831 bool
1832 pmap_clear_reference(struct vm_page *pg)
1833 {
1834 struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
1835
1836 UVMHIST_FUNC(__func__);
1837 UVMHIST_CALLARGS(pmaphist, "(pg=%#jx (pa %#jx))",
1838 (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), 0,0);
1839
1840 bool rv = pmap_page_clear_attributes(mdpg, VM_PAGEMD_REFERENCED);
1841
1842 UVMHIST_LOG(pmaphist, " <-- wasref %ju", rv, 0, 0, 0);
1843
1844 return rv;
1845 }
1846
1847 /*
1848 * pmap_is_referenced:
1849 *
1850 * Return whether or not the specified physical page is referenced
1851 * by any physical maps.
1852 */
1853 bool
1854 pmap_is_referenced(struct vm_page *pg)
1855 {
1856 return VM_PAGEMD_REFERENCED_P(VM_PAGE_TO_MD(pg));
1857 }
1858
1859 /*
1860 * Clear the modify bits on the specified physical page.
1861 */
1862 bool
1863 pmap_clear_modify(struct vm_page *pg)
1864 {
1865 struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
1866 pv_entry_t pv = &mdpg->mdpg_first;
1867 pv_entry_t pv_next;
1868
1869 UVMHIST_FUNC(__func__);
1870 UVMHIST_CALLARGS(pmaphist, "(pg=%#jx (%#jx))",
1871 (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), 0,0);
1872 PMAP_COUNT(clear_modify);
1873
1874 if (VM_PAGEMD_EXECPAGE_P(mdpg)) {
1875 if (pv->pv_pmap == NULL) {
1876 UVMHIST_LOG(pmapexechist,
1877 "pg %#jx (pa %#jx): execpage cleared",
1878 (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), 0, 0);
1879 pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE);
1880 PMAP_COUNT(exec_uncached_clear_modify);
1881 } else {
1882 UVMHIST_LOG(pmapexechist,
1883 "pg %#jx (pa %#jx): syncicache performed",
1884 (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), 0, 0);
1885 pmap_page_syncicache(pg);
1886 PMAP_COUNT(exec_synced_clear_modify);
1887 }
1888 }
1889 if (!pmap_page_clear_attributes(mdpg, VM_PAGEMD_MODIFIED)) {
1890 UVMHIST_LOG(pmaphist, " <-- false", 0, 0, 0, 0);
1891 return false;
1892 }
1893 if (pv->pv_pmap == NULL) {
1894 UVMHIST_LOG(pmaphist, " <-- true (no mappings)", 0, 0, 0, 0);
1895 return true;
1896 }
1897
1898 /*
1899 * remove write access from any pages that are dirty
1900 * so we can tell if they are written to again later.
1901 * flush the VAC first if there is one.
1902 */
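	/*
	 * The pv list lock below is generation counted:
	 * VM_PAGEMD_PVLIST_UNLOCK() returns a generation number, and the
	 * lock is dropped around each PTE update and TLB invalidation.  If
	 * the generation has changed once the lock is retaken, another
	 * thread modified the list and the walk restarts from the head.
	 */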
1903 kpreempt_disable();
1904 VM_PAGEMD_PVLIST_READLOCK(mdpg);
1905 pmap_pvlist_check(mdpg);
1906 for (; pv != NULL; pv = pv_next) {
1907 pmap_t pmap = pv->pv_pmap;
1908 vaddr_t va = trunc_page(pv->pv_va);
1909
1910 pv_next = pv->pv_next;
1911 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
1912 if (PV_ISKENTER_P(pv))
1913 continue;
1914 #endif
1915 pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
1916 KASSERT(ptep);
1917 pt_entry_t pte = pte_prot_nowrite(*ptep);
1918 if (*ptep == pte) {
1919 continue;
1920 }
1921 KASSERT(pte_valid_p(pte));
1922 const uintptr_t gen = VM_PAGEMD_PVLIST_UNLOCK(mdpg);
1923 pmap_tlb_miss_lock_enter();
1924 pte_set(ptep, pte);
1925 pmap_tlb_invalidate_addr(pmap, va);
1926 pmap_tlb_miss_lock_exit();
1927 pmap_update(pmap);
1928 if (__predict_false(gen != VM_PAGEMD_PVLIST_READLOCK(mdpg))) {
1929 /*
1930 * The list changed! So restart from the beginning.
1931 */
1932 pv_next = &mdpg->mdpg_first;
1933 pmap_pvlist_check(mdpg);
1934 }
1935 }
1936 pmap_pvlist_check(mdpg);
1937 VM_PAGEMD_PVLIST_UNLOCK(mdpg);
1938 kpreempt_enable();
1939
1940 UVMHIST_LOG(pmaphist, " <-- true (mappings changed)", 0, 0, 0, 0);
1941 return true;
1942 }
1943
1944 /*
1945 * pmap_is_modified:
1946 *
1947 * Return whether or not the specified physical page is modified
1948 * by any physical maps.
1949 */
1950 bool
1951 pmap_is_modified(struct vm_page *pg)
1952 {
1953 return VM_PAGEMD_MODIFIED_P(VM_PAGE_TO_MD(pg));
1954 }
1955
1956 /*
1957 * pmap_set_modified:
1958 *
1959 * Sets the page modified reference bit for the specified page.
1960 */
1961 void
1962 pmap_set_modified(paddr_t pa)
1963 {
1964 struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
1965 struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
1966 pmap_page_set_attributes(mdpg, VM_PAGEMD_MODIFIED | VM_PAGEMD_REFERENCED);
1967 }
1968
1969 /******************** pv_entry management ********************/
1970
1971 static void
1972 pmap_pvlist_check(struct vm_page_md *mdpg)
1973 {
1974 #ifdef DEBUG
1975 pv_entry_t pv = &mdpg->mdpg_first;
1976 if (pv->pv_pmap != NULL) {
1977 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
1978 const u_int colormask = uvmexp.colormask;
1979 u_int colors = 0;
1980 #endif
1981 for (; pv != NULL; pv = pv->pv_next) {
1982 KASSERT(pv->pv_pmap != pmap_kernel() || !pmap_md_direct_mapped_vaddr_p(pv->pv_va));
1983 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
1984 colors |= __BIT(atop(pv->pv_va) & colormask);
1985 #endif
1986 }
1987 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
1988 // Assert that if there is more than 1 color mapped, that the
1989 // page is uncached.
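			// Note: (colors & (colors - 1)) == 0 is true exactly
			// when at most one color bit is set.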
1990 KASSERTMSG(!pmap_md_virtual_cache_aliasing_p()
1991 || colors == 0 || (colors & (colors-1)) == 0
1992 || VM_PAGEMD_UNCACHED_P(mdpg), "colors=%#x uncached=%u",
1993 colors, VM_PAGEMD_UNCACHED_P(mdpg));
1994 #endif
1995 } else {
1996 KASSERT(pv->pv_next == NULL);
1997 }
1998 #endif /* DEBUG */
1999 }
2000
2001 /*
2002 * Enter the pmap and virtual address into the
2003 * physical to virtual map table.
2004 */
2005 void
2006 pmap_enter_pv(pmap_t pmap, vaddr_t va, paddr_t pa, struct vm_page_md *mdpg,
2007 pt_entry_t *nptep, u_int flags)
2008 {
2009 pv_entry_t pv, npv, apv;
2010 #ifdef UVMHIST
2011 bool first = false;
2012 struct vm_page *pg = VM_PAGEMD_VMPAGE_P(mdpg) ? VM_MD_TO_PAGE(mdpg) :
2013 NULL;
2014 #endif
2015
2016 UVMHIST_FUNC(__func__);
2017 UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx va=%#jx pg=%#jx (%#jx)",
2018 (uintptr_t)pmap, va, (uintptr_t)pg, pa);
2019 UVMHIST_LOG(pmaphist, "nptep=%#jx (%#jx))",
2020 (uintptr_t)nptep, pte_value(*nptep), 0, 0);
2021
2022 KASSERT(kpreempt_disabled());
2023 KASSERT(pmap != pmap_kernel() || !pmap_md_direct_mapped_vaddr_p(va));
2024 KASSERTMSG(pmap != pmap_kernel() || !pmap_md_io_vaddr_p(va),
2025 "va %#"PRIxVADDR, va);
2026
2027 apv = NULL;
2028 VM_PAGEMD_PVLIST_LOCK(mdpg);
2029 again:
2030 pv = &mdpg->mdpg_first;
2031 pmap_pvlist_check(mdpg);
2032 if (pv->pv_pmap == NULL) {
2033 KASSERT(pv->pv_next == NULL);
2034 /*
2035 * No entries yet, use header as the first entry
2036 */
2037 PMAP_COUNT(primary_mappings);
2038 PMAP_COUNT(mappings);
2039 #ifdef UVMHIST
2040 first = true;
2041 #endif
2042 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
2043 KASSERT(VM_PAGEMD_CACHED_P(mdpg));
2044 		// If the new mapping has an incompatible color with the last
2045 // mapping of this page, clean the page before using it.
2046 if (!PMAP_PAGE_COLOROK_P(va, pv->pv_va)) {
2047 pmap_md_vca_clean(mdpg, PMAP_WBINV);
2048 }
2049 #endif
2050 pv->pv_pmap = pmap;
2051 pv->pv_va = va | flags;
2052 } else {
2053 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
2054 if (pmap_md_vca_add(mdpg, va, nptep)) {
2055 goto again;
2056 }
2057 #endif
2058
2059 /*
2060 * There is at least one other VA mapping this page.
2061 * Place this entry after the header.
2062 *
2063 * Note: the entry may already be in the table if
2064 * we are only changing the protection bits.
2065 */
2066
2067 for (npv = pv; npv; npv = npv->pv_next) {
2068 if (pmap == npv->pv_pmap
2069 && va == trunc_page(npv->pv_va)) {
2070 #ifdef PARANOIADIAG
2071 pt_entry_t *ptep = pmap_pte_lookup(pmap, va);
2072 pt_entry_t pte = (ptep != NULL) ? *ptep : 0;
2073 if (!pte_valid_p(pte) || pte_to_paddr(pte) != pa)
2074 printf("%s: found va %#"PRIxVADDR
2075 " pa %#"PRIxPADDR
2076 " in pv_table but != %#"PRIxPTE"\n",
2077 __func__, va, pa, pte_value(pte));
2078 #endif
2079 PMAP_COUNT(remappings);
2080 VM_PAGEMD_PVLIST_UNLOCK(mdpg);
2081 if (__predict_false(apv != NULL))
2082 pmap_pv_free(apv);
2083
2084 UVMHIST_LOG(pmaphist,
2085 " <-- done pv=%#jx (reused)",
2086 (uintptr_t)pv, 0, 0, 0);
2087 return;
2088 }
2089 }
2090 if (__predict_true(apv == NULL)) {
2091 /*
2092 * To allocate a PV, we have to release the PVLIST lock
2093 * so get the page generation. We allocate the PV, and
2094 * then reacquire the lock.
2095 */
2096 pmap_pvlist_check(mdpg);
2097 const uintptr_t gen = VM_PAGEMD_PVLIST_UNLOCK(mdpg);
2098
2099 apv = (pv_entry_t)pmap_pv_alloc();
2100 if (apv == NULL)
2101 panic("pmap_enter_pv: pmap_pv_alloc() failed");
2102
2103 /*
2104 * If the generation has changed, then someone else
2105 * tinkered with this page so we should start over.
2106 */
2107 if (gen != VM_PAGEMD_PVLIST_LOCK(mdpg))
2108 goto again;
2109 }
2110 npv = apv;
2111 apv = NULL;
2112 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
2113 /*
2114 		 * If we need to deal with virtual cache aliases, keep mappings
2115 * in the kernel pmap at the head of the list. This allows
2116 * the VCA code to easily use them for cache operations if
2117 * present.
2118 */
2119 pmap_t kpmap = pmap_kernel();
2120 if (pmap != kpmap) {
2121 while (pv->pv_pmap == kpmap && pv->pv_next != NULL) {
2122 pv = pv->pv_next;
2123 }
2124 }
2125 #endif
2126 npv->pv_va = va | flags;
2127 npv->pv_pmap = pmap;
2128 npv->pv_next = pv->pv_next;
2129 pv->pv_next = npv;
2130 PMAP_COUNT(mappings);
2131 }
2132 pmap_pvlist_check(mdpg);
2133 VM_PAGEMD_PVLIST_UNLOCK(mdpg);
2134 if (__predict_false(apv != NULL))
2135 pmap_pv_free(apv);
2136
2137 UVMHIST_LOG(pmaphist, " <-- done pv=%#jx (first %ju)", (uintptr_t)pv,
2138 first, 0, 0);
2139 }
2140
2141 /*
2142 * Remove a physical to virtual address translation.
2143 * If cache was inhibited on this page, and there are no more cache
2144 * conflicts, restore caching.
2145  * Flush the cache if the last mapping is removed (the page should always
2146  * be cached at this point).
2147 */
2148 void
2149 pmap_remove_pv(pmap_t pmap, vaddr_t va, struct vm_page *pg, bool dirty)
2150 {
2151 struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
2152 pv_entry_t pv, npv;
2153 bool last;
2154
2155 UVMHIST_FUNC(__func__);
2156 UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx, va=%#jx, pg=%#jx (pa %#jx)",
2157 (uintptr_t)pmap, va, (uintptr_t)pg, VM_PAGE_TO_PHYS(pg));
2158 UVMHIST_LOG(pmaphist, "dirty=%ju)", dirty, 0, 0, 0);
2159
2160 KASSERT(kpreempt_disabled());
2161 KASSERT((va & PAGE_MASK) == 0);
2162 pv = &mdpg->mdpg_first;
2163
2164 VM_PAGEMD_PVLIST_LOCK(mdpg);
2165 pmap_pvlist_check(mdpg);
2166
2167 /*
2168 * If it is the first entry on the list, it is actually
2169 * in the header and we must copy the following entry up
2170 * to the header. Otherwise we must search the list for
2171 * the entry. In either case we free the now unused entry.
2172 */
2173
2174 last = false;
2175 if (pmap == pv->pv_pmap && va == trunc_page(pv->pv_va)) {
2176 npv = pv->pv_next;
2177 if (npv) {
2178 *pv = *npv;
2179 KASSERT(pv->pv_pmap != NULL);
2180 } else {
2181 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
2182 pmap_page_clear_attributes(mdpg, VM_PAGEMD_UNCACHED);
2183 #endif
2184 pv->pv_pmap = NULL;
2185 last = true; /* Last mapping removed */
2186 }
2187 PMAP_COUNT(remove_pvfirst);
2188 } else {
2189 for (npv = pv->pv_next; npv; pv = npv, npv = npv->pv_next) {
2190 PMAP_COUNT(remove_pvsearch);
2191 if (pmap == npv->pv_pmap && va == trunc_page(npv->pv_va))
2192 break;
2193 }
2194 if (npv) {
2195 pv->pv_next = npv->pv_next;
2196 }
2197 }
2198
2199 pmap_pvlist_check(mdpg);
2200 VM_PAGEMD_PVLIST_UNLOCK(mdpg);
2201
2202 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
2203 pmap_md_vca_remove(pg, va, dirty, last);
2204 #endif
2205
2206 /*
2207 * Free the pv_entry if needed.
2208 */
2209 if (npv)
2210 pmap_pv_free(npv);
2211 if (VM_PAGEMD_EXECPAGE_P(mdpg) && dirty) {
2212 if (last) {
2213 /*
2214 * If this was the page's last mapping, we no longer
2215 * care about its execness.
2216 */
2217 UVMHIST_LOG(pmapexechist,
2218 "pg %#jx (pa %#jx)last %ju: execpage cleared",
2219 (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), last, 0);
2220 pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE);
2221 PMAP_COUNT(exec_uncached_remove);
2222 } else {
2223 /*
2224 * Someone still has it mapped as an executable page
2225 * so we must sync it.
2226 */
2227 UVMHIST_LOG(pmapexechist,
2228 "pg %#jx (pa %#jx) last %ju: performed syncicache",
2229 (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), last, 0);
2230 pmap_page_syncicache(pg);
2231 PMAP_COUNT(exec_synced_remove);
2232 }
2233 }
2234
2235 UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
2236 }
2237
2238 #if defined(MULTIPROCESSOR)
2239 struct pmap_pvlist_info {
2240 kmutex_t *pli_locks[PAGE_SIZE / 32];
2241 volatile u_int pli_lock_refs[PAGE_SIZE / 32];
2242 volatile u_int pli_lock_index;
2243 u_int pli_lock_mask;
2244 } pmap_pvlist_info;
2245
2246 void
2247 pmap_pvlist_lock_init(size_t cache_line_size)
2248 {
2249 struct pmap_pvlist_info * const pli = &pmap_pvlist_info;
2250 const vaddr_t lock_page = uvm_pageboot_alloc(PAGE_SIZE);
2251 vaddr_t lock_va = lock_page;
2252 if (sizeof(kmutex_t) > cache_line_size) {
2253 cache_line_size = roundup2(sizeof(kmutex_t), cache_line_size);
2254 }
2255 const size_t nlocks = PAGE_SIZE / cache_line_size;
2256 KASSERT((nlocks & (nlocks - 1)) == 0);
2257 /*
2258 * Now divide the page into a number of mutexes, one per cacheline.
2259 */
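	/*
	 * For example, with 4KB pages, a 64-byte cache line and
	 * sizeof(kmutex_t) <= 64, this yields 64 locks.
	 */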
2260 for (size_t i = 0; i < nlocks; lock_va += cache_line_size, i++) {
2261 kmutex_t * const lock = (kmutex_t *)lock_va;
2262 mutex_init(lock, MUTEX_DEFAULT, IPL_HIGH);
2263 pli->pli_locks[i] = lock;
2264 }
2265 pli->pli_lock_mask = nlocks - 1;
2266 }
2267
2268 kmutex_t *
2269 pmap_pvlist_lock_addr(struct vm_page_md *mdpg)
2270 {
2271 struct pmap_pvlist_info * const pli = &pmap_pvlist_info;
2272 kmutex_t *lock = mdpg->mdpg_lock;
2273
2274 /*
2275 * Allocate a lock on an as-needed basis. This will hopefully give us
2276 * semi-random distribution not based on page color.
2277 */
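	/*
	 * The stride of 37 below is odd and therefore coprime with the
	 * power-of-two number of locks, so successive assignments cycle
	 * through every lock before repeating.
	 */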
2278 if (__predict_false(lock == NULL)) {
2279 size_t locknum = atomic_add_int_nv(&pli->pli_lock_index, 37);
2280 size_t lockid = locknum & pli->pli_lock_mask;
2281 kmutex_t * const new_lock = pli->pli_locks[lockid];
2282 /*
2283 * Set the lock. If some other thread already did, just use
2284 * the one they assigned.
2285 */
2286 lock = atomic_cas_ptr(&mdpg->mdpg_lock, NULL, new_lock);
2287 if (lock == NULL) {
2288 lock = new_lock;
2289 atomic_inc_uint(&pli->pli_lock_refs[lockid]);
2290 }
2291 }
2292
2293 /*
2294 * Now finally provide the lock.
2295 */
2296 return lock;
2297 }
2298 #else /* !MULTIPROCESSOR */
2299 void
2300 pmap_pvlist_lock_init(size_t cache_line_size)
2301 {
2302 mutex_init(&pmap_pvlist_mutex, MUTEX_DEFAULT, IPL_HIGH);
2303 }
2304
2305 #ifdef MODULAR
2306 kmutex_t *
2307 pmap_pvlist_lock_addr(struct vm_page_md *mdpg)
2308 {
2309 /*
2310 * We just use a global lock.
2311 */
2312 if (__predict_false(mdpg->mdpg_lock == NULL)) {
2313 mdpg->mdpg_lock = &pmap_pvlist_mutex;
2314 }
2315
2316 /*
2317 * Now finally provide the lock.
2318 */
2319 return mdpg->mdpg_lock;
2320 }
2321 #endif /* MODULAR */
2322 #endif /* !MULTIPROCESSOR */
2323
2324 /*
2325 * pmap_pv_page_alloc:
2326 *
2327 * Allocate a page for the pv_entry pool.
2328 */
2329 void *
2330 pmap_pv_page_alloc(struct pool *pp, int flags)
2331 {
2332 struct vm_page * const pg = pmap_md_alloc_poolpage(UVM_PGA_USERESERVE);
2333 if (pg == NULL)
2334 return NULL;
2335
2336 return (void *)pmap_md_map_poolpage(VM_PAGE_TO_PHYS(pg), PAGE_SIZE);
2337 }
2338
2339 /*
2340 * pmap_pv_page_free:
2341 *
2342 * Free a pv_entry pool page.
2343 */
2344 void
2345 pmap_pv_page_free(struct pool *pp, void *v)
2346 {
2347 vaddr_t va = (vaddr_t)v;
2348
2349 KASSERT(pmap_md_direct_mapped_vaddr_p(va));
2350 const paddr_t pa = pmap_md_direct_mapped_vaddr_to_paddr(va);
2351 struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
2352 KASSERT(pg != NULL);
2353 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
2354 kpreempt_disable();
2355 pmap_md_vca_remove(pg, va, true, true);
2356 kpreempt_enable();
2357 #endif
2358 pmap_page_clear_attributes(VM_PAGE_TO_MD(pg), VM_PAGEMD_POOLPAGE);
2359 KASSERT(!VM_PAGEMD_EXECPAGE_P(VM_PAGE_TO_MD(pg)));
2360 uvm_pagefree(pg);
2361 }
2362
2363 #ifdef PMAP_PREFER
2364 /*
2365 * Find first virtual address >= *vap that doesn't cause
2366 * a cache alias conflict.
2367 */
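/*
 * For example (illustrative values only), with prefer_mask = 0x3000 (four
 * page colors), foff = 0x2000, *vap = 0x10000 and td == 0:
 * d = (0x2000 - 0x10000) & 0x3000 = 0x2000, so *vap is advanced to 0x12000,
 * which has the same color bits as foff.
 */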
2368 void
2369 pmap_prefer(vaddr_t foff, vaddr_t *vap, vsize_t sz, int td)
2370 {
2371 vsize_t prefer_mask = ptoa(uvmexp.colormask);
2372
2373 PMAP_COUNT(prefer_requests);
2374
2375 prefer_mask |= pmap_md_cache_prefer_mask();
2376
2377 if (prefer_mask) {
2378 vaddr_t va = *vap;
2379 vsize_t d = (foff - va) & prefer_mask;
2380 if (d) {
2381 if (td)
2382 *vap = trunc_page(va - ((-d) & prefer_mask));
2383 else
2384 *vap = round_page(va + d);
2385 PMAP_COUNT(prefer_adjustments);
2386 }
2387 }
2388 }
2389 #endif /* PMAP_PREFER */
2390
2391 #ifdef PMAP_MAP_POOLPAGE
2392 vaddr_t
2393 pmap_map_poolpage(paddr_t pa)
2394 {
2395 struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
2396 KASSERT(pg);
2397
2398 struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
2399 KASSERT(!VM_PAGEMD_EXECPAGE_P(mdpg));
2400
2401 pmap_page_set_attributes(mdpg, VM_PAGEMD_POOLPAGE);
2402
2403 return pmap_md_map_poolpage(pa, NBPG);
2404 }
2405
2406 paddr_t
2407 pmap_unmap_poolpage(vaddr_t va)
2408 {
2409 KASSERT(pmap_md_direct_mapped_vaddr_p(va));
2410 paddr_t pa = pmap_md_direct_mapped_vaddr_to_paddr(va);
2411
2412 struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
2413 KASSERT(pg != NULL);
2414 KASSERT(!VM_PAGEMD_EXECPAGE_P(VM_PAGE_TO_MD(pg)));
2415
2416 pmap_page_clear_attributes(VM_PAGE_TO_MD(pg), VM_PAGEMD_POOLPAGE);
2417 pmap_md_unmap_poolpage(va, NBPG);
2418
2419 return pa;
2420 }
2421 #endif /* PMAP_MAP_POOLPAGE */
2422
2423 #ifdef DDB
2424 void
2425 pmap_db_mdpg_print(struct vm_page *pg, void (*pr)(const char *, ...) __printflike(1, 2))
2426 {
2427 struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
2428 pv_entry_t pv = &mdpg->mdpg_first;
2429
2430 if (pv->pv_pmap == NULL) {
2431 pr(" no mappings\n");
2432 return;
2433 }
2434
2435 int lcount = 0;
2436 if (VM_PAGEMD_VMPAGE_P(mdpg)) {
2437 pr(" vmpage");
2438 lcount++;
2439 }
2440 if (VM_PAGEMD_POOLPAGE_P(mdpg)) {
2441 if (lcount != 0)
2442 pr(",");
2443 pr(" pool");
2444 lcount++;
2445 }
2446 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
2447 if (VM_PAGEMD_UNCACHED_P(mdpg)) {
2448 if (lcount != 0)
2449 pr(",");
2450 		pr(" uncached");
2451 }
2452 #endif
2453 pr("\n");
2454
2455 lcount = 0;
2456 if (VM_PAGEMD_REFERENCED_P(mdpg)) {
2457 		pr(" referenced");
2458 lcount++;
2459 }
2460 if (VM_PAGEMD_MODIFIED_P(mdpg)) {
2461 if (lcount != 0)
2462 pr(",");
2463 pr(" modified");
2464 lcount++;
2465 }
2466 if (VM_PAGEMD_EXECPAGE_P(mdpg)) {
2467 if (lcount != 0)
2468 pr(",");
2469 pr(" exec");
2470 lcount++;
2471 }
2472 pr("\n");
2473
2474 for (size_t i = 0; pv != NULL; pv = pv->pv_next) {
2475 pr(" pv[%zu] pv=%p\n", i, pv);
2476 pr(" pv[%zu].pv_pmap = %p", i, pv->pv_pmap);
2477 pr(" pv[%zu].pv_va = %" PRIxVADDR " (kenter=%s)\n",
2478 i, trunc_page(pv->pv_va), PV_ISKENTER_P(pv) ? "true" : "false");
2479 i++;
2480 }
2481 }
2482
2483 void
2484 pmap_db_pmap_print(struct pmap *pm,
2485 void (*pr)(const char *, ...) __printflike(1, 2))
2486 {
2487 #if defined(PMAP_HWPAGEWALKER)
2488 pr(" pm_pdetab = %p\n", pm->pm_pdetab);
2489 #endif
2490 #if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
2491 pr(" pm_segtab = %p\n", pm->pm_segtab);
2492 #endif
2493
2494 pmap_db_tlb_print(pm, pr);
2495 }
2496 #endif /* DDB */
2497