/*-
 * SPDX-License-Identifier: BSD-3-Clause AND BSD-2-Clause
 *
 * Copyright (c) 1991 Regents of the University of California.
 * Copyright (c) 1994 John S. Dyson
 * Copyright (c) 1994 David Greenman
 * Copyright (c) 2005-2010 Alan L. Cox <alc@cs.rice.edu>
 * Copyright (c) 2014-2016 Svatopluk Kraus <skra@FreeBSD.org>
 * Copyright (c) 2014-2016 Michal Meloun <mmel@FreeBSD.org>
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2003 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Jake Burkholder,
 * Safeport Network Services, and Network Associates Laboratories, the
 * Security Research Division of Network Associates, Inc. under
 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
 * CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
/*
 * Manages physical address maps.
 *
 * Since the information managed by this module is
 * also stored by the logical address mapping module,
 * this module may throw away valid virtual-to-physical
 * mappings at almost any time. However, invalidations
 * of virtual-to-physical mappings must be done as
 * requested.
 *
 * In order to cope with hardware architectures which
 * make virtual-to-physical map invalidates expensive,
 * this module may delay invalidate or reduced protection
 * operations until such time as they are actually
 * necessary. This module is given full information as
 * to which processors are currently using which maps,
 * and to when physical maps must be made correct.
 */

#include "opt_vm.h"
#include "opt_pmap.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/malloc.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/sf_buf.h>
#include <sys/smp.h>
#include <sys/sched.h>
#include <sys/sysctl.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <vm/vm.h>
#include <vm/uma.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_phys.h>
#include <vm/vm_extern.h>
#include <vm/vm_reserv.h>
#include <sys/mutex.h>

#include <machine/md_var.h>
#include <machine/pmap_var.h>
#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/sf_buf.h>
#ifdef SMP
#include <machine/smp.h>
#endif
#ifndef PMAP_SHPGPERPROC
#define PMAP_SHPGPERPROC 200
#endif

#ifndef DIAGNOSTIC
#define PMAP_INLINE __inline
#else
#define PMAP_INLINE
#endif

#ifdef PMAP_DEBUG
static void pmap_zero_page_check(vm_page_t m);
void pmap_debug(int level);
int pmap_pid_dump(int pid);

#define PDEBUG(_lev_, _stat_) \
    do { \
        if (pmap_debug_level >= (_lev_)) \
            ((_stat_)); \
    } while (0)
#define dprintf printf
int pmap_debug_level = 1;
#else /* PMAP_DEBUG */
#define PDEBUG(_lev_, _stat_) /* Nothing */
#define dprintf(x, arg...)
#endif /* PMAP_DEBUG */

/*
 * Level 2 page table map definition ('max' is excluded).
 */

#define PT2V_MIN_ADDRESS  ((vm_offset_t)PT2MAP)
#define PT2V_MAX_ADDRESS  ((vm_offset_t)PT2MAP + PT2MAP_SIZE)

#define UPT2V_MIN_ADDRESS ((vm_offset_t)PT2MAP)
#define UPT2V_MAX_ADDRESS \
    ((vm_offset_t)(PT2MAP + (KERNBASE >> PT2MAP_SHIFT)))

/*
 * Promotion to a 1MB (PTE1) page mapping requires that the corresponding
 * 4KB (PTE2) page mappings have identical settings for the following fields:
 */
#define PTE2_PROMOTE	(PTE2_V | PTE2_A | PTE2_NM | PTE2_S | PTE2_NG | \
    PTE2_NX | PTE2_RO | PTE2_U | PTE2_W | PTE2_ATTR_MASK)

#define PTE1_PROMOTE	(PTE1_V | PTE1_A | PTE1_NM | PTE1_S | PTE1_NG | \
    PTE1_NX | PTE1_RO | PTE1_U | PTE1_W | PTE1_ATTR_MASK)

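/*
 * Translate memory attribute and permission bits between the L2 (4 KB page)
 * and L1 (1 MB section) descriptor encodings; the TEX/C/B memory-type bits
 * sit at different positions in the two formats.
 */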
#define ATTR_TO_L1(l2_attr)	((((l2_attr) & L2_TEX0) ? L1_S_TEX0 : 0) | \
    (((l2_attr) & L2_C) ? L1_S_C : 0) | \
    (((l2_attr) & L2_B) ? L1_S_B : 0) | \
    (((l2_attr) & PTE2_A) ? PTE1_A : 0) | \
    (((l2_attr) & PTE2_NM) ? PTE1_NM : 0) | \
    (((l2_attr) & PTE2_S) ? PTE1_S : 0) | \
    (((l2_attr) & PTE2_NG) ? PTE1_NG : 0) | \
    (((l2_attr) & PTE2_NX) ? PTE1_NX : 0) | \
    (((l2_attr) & PTE2_RO) ? PTE1_RO : 0) | \
    (((l2_attr) & PTE2_U) ? PTE1_U : 0) | \
    (((l2_attr) & PTE2_W) ? PTE1_W : 0))

#define ATTR_TO_L2(l1_attr)	((((l1_attr) & L1_S_TEX0) ? L2_TEX0 : 0) | \
    (((l1_attr) & L1_S_C) ? L2_C : 0) | \
    (((l1_attr) & L1_S_B) ? L2_B : 0) | \
    (((l1_attr) & PTE1_A) ? PTE2_A : 0) | \
    (((l1_attr) & PTE1_NM) ? PTE2_NM : 0) | \
    (((l1_attr) & PTE1_S) ? PTE2_S : 0) | \
    (((l1_attr) & PTE1_NG) ? PTE2_NG : 0) | \
    (((l1_attr) & PTE1_NX) ? PTE2_NX : 0) | \
    (((l1_attr) & PTE1_RO) ? PTE2_RO : 0) | \
    (((l1_attr) & PTE1_U) ? PTE2_U : 0) | \
    (((l1_attr) & PTE1_W) ? PTE2_W : 0))

/*
 * PTE2 descriptors creation macros.
 */
#define PTE2_ATTR_DEFAULT	vm_memattr_to_pte2(VM_MEMATTR_DEFAULT)
#define PTE2_ATTR_PT		vm_memattr_to_pte2(pt_memattr)

#define PTE2_KPT(pa)	PTE2_KERN(pa, PTE2_AP_KRW, PTE2_ATTR_PT)
#define PTE2_KPT_NG(pa)	PTE2_KERN_NG(pa, PTE2_AP_KRW, PTE2_ATTR_PT)

#define PTE2_KRW(pa)	PTE2_KERN(pa, PTE2_AP_KRW, PTE2_ATTR_DEFAULT)
#define PTE2_KRO(pa)	PTE2_KERN(pa, PTE2_AP_KR, PTE2_ATTR_DEFAULT)

#define PV_STATS
#ifdef PV_STATS
#define PV_STAT(x)	do { x ; } while (0)
#else
#define PV_STAT(x)	do { } while (0)
#endif

/*
 * The boot_pt1 is used temporarily in the very early boot stage as the L1
 * page table. We can initialize many things with no memory allocation thanks
 * to its static allocation, which brings two main advantages:
 * (1) other cores can be started very simply,
 * (2) various boot loaders can be supported, as their arguments can be
 *     processed in virtual address space and can be moved to a safe location
 *     before the first allocation happens.
 * The only disadvantage is that boot_pt1 is used just in the very early boot
 * stage. However, the table is uninitialized and so lies in bss. Therefore
 * the kernel image size is not influenced.
 *
 * QQQ: In the future, maybe, boot_pt1 can be used for soft reset and
 *      the CPU suspend/resume game.
 */
extern pt1_entry_t boot_pt1[];

vm_paddr_t base_pt1;
pt1_entry_t *kern_pt1;
pt2_entry_t *kern_pt2tab;
pt2_entry_t *PT2MAP;

static uint32_t ttb_flags;
static vm_memattr_t pt_memattr;
ttb_entry_t pmap_kern_ttb;

struct pmap kernel_pmap_store;
LIST_HEAD(pmaplist, pmap);
static struct pmaplist allpmaps;
static struct mtx allpmaps_lock;

vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */

static vm_offset_t kernel_vm_end_new;
vm_offset_t kernel_vm_end = KERNBASE + NKPT2PG * NPT2_IN_PG * PTE1_SIZE;
vm_offset_t vm_max_kernel_address;
vm_paddr_t kernel_l1pa;

static struct rwlock __aligned(CACHE_LINE_SIZE) pvh_global_lock;

/*
 * Data for the pv entry allocation mechanism
 */
static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
static struct md_page *pv_table; /* XXX: Is only the pv list in md_page used? */
static int shpgperproc = PMAP_SHPGPERPROC;

struct pv_chunk *pv_chunkbase;	/* KVA block for pv_chunks */
int pv_maxchunks;		/* How many chunks we have KVA for */
vm_offset_t pv_vafree;		/* freelist stored in the PTE */

vm_paddr_t first_managed_pa;
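/*
 * Map a physical address to the pv head entry of the 1 MB section that
 * contains it. Only addresses from first_managed_pa up are managed and
 * thus candidates for promotion.
 */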
#define pa_to_pvh(pa)	(&pv_table[pte1_index(pa - first_managed_pa)])

/*
 * All those kernel PT submaps that BSD is so fond of
 */
caddr_t _tmppt = 0;

/*
 * Crashdump maps.
 */
static caddr_t crashdumpmap;

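/*
 * PMAP1/PADDR1 and PMAP2/PADDR2 are reserved PT2 entry/VA pairs used for
 * short-lived mappings of arbitrary page table pages in pmap_pte2_quick()
 * and pmap_pte2(), respectively.
 */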
static pt2_entry_t *PMAP1 = NULL, *PMAP2;
static pt2_entry_t *PADDR1 = NULL, *PADDR2;
#ifdef DDB
static pt2_entry_t *PMAP3;
static pt2_entry_t *PADDR3;
static int PMAP3cpu __unused; /* for SMP only */
#endif
#ifdef SMP
static int PMAP1cpu;
static int PMAP1changedcpu;
SYSCTL_INT(_debug, OID_AUTO, PMAP1changedcpu, CTLFLAG_RD,
    &PMAP1changedcpu, 0,
    "Number of times pmap_pte2_quick changed CPU with same PMAP1");
#endif
static int PMAP1changed;
SYSCTL_INT(_debug, OID_AUTO, PMAP1changed, CTLFLAG_RD,
    &PMAP1changed, 0,
    "Number of times pmap_pte2_quick changed PMAP1");
static int PMAP1unchanged;
SYSCTL_INT(_debug, OID_AUTO, PMAP1unchanged, CTLFLAG_RD,
    &PMAP1unchanged, 0,
    "Number of times pmap_pte2_quick didn't change PMAP1");
static struct mtx PMAP2mutex;

/*
 * Internal flags for pmap_enter()'s helper functions.
 */
#define PMAP_ENTER_NORECLAIM	0x1000000	/* Don't reclaim PV entries. */
#define PMAP_ENTER_NOREPLACE	0x2000000	/* Don't replace mappings. */

static __inline void pt2_wirecount_init(vm_page_t m);
static bool pmap_demote_pte1(pmap_t pmap, pt1_entry_t *pte1p,
    vm_offset_t va);
static int pmap_enter_pte1(pmap_t pmap, vm_offset_t va, pt1_entry_t pte1,
    u_int flags, vm_page_t m);
void cache_icache_sync_fresh(vm_offset_t va, vm_paddr_t pa, vm_size_t size);

/*
 * Function to set the debug level of the pmap code.
 */
#ifdef PMAP_DEBUG
void
pmap_debug(int level)
{

        pmap_debug_level = level;
        dprintf("pmap_debug: level=%d\n", pmap_debug_level);
}
#endif /* PMAP_DEBUG */

/*
 * This table must correspond to the memory attribute configuration in vm.h.
 * The first entry is used for normal system mapping.
 *
 * Device memory is always marked as shared.
 * Normal memory is shared only in the SMP case.
 * The not-outer-shareable (NOS) bits are not used yet.
 * Class 6 cannot be used on ARM11.
 */
#define TEXDEF_TYPE_SHIFT	0
#define TEXDEF_TYPE_MASK	0x3
#define TEXDEF_INNER_SHIFT	2
#define TEXDEF_INNER_MASK	0x3
#define TEXDEF_OUTER_SHIFT	4
#define TEXDEF_OUTER_MASK	0x3
#define TEXDEF_NOS_SHIFT	6
#define TEXDEF_NOS_MASK		0x1

#define TEX(t, i, o, s) \
    (((t) << TEXDEF_TYPE_SHIFT) | \
    ((i) << TEXDEF_INNER_SHIFT) | \
    ((o) << TEXDEF_OUTER_SHIFT) | \
    ((s) << TEXDEF_NOS_SHIFT))

static uint32_t tex_class[8] = {
        /*  type      inner cache outer cache */
        TEX(PRRR_MEM, NMRR_WB_WA, NMRR_WB_WA, 0), /* 0 - ATTR_WB_WA */
        TEX(PRRR_MEM, NMRR_NC,    NMRR_NC,    0), /* 1 - ATTR_NOCACHE */
        TEX(PRRR_DEV, NMRR_NC,    NMRR_NC,    0), /* 2 - ATTR_DEVICE */
        TEX(PRRR_SO,  NMRR_NC,    NMRR_NC,    0), /* 3 - ATTR_SO */
        TEX(PRRR_MEM, NMRR_WT,    NMRR_WT,    0), /* 4 - ATTR_WT */
        TEX(PRRR_MEM, NMRR_NC,    NMRR_NC,    0), /* 5 - NOT USED YET */
        TEX(PRRR_MEM, NMRR_NC,    NMRR_NC,    0), /* 6 - NOT USED YET */
        TEX(PRRR_MEM, NMRR_NC,    NMRR_NC,    0), /* 7 - NOT USED YET */
};
#undef TEX

static uint32_t pte2_attr_tab[8] = {
        PTE2_ATTR_WB_WA,	/* 0 - VM_MEMATTR_WB_WA */
        PTE2_ATTR_NOCACHE,	/* 1 - VM_MEMATTR_NOCACHE */
        PTE2_ATTR_DEVICE,	/* 2 - VM_MEMATTR_DEVICE */
        PTE2_ATTR_SO,		/* 3 - VM_MEMATTR_SO */
        PTE2_ATTR_WT,		/* 4 - VM_MEMATTR_WRITE_THROUGH */
        0,			/* 5 - NOT USED YET */
        0,			/* 6 - NOT USED YET */
        0			/* 7 - NOT USED YET */
};
CTASSERT(VM_MEMATTR_WB_WA == 0);
CTASSERT(VM_MEMATTR_NOCACHE == 1);
CTASSERT(VM_MEMATTR_DEVICE == 2);
CTASSERT(VM_MEMATTR_SO == 3);
CTASSERT(VM_MEMATTR_WRITE_THROUGH == 4);
#define VM_MEMATTR_END	(VM_MEMATTR_WRITE_THROUGH + 1)

bool
pmap_is_valid_memattr(pmap_t pmap __unused, vm_memattr_t mode)
{

        return (mode >= 0 && mode < VM_MEMATTR_END);
}

static inline uint32_t
vm_memattr_to_pte2(vm_memattr_t ma)
{

        KASSERT((u_int)ma < VM_MEMATTR_END,
            ("%s: bad vm_memattr_t %d", __func__, ma));
        return (pte2_attr_tab[(u_int)ma]);
}

static inline uint32_t
vm_page_pte2_attr(vm_page_t m)
{

        return (vm_memattr_to_pte2(m->md.pat_mode));
}

/*
 * Convert TEX definition entry to TTB flags.
 */
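/*
 * The result follows the ARMv7 TTBR layout (with the Multiprocessing
 * Extensions): IRGN[0] in bit 6, NOS in bit 5, RGN in bits 4:3, S in
 * bit 1, and IRGN[1] in bit 0.
 */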
static uint32_t
encode_ttb_flags(int idx)
{
        uint32_t inner, outer, nos, reg;

        inner = (tex_class[idx] >> TEXDEF_INNER_SHIFT) & TEXDEF_INNER_MASK;
        outer = (tex_class[idx] >> TEXDEF_OUTER_SHIFT) & TEXDEF_OUTER_MASK;
        nos = (tex_class[idx] >> TEXDEF_NOS_SHIFT) & TEXDEF_NOS_MASK;

        reg = nos << 5;
        reg |= outer << 3;
        if (cpuinfo.coherent_walk)
                reg |= (inner & 0x1) << 6;
        reg |= (inner & 0x2) >> 1;
#ifdef SMP
        ARM_SMP_UP(
            reg |= 1 << 1,
        );
#endif
        return (reg);
}

/*
 * Set TEX remapping registers in current CPU.
 */
void
pmap_set_tex(void)
{
        uint32_t prrr, nmrr;
        uint32_t type, inner, outer, nos;
        int i;

#ifdef PMAP_PTE_NOCACHE
        /* XXX fixme */
        if (cpuinfo.coherent_walk) {
                pt_memattr = VM_MEMATTR_WB_WA;
                ttb_flags = encode_ttb_flags(0);
        } else {
                pt_memattr = VM_MEMATTR_NOCACHE;
                ttb_flags = encode_ttb_flags(1);
        }
#else
        pt_memattr = VM_MEMATTR_WB_WA;
        ttb_flags = encode_ttb_flags(0);
#endif

        prrr = 0;
        nmrr = 0;

        /* Build remapping register from TEX classes. */
        for (i = 0; i < 8; i++) {
                type = (tex_class[i] >> TEXDEF_TYPE_SHIFT) &
                    TEXDEF_TYPE_MASK;
                inner = (tex_class[i] >> TEXDEF_INNER_SHIFT) &
                    TEXDEF_INNER_MASK;
                outer = (tex_class[i] >> TEXDEF_OUTER_SHIFT) &
                    TEXDEF_OUTER_MASK;
                nos = (tex_class[i] >> TEXDEF_NOS_SHIFT) &
                    TEXDEF_NOS_MASK;

                prrr |= type << (i * 2);
                prrr |= nos << (i + 24);
                nmrr |= inner << (i * 2);
                nmrr |= outer << (i * 2 + 16);
        }
        /* Add shareable bits for device memory. */
        prrr |= PRRR_DS0 | PRRR_DS1;

        /* Add shareable bits for normal memory in SMP case. */
#ifdef SMP
        ARM_SMP_UP(
            prrr |= PRRR_NS1,
        );
#endif
        cp15_prrr_set(prrr);
        cp15_nmrr_set(nmrr);

        /* Caches are disabled, so full TLB flush should be enough. */
        tlb_flush_all_local();
}

/*
 * Remap one vm_memattr class to another one. This can be useful as a
 * workaround for SoC errata, e.g. if devices must be accessed using
 * the SO memory class.
 *
 * !!! Please note that this function is an absolute last resort.
 * It should not be used under normal circumstances. !!!
 *
 * Usage rules:
 * - it shall be called after pmap_bootstrap_prepare() and before
 *   cpu_mp_start() (thus only on the boot CPU). In practice, it's expected
 *   to be called from platform_attach() or platform_late_init().
 *
 * - if the remapping doesn't change the caching mode, or an uncached class
 *   is remapped to some kind of cached one, then no other restriction
 *   exists.
 *
 * - if pmap_remap_vm_attr() changes the caching mode, but both (original
 *   and remapped) classes remain cached, then the caller is responsible
 *   for calling dcache_wbinv_poc_all().
 *
 * - remapping of any kind of cached class to an uncached one is not
 *   permitted.
 */
void
pmap_remap_vm_attr(vm_memattr_t old_attr, vm_memattr_t new_attr)
{
        int old_idx, new_idx;

        /* Map VM memattrs to indexes into the tex_class table. */
        old_idx = PTE2_ATTR2IDX(pte2_attr_tab[(int)old_attr]);
        new_idx = PTE2_ATTR2IDX(pte2_attr_tab[(int)new_attr]);

        /* Replace TEX attribute and apply it. */
        tex_class[old_idx] = tex_class[new_idx];
        pmap_set_tex();
}

/*
 * KERNBASE must be a multiple of NPT2_IN_PG * PTE1_SIZE. In other words,
 * KERNBASE is mapped by the first L2 page table in an L2 page table page.
 * PT2MAP meets the same constraint, as it is placed just below KERNBASE.
 */
CTASSERT((KERNBASE & (NPT2_IN_PG * PTE1_SIZE - 1)) == 0);
CTASSERT((KERNBASE - VM_MAXUSER_ADDRESS) >= PT2MAP_SIZE);

/*
 * In crazy dreams, PAGE_SIZE could be a multiple of PTE2_SIZE in general.
 * For now, anyhow, the following check must be fulfilled.
 */
CTASSERT(PAGE_SIZE == PTE2_SIZE);
/*
 * We don't want to mess up MI code with all MMU and PMAP definitions,
 * so some things, which depend on other ones, are defined independently.
 * Now, it is time to check that we don't screw up something.
 */
CTASSERT(PDRSHIFT == PTE1_SHIFT);
/*
 * Check L1 and L2 page table entries definitions consistency.
 */
CTASSERT(NB_IN_PT1 == (sizeof(pt1_entry_t) * NPTE1_IN_PT1));
CTASSERT(NB_IN_PT2 == (sizeof(pt2_entry_t) * NPTE2_IN_PT2));
/*
 * Check L2 page tables page consistency.
 */
CTASSERT(PAGE_SIZE == (NPT2_IN_PG * NB_IN_PT2));
CTASSERT((1 << PT2PG_SHIFT) == NPT2_IN_PG);
/*
 * Check PT2TAB consistency.
 * PT2TAB_ENTRIES is defined as a division of NPTE1_IN_PT1 by NPT2_IN_PG.
 * The division must be exact.
 */
CTASSERT(NPTE1_IN_PT1 == (PT2TAB_ENTRIES * NPT2_IN_PG));

/*
 * A PT2MAP magic.
 *
 * All level 2 page tables (PT2s) are mapped continuously and accordingly
 * into the PT2MAP address space. As PT2 size is less than PAGE_SIZE, this
 * can be done only if PAGE_SIZE is a multiple of PT2 size. All PT2s in one
 * page must be used together, but not necessarily all at once. The first
 * PT2 in a page must map things at a correctly aligned address and the
 * others must follow in the right order.
 */
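/*
 * Concretely, with 4 KB pages and the ARM short-descriptor format, each PT2
 * is 256 entries * 4 bytes = 1 KB and maps 1 MB, so one page holds four PT2s
 * covering 4 MB of aligned virtual address space.
 */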
#define NB_IN_PT2TAB	(PT2TAB_ENTRIES * sizeof(pt2_entry_t))
#define NPT2_IN_PT2TAB	(NB_IN_PT2TAB / NB_IN_PT2)
#define NPG_IN_PT2TAB	(NB_IN_PT2TAB / PAGE_SIZE)

/*
 * Check PT2TAB consistency.
 * NPT2_IN_PT2TAB is defined as a division of NB_IN_PT2TAB by NB_IN_PT2.
 * NPG_IN_PT2TAB is defined as a division of NB_IN_PT2TAB by PAGE_SIZE.
 * Both divisions must be exact.
 */
CTASSERT(NB_IN_PT2TAB == (NPT2_IN_PT2TAB * NB_IN_PT2));
CTASSERT(NB_IN_PT2TAB == (NPG_IN_PT2TAB * PAGE_SIZE));
/*
 * The implementation was made general, however, with the assumption
 * below in mind. In case of another value of NPG_IN_PT2TAB,
 * the code should be rechecked once more.
 */
CTASSERT(NPG_IN_PT2TAB == 1);

/*
 * Get offset of PT2 in a page
 * associated with given PT1 index.
 */
static __inline u_int
page_pt2off(u_int pt1_idx)
{

        return ((pt1_idx & PT2PG_MASK) * NB_IN_PT2);
}

/*
 * Get physical address of PT2
 * associated with given PT2s page and PT1 index.
 */
static __inline vm_paddr_t
page_pt2pa(vm_paddr_t pgpa, u_int pt1_idx)
{

        return (pgpa + page_pt2off(pt1_idx));
}

/*
 * Get first entry of PT2
 * associated with given PT2s page and PT1 index.
 */
static __inline pt2_entry_t *
page_pt2(vm_offset_t pgva, u_int pt1_idx)
{

        return ((pt2_entry_t *)(pgva + page_pt2off(pt1_idx)));
}

/*
 * Get virtual address of PT2s page (mapped in PT2MAP)
 * which holds the PT2 which holds the entry which maps the given
 * virtual address.
 */
static __inline vm_offset_t
pt2map_pt2pg(vm_offset_t va)
{

        va &= ~(NPT2_IN_PG * PTE1_SIZE - 1);
        return ((vm_offset_t)pt2map_entry(va));
}

/*****************************************************************************
 *
 * THREE pmap initialization milestones exist:
 *
 * locore.S
 *   -> fundamental init (including MMU) in ASM
 *
 * initarm()
 *   -> fundamental init continues in C
 *   -> first available physical address is known
 *
 * pmap_bootstrap_prepare() -> FIRST PMAP MILESTONE (first epoch begins)
 *   -> basic (safe) interface for physical address allocation is made
 *   -> basic (safe) interface for virtual mapping is made
 *   -> limited, not SMP coherent work is possible
 *
 * -> more fundamental init continues in C
 *   -> locks and some more things are available
 *   -> all fundamental allocations and mappings are done
 *
 * pmap_bootstrap() -> SECOND PMAP MILESTONE (second epoch begins)
 *   -> phys_avail[] and virtual_avail are set
 *   -> control is passed to the vm subsystem
 *   -> physical and virtual address allocation are off limits
 *   -> low level mapping functions, some of them SMP coherent, become
 *      available; they cannot be used before the vm subsystem is
 *      initialized
 *
 * mi_startup()
 *   -> vm subsystem is being initialized
 *
 * pmap_init() -> THIRD PMAP MILESTONE (third epoch begins)
 *   -> pmap is fully initialized
 *
 *****************************************************************************/

/*****************************************************************************
 *
 * PMAP first stage initialization and utility functions
 * for pre-bootstrap epoch.
 *
 * After pmap_bootstrap_prepare() is called, the following functions
 * can be used:
 *
 * (1) strictly only in this stage, functions for physical page allocations,
 *     virtual space allocations, and mappings:
 *
 * vm_paddr_t pmap_preboot_get_pages(u_int num);
 * void pmap_preboot_map_pages(vm_paddr_t pa, vm_offset_t va, u_int num);
 * vm_offset_t pmap_preboot_reserve_pages(u_int num);
 * vm_offset_t pmap_preboot_get_vpages(u_int num);
 * void pmap_preboot_map_attr(vm_paddr_t pa, vm_offset_t va, vm_size_t size,
 *     vm_prot_t prot, vm_memattr_t attr);
 *
 * (2) for all stages:
 *
 * vm_paddr_t pmap_kextract(vm_offset_t va);
 *
 * NOTE: This stage is not SMP coherent.
 *
 *****************************************************************************/

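/*
 * Convert between physical and kernel virtual addresses using the fixed
 * offset at which the kernel image was loaded. This is valid only for
 * addresses within the kernel's linearly mapped region (the image and the
 * early allocations that follow it).
 */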
#define KERNEL_P2V(pa) \
    ((vm_offset_t)((pa) - arm_physmem_kernaddr + KERNVIRTADDR))
#define KERNEL_V2P(va) \
    ((vm_paddr_t)((va) - KERNVIRTADDR + arm_physmem_kernaddr))

static vm_paddr_t last_paddr;

/*
 * Pre-bootstrap epoch page allocator.
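 * A simple bump allocator: pages are handed out in physically contiguous
 * order starting at last_paddr, nothing is ever freed, and no bounds
 * checking is done.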
 */
vm_paddr_t
pmap_preboot_get_pages(u_int num)
{
        vm_paddr_t ret;

        ret = last_paddr;
        last_paddr += num * PAGE_SIZE;

        return (ret);
}

/*
 * The fundamental initialization of PMAP stuff.
 *
 * Some things already happened in locore.S and some things could happen
 * before pmap_bootstrap_prepare() is called, so let's recall what is done:
 * 1. Caches are disabled.
 * 2. We are running on virtual addresses already with 'boot_pt1'
 *    as the L1 page table.
 * 3. So far, all virtual addresses can be converted to physical ones and
 *    vice versa by the following macros:
 *      KERNEL_P2V(pa) .... physical to virtual ones,
 *      KERNEL_V2P(va) .... virtual to physical ones.
 *
 * What is done herein:
 * 1. The 'boot_pt1' is replaced by the real kernel L1 page table 'kern_pt1'.
 * 2. The PT2MAP magic is brought to life.
 * 3. Basic preboot functions for page allocations and mappings can be used.
 * 4. Everything is prepared for L1 cache enabling.
 *
 * Variations:
 * 1. Use the second TTB register, so that kernel and user page tables are
 *    separated. This way process forking - pmap_pinit() - could be faster:
 *    it saves physical pages and KVA per process, and it's a simple change.
 *    However, it will lead, due to hardware matter, to the following:
 *    (a) 2G space for kernel and 2G space for users.
 *    (b) 1G space for kernel in low addresses and 3G for users above it.
 *    A question is: Is case (b) really an option? Note that case (b)
 *    saves neither physical memory nor KVA.
 */
void
pmap_bootstrap_prepare(vm_paddr_t last)
{
        vm_paddr_t pt2pg_pa, pt2tab_pa, pa, size;
        vm_offset_t pt2pg_va;
        pt1_entry_t *pte1p;
        pt2_entry_t *pte2p;
        u_int i;
        uint32_t l1_attr;

        /*
         * Now, we are going to make the real kernel mapping. Note that we
         * are already running on some mapping made in locore.S and we expect
         * that it's large enough to ensure nofault access to physical memory
         * allocated herein before the switch.
         *
         * As the kernel image and everything needed before are and will be
         * mapped by section mappings, we align the last physical address to
         * PTE1_SIZE.
         */
        last_paddr = pte1_roundup(last);

        /*
         * Allocate and zero page(s) for the kernel L1 page table.
         *
         * Note that it's the first allocation in space which was PTE1_SIZE
         * aligned, and as such base_pt1 is aligned to NB_IN_PT1 too.
         */
        base_pt1 = pmap_preboot_get_pages(NPG_IN_PT1);
        kern_pt1 = (pt1_entry_t *)KERNEL_P2V(base_pt1);
        bzero((void*)kern_pt1, NB_IN_PT1);
        pte1_sync_range(kern_pt1, NB_IN_PT1);

        /* Allocate and zero page(s) for kernel PT2TAB. */
        pt2tab_pa = pmap_preboot_get_pages(NPG_IN_PT2TAB);
        kern_pt2tab = (pt2_entry_t *)KERNEL_P2V(pt2tab_pa);
        bzero(kern_pt2tab, NB_IN_PT2TAB);
        pte2_sync_range(kern_pt2tab, NB_IN_PT2TAB);

        /* Allocate and zero page(s) for kernel L2 page tables. */
        pt2pg_pa = pmap_preboot_get_pages(NKPT2PG);
        pt2pg_va = KERNEL_P2V(pt2pg_pa);
        size = NKPT2PG * PAGE_SIZE;
        bzero((void*)pt2pg_va, size);
        pte2_sync_range((pt2_entry_t *)pt2pg_va, size);

        /*
         * Add a physical memory segment (vm_phys_seg) corresponding to the
         * preallocated pages for kernel L2 page tables so that vm_page
         * structures representing these pages will be created. The vm_page
         * structures are required for promotion of the corresponding kernel
         * virtual addresses to section mappings.
         */
        vm_phys_add_seg(pt2tab_pa, pmap_preboot_get_pages(0));

        /*
         * Insert allocated L2 page table pages to PT2TAB and make
         * links to all PT2s in the L1 page table. See how kernel_vm_end
         * is initialized.
         *
         * We play simple and safe. So every KVA will have an underlying
         * L2 page table, even the kernel image mapped by sections.
         */
        pte2p = kern_pt2tab_entry(KERNBASE);
        for (pa = pt2pg_pa; pa < pt2pg_pa + size; pa += PTE2_SIZE)
                pt2tab_store(pte2p++, PTE2_KPT(pa));

        pte1p = kern_pte1(KERNBASE);
        for (pa = pt2pg_pa; pa < pt2pg_pa + size; pa += NB_IN_PT2)
                pte1_store(pte1p++, PTE1_LINK(pa));

        /* Make section mappings for kernel. */
        l1_attr = ATTR_TO_L1(PTE2_ATTR_DEFAULT);
        pte1p = kern_pte1(KERNBASE);
        for (pa = KERNEL_V2P(KERNBASE); pa < last; pa += PTE1_SIZE)
                pte1_store(pte1p++, PTE1_KERN(pa, PTE1_AP_KRW, l1_attr));

        /*
         * Get free and aligned space for PT2MAP and make L1 page table links
         * to the L2 page tables held in PT2TAB.
         *
         * Note that pages holding PT2s are stored in PT2TAB as pt2_entry_t
         * descriptors and the PT2TAB page(s) itself is (are) used as PT2s.
         * Thus each entry in PT2TAB maps all PT2s in a page. This implies
         * that the virtual address of PT2MAP must be aligned to
         * NPT2_IN_PG * PTE1_SIZE.
         */
        PT2MAP = (pt2_entry_t *)(KERNBASE - PT2MAP_SIZE);
        pte1p = kern_pte1((vm_offset_t)PT2MAP);
        for (pa = pt2tab_pa, i = 0; i < NPT2_IN_PT2TAB; i++, pa += NB_IN_PT2) {
                pte1_store(pte1p++, PTE1_LINK(pa));
        }

        /*
         * Store PT2TAB in PT2TAB itself, i.e. self reference mapping.
         * Each pmap will hold its own PT2TAB, so the mapping should not
         * be global.
         */
        pte2p = kern_pt2tab_entry((vm_offset_t)PT2MAP);
        for (pa = pt2tab_pa, i = 0; i < NPG_IN_PT2TAB; i++, pa += PTE2_SIZE) {
                pt2tab_store(pte2p++, PTE2_KPT_NG(pa));
        }

        /*
         * Choose the correct L2 page table and make mappings for the
         * allocations made herein, which replace the temporary locore.S
         * mappings after a while. Note that PT2MAP cannot be used until
         * we switch to kern_pt1.
         *
         * Note that these allocations started aligned on a 1M section and
         * the kernel PT1 was allocated first. The mappings must be made in
         * the order of the physical allocations, as we've used the
         * KERNEL_P2V() macro for virtual address resolution.
         */
        pte2p = kern_pt2tab_entry((vm_offset_t)kern_pt1);
        pt2pg_va = KERNEL_P2V(pte2_pa(pte2_load(pte2p)));

        pte2p = page_pt2(pt2pg_va, pte1_index((vm_offset_t)kern_pt1));

        /* Make mapping for kernel L1 page table. */
        for (pa = base_pt1, i = 0; i < NPG_IN_PT1; i++, pa += PTE2_SIZE)
                pte2_store(pte2p++, PTE2_KPT(pa));

        /* Make mapping for kernel PT2TAB. */
        for (pa = pt2tab_pa, i = 0; i < NPG_IN_PT2TAB; i++, pa += PTE2_SIZE)
                pte2_store(pte2p++, PTE2_KPT(pa));

        /* Finally, switch from 'boot_pt1' to 'kern_pt1'. */
        pmap_kern_ttb = base_pt1 | ttb_flags;
        cpuinfo_reinit_mmu(pmap_kern_ttb);
        /*
         * Initialize the first available KVA. As the kernel image is mapped
         * by sections, we are leaving some gap behind.
         */
        virtual_avail = (vm_offset_t)kern_pt2tab + NPG_IN_PT2TAB * PAGE_SIZE;
}

/*
 * Setup L2 page table page for given KVA.
 * Used in pre-bootstrap epoch.
 *
 * Note that we have allocated NKPT2PG pages for L2 page tables in advance
 * and used them for mapping KVA starting from KERNBASE. However, this is not
 * enough. Vectors and devices need L2 page tables too. Note that they are
 * even above VM_MAX_KERNEL_ADDRESS.
 */
static __inline vm_paddr_t
pmap_preboot_pt2pg_setup(vm_offset_t va)
{
        pt2_entry_t *pte2p, pte2;
        vm_paddr_t pt2pg_pa;

        /* Get associated entry in PT2TAB. */
        pte2p = kern_pt2tab_entry(va);

        /* Just return, if PT2s page exists already. */
        pte2 = pt2tab_load(pte2p);
        if (pte2_is_valid(pte2))
                return (pte2_pa(pte2));

        KASSERT(va >= VM_MAX_KERNEL_ADDRESS,
            ("%s: NKPT2PG too small", __func__));

        /*
         * Allocate page for PT2s and insert it to PT2TAB.
         * In other words, map it into PT2MAP space.
         */
        pt2pg_pa = pmap_preboot_get_pages(1);
        pt2tab_store(pte2p, PTE2_KPT(pt2pg_pa));

        /* Zero all PT2s in allocated page. */
        bzero((void*)pt2map_pt2pg(va), PAGE_SIZE);
        pte2_sync_range((pt2_entry_t *)pt2map_pt2pg(va), PAGE_SIZE);

        return (pt2pg_pa);
}

/*
 * Setup L2 page table for given KVA.
 * Used in pre-bootstrap epoch.
 */
static void
pmap_preboot_pt2_setup(vm_offset_t va)
{
        pt1_entry_t *pte1p;
        vm_paddr_t pt2pg_pa, pt2_pa;

        /* Setup PT2's page. */
        pt2pg_pa = pmap_preboot_pt2pg_setup(va);
        pt2_pa = page_pt2pa(pt2pg_pa, pte1_index(va));

        /* Insert PT2 to PT1. */
        pte1p = kern_pte1(va);
        pte1_store(pte1p, PTE1_LINK(pt2_pa));
}

/*
 * Get L2 page entry associated with given KVA.
 * Used in pre-bootstrap epoch.
 */
static __inline pt2_entry_t*
pmap_preboot_vtopte2(vm_offset_t va)
{
        pt1_entry_t *pte1p;

        /* Setup PT2 if needed. */
        pte1p = kern_pte1(va);
        if (!pte1_is_valid(pte1_load(pte1p))) /* XXX - sections ?! */
                pmap_preboot_pt2_setup(va);

        return (pt2map_entry(va));
}

/*
 * Pre-bootstrap epoch page(s) mapping(s).
 */
void
pmap_preboot_map_pages(vm_paddr_t pa, vm_offset_t va, u_int num)
{
        u_int i;
        pt2_entry_t *pte2p;

        /* Map all the pages. */
        for (i = 0; i < num; i++) {
                pte2p = pmap_preboot_vtopte2(va);
                pte2_store(pte2p, PTE2_KRW(pa));
                va += PAGE_SIZE;
                pa += PAGE_SIZE;
        }
}

/*
 * Pre-bootstrap epoch virtual space allocator.
 */
vm_offset_t
pmap_preboot_reserve_pages(u_int num)
{
        u_int i;
        vm_offset_t start, va;
        pt2_entry_t *pte2p;

        /* Allocate virtual space. */
        start = va = virtual_avail;
        virtual_avail += num * PAGE_SIZE;

        /* Zero the mapping. */
        for (i = 0; i < num; i++) {
                pte2p = pmap_preboot_vtopte2(va);
                pte2_store(pte2p, 0);
                va += PAGE_SIZE;
        }

        return (start);
}

/*
 * Pre-bootstrap epoch page(s) allocation and mapping(s).
 */
vm_offset_t
pmap_preboot_get_vpages(u_int num)
{
        vm_paddr_t pa;
        vm_offset_t va;

        /* Allocate physical page(s). */
        pa = pmap_preboot_get_pages(num);

        /* Allocate virtual space. */
        va = virtual_avail;
        virtual_avail += num * PAGE_SIZE;

        /* Map and zero all. */
        pmap_preboot_map_pages(pa, va, num);
        bzero((void *)va, num * PAGE_SIZE);

        return (va);
}

/*
 * Pre-bootstrap epoch page mapping(s) with attributes.
 */
void
pmap_preboot_map_attr(vm_paddr_t pa, vm_offset_t va, vm_size_t size,
    vm_prot_t prot, vm_memattr_t attr)
{
        u_int num;
        u_int l1_attr, l1_prot, l2_prot, l2_attr;
        pt1_entry_t *pte1p;
        pt2_entry_t *pte2p;

        l2_prot = (prot & VM_PROT_WRITE) ? PTE2_AP_KRW : PTE2_AP_KR;
        l2_prot |= (prot & VM_PROT_EXECUTE) ? PTE2_X : PTE2_NX;
        l2_attr = vm_memattr_to_pte2(attr);
        l1_prot = ATTR_TO_L1(l2_prot);
        l1_attr = ATTR_TO_L1(l2_attr);

        /* Map all the pages. */
        num = round_page(size);
        while (num > 0) {
                if ((((va | pa) & PTE1_OFFSET) == 0) && (num >= PTE1_SIZE)) {
                        pte1p = kern_pte1(va);
                        pte1_store(pte1p, PTE1_KERN(pa, l1_prot, l1_attr));
                        va += PTE1_SIZE;
                        pa += PTE1_SIZE;
                        num -= PTE1_SIZE;
                } else {
                        pte2p = pmap_preboot_vtopte2(va);
                        pte2_store(pte2p, PTE2_KERN(pa, l2_prot, l2_attr));
                        va += PAGE_SIZE;
                        pa += PAGE_SIZE;
                        num -= PAGE_SIZE;
                }
        }
}

/*
 * Extract from the kernel page table the physical address
 * that is mapped by the given virtual address "va".
 */
vm_paddr_t
pmap_kextract(vm_offset_t va)
{
        vm_paddr_t pa;
        pt1_entry_t pte1;
        pt2_entry_t pte2;

        pte1 = pte1_load(kern_pte1(va));
        if (pte1_is_section(pte1)) {
                pa = pte1_pa(pte1) | (va & PTE1_OFFSET);
        } else if (pte1_is_link(pte1)) {
                /*
                 * We should beware of concurrent promotion that changes
                 * pte1 at this point. However, it's not a problem as the
                 * PT2 page is preserved by promotion in PT2TAB. So even
                 * if it happens, using PT2MAP is still safe.
                 *
                 * QQQ: However, concurrent removal is a problem, which
                 *      ends in an abort on PT2MAP space. Locking must be
                 *      used to deal with this.
                 */
                pte2 = pte2_load(pt2map_entry(va));
                pa = pte2_pa(pte2) | (va & PTE2_OFFSET);
        } else {
                panic("%s: va %#x pte1 %#x", __func__, va, pte1);
        }
        return (pa);
}

/*
 * Extract from the kernel page table the physical address
 * that is mapped by the given virtual address "va". Also
 * return the L2 page table entry which maps the address.
 *
 * This is only intended to be used for panic dumps.
 */
vm_paddr_t
pmap_dump_kextract(vm_offset_t va, pt2_entry_t *pte2p)
{
        vm_paddr_t pa;
        pt1_entry_t pte1;
        pt2_entry_t pte2;

        pte1 = pte1_load(kern_pte1(va));
        if (pte1_is_section(pte1)) {
                pa = pte1_pa(pte1) | (va & PTE1_OFFSET);
                pte2 = pa | ATTR_TO_L2(pte1) | PTE2_V;
        } else if (pte1_is_link(pte1)) {
                pte2 = pte2_load(pt2map_entry(va));
                pa = pte2_pa(pte2);
        } else {
                pte2 = 0;
                pa = 0;
        }
        if (pte2p != NULL)
                *pte2p = pte2;
        return (pa);
}

/*****************************************************************************
 *
 * PMAP second stage initialization and utility functions
 * for bootstrap epoch.
 *
 * After pmap_bootstrap() is called, the following functions for
 * mappings can be used:
 *
 * void pmap_kenter(vm_offset_t va, vm_paddr_t pa);
 * void pmap_kremove(vm_offset_t va);
 * vm_offset_t pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end,
 *     int prot);
 *
 * NOTE: This stage is not SMP coherent, and physical page allocation is
 * not allowed during it.
 *
 *****************************************************************************/

/*
 * Initialize kernel PMAP locks and lists, kernel_pmap itself, and
 * reserve various virtual spaces for temporary mappings.
 */
void
pmap_bootstrap(vm_offset_t firstaddr)
{
        pt2_entry_t *unused __unused;
        struct pcpu *pc;

        /*
         * Initialize the kernel pmap (which is statically allocated).
         */
        PMAP_LOCK_INIT(kernel_pmap);
        kernel_l1pa = (vm_paddr_t)kern_pt1; /* for libkvm */
        kernel_pmap->pm_pt1 = kern_pt1;
        kernel_pmap->pm_pt2tab = kern_pt2tab;
        CPU_FILL(&kernel_pmap->pm_active); /* don't allow deactivation */
        TAILQ_INIT(&kernel_pmap->pm_pvchunk);

        /*
         * Initialize the global pv list lock.
         */
        rw_init(&pvh_global_lock, "pmap pv global");

        LIST_INIT(&allpmaps);

        /*
         * Request a spin mutex so that changes to allpmaps cannot be
         * preempted by smp_rendezvous_cpus().
         */
        mtx_init(&allpmaps_lock, "allpmaps", NULL, MTX_SPIN);
        mtx_lock_spin(&allpmaps_lock);
        LIST_INSERT_HEAD(&allpmaps, kernel_pmap, pm_list);
        mtx_unlock_spin(&allpmaps_lock);

        /*
         * Reserve some special page table entries/VA space for temporary
         * mapping of pages.
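         *
         * SYSMAP(type, pte2p, va, npages) reserves 'npages' pages of preboot
         * KVA, returns the starting address in 'va' (cast to 'type'), and
         * sets 'pte2p' to the PT2MAP entry that maps it.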
         */
#define SYSMAP(c, p, v, n) do {                         \
        v = (c)pmap_preboot_reserve_pages(n);           \
        p = pt2map_entry((vm_offset_t)v);               \
} while (0)

        /*
         * Local CMAP1/CMAP2 are used for zeroing and copying pages.
         * Local CMAP2 is also used for data cache cleaning.
         */
        pc = get_pcpu();
        mtx_init(&pc->pc_cmap_lock, "SYSMAPS", NULL, MTX_DEF);
        SYSMAP(caddr_t, pc->pc_cmap1_pte2p, pc->pc_cmap1_addr, 1);
        SYSMAP(caddr_t, pc->pc_cmap2_pte2p, pc->pc_cmap2_addr, 1);
        SYSMAP(vm_offset_t, pc->pc_qmap_pte2p, pc->pc_qmap_addr, 1);

        /*
         * Crashdump maps.
         */
        SYSMAP(caddr_t, unused, crashdumpmap, MAXDUMPPGS);

        /*
         * _tmppt is used for reading arbitrary physical pages via /dev/mem.
         */
        SYSMAP(caddr_t, unused, _tmppt, 1);

        /*
         * PADDR1 and PADDR2 are used by pmap_pte2_quick() and pmap_pte2(),
         * respectively. PADDR3 is used by pmap_pte2_ddb().
         */
        SYSMAP(pt2_entry_t *, PMAP1, PADDR1, 1);
        SYSMAP(pt2_entry_t *, PMAP2, PADDR2, 1);
#ifdef DDB
        SYSMAP(pt2_entry_t *, PMAP3, PADDR3, 1);
#endif
        mtx_init(&PMAP2mutex, "PMAP2", NULL, MTX_DEF);

        /*
         * Note that very shortly, in initarm(), we are going to initialize
         * the phys_avail[] array, and no further page allocation can happen
         * after that until the vm subsystem is initialized.
         */
        kernel_vm_end_new = kernel_vm_end;
        virtual_end = vm_max_kernel_address;
}

static void
pmap_init_reserved_pages(void)
{
        struct pcpu *pc;
        vm_offset_t pages;
        int i;

        CPU_FOREACH(i) {
                pc = pcpu_find(i);
                /*
                 * Skip if the mapping has already been initialized,
                 * i.e. this is the BSP.
                 */
                if (pc->pc_cmap1_addr != 0)
                        continue;
                mtx_init(&pc->pc_cmap_lock, "SYSMAPS", NULL, MTX_DEF);
                pages = kva_alloc(PAGE_SIZE * 3);
                if (pages == 0)
                        panic("%s: unable to allocate KVA", __func__);
                pc->pc_cmap1_pte2p = pt2map_entry(pages);
                pc->pc_cmap2_pte2p = pt2map_entry(pages + PAGE_SIZE);
                pc->pc_qmap_pte2p = pt2map_entry(pages + (PAGE_SIZE * 2));
                pc->pc_cmap1_addr = (caddr_t)pages;
                pc->pc_cmap2_addr = (caddr_t)(pages + PAGE_SIZE);
                pc->pc_qmap_addr = pages + (PAGE_SIZE * 2);
        }
}
SYSINIT(rpages_init, SI_SUB_CPU, SI_ORDER_ANY, pmap_init_reserved_pages, NULL);

/*
 * This function can already be used in the second initialization stage.
 * As such, it DOES NOT call pmap_growkernel() where PT2 allocation can
 * happen. So if used, be sure that the PT2 for the given virtual address
 * is allocated already!
 *
 * Add a wired page to the kva.
 * Note: not SMP coherent.
 */
static __inline void
pmap_kenter_prot_attr(vm_offset_t va, vm_paddr_t pa, uint32_t prot,
    uint32_t attr)
{
        pt1_entry_t *pte1p;
        pt2_entry_t *pte2p;

        pte1p = kern_pte1(va);
        if (!pte1_is_valid(pte1_load(pte1p))) { /* XXX - sections ?! */
                /*
                 * This is a very low level function, so PT2 and particularly
                 * the PT2PG associated with the given virtual address must
                 * be already allocated. It's a pain mainly during the pmap
                 * initialization stage. However, calling it after pmap
                 * initialization with a virtual address not below
                 * kernel_vm_end will lead to the same misery.
                 */
                if (!pte2_is_valid(pte2_load(kern_pt2tab_entry(va))))
                        panic("%s: kernel PT2 not allocated!", __func__);
        }

        pte2p = pt2map_entry(va);
        pte2_store(pte2p, PTE2_KERN(pa, prot, attr));
}

PMAP_INLINE void
pmap_kenter(vm_offset_t va, vm_paddr_t pa)
{

        pmap_kenter_prot_attr(va, pa, PTE2_AP_KRW, PTE2_ATTR_DEFAULT);
}

/*
 * Remove a page from the kernel pagetables.
 * Note: not SMP coherent.
 */
PMAP_INLINE void
pmap_kremove(vm_offset_t va)
{
        pt1_entry_t *pte1p;
        pt2_entry_t *pte2p;

        pte1p = kern_pte1(va);
        if (pte1_is_section(pte1_load(pte1p))) {
                pte1_clear(pte1p);
        } else {
                pte2p = pt2map_entry(va);
                pte2_clear(pte2p);
        }
}

/*
 * Share new kernel PT2PG with all pmaps.
 * The caller is responsible for maintaining TLB consistency.
 */
static void
pmap_kenter_pt2tab(vm_offset_t va, pt2_entry_t npte2)
{
        pmap_t pmap;
        pt2_entry_t *pte2p;

        mtx_lock_spin(&allpmaps_lock);
        LIST_FOREACH(pmap, &allpmaps, pm_list) {
                pte2p = pmap_pt2tab_entry(pmap, va);
                pt2tab_store(pte2p, npte2);
        }
        mtx_unlock_spin(&allpmaps_lock);
}

/*
 * Share new kernel PTE1 with all pmaps.
 * The caller is responsible for maintaining TLB consistency.
 */
static void
pmap_kenter_pte1(vm_offset_t va, pt1_entry_t npte1)
{
        pmap_t pmap;
        pt1_entry_t *pte1p;

        mtx_lock_spin(&allpmaps_lock);
        LIST_FOREACH(pmap, &allpmaps, pm_list) {
                pte1p = pmap_pte1(pmap, va);
                pte1_store(pte1p, npte1);
        }
        mtx_unlock_spin(&allpmaps_lock);
}

/*
 * Used to map a range of physical addresses into kernel
 * virtual address space.
 *
 * The value passed in '*virt' is a suggested virtual address for
 * the mapping. Architectures which can support a direct-mapped
 * physical to virtual region can return the appropriate address
 * within that region, leaving '*virt' unchanged. Other
 * architectures should map the pages starting at '*virt' and
 * update '*virt' with the first usable address after the mapped
 * region.
 *
 * NOTE: Read the comments above pmap_kenter_prot_attr() as
 * the function is used herein!
 */
vm_offset_t
pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
{
        vm_offset_t va, sva;
        vm_paddr_t pte1_offset;
        pt1_entry_t npte1;
        uint32_t l1prot, l2prot;
        uint32_t l1attr, l2attr;

        PDEBUG(1, printf("%s: virt = %#x, start = %#x, end = %#x (size = %#x),"
            " prot = %d\n", __func__, *virt, start, end, end - start, prot));

        l2prot = (prot & VM_PROT_WRITE) ? PTE2_AP_KRW : PTE2_AP_KR;
        l2prot |= (prot & VM_PROT_EXECUTE) ? PTE2_X : PTE2_NX;
        l1prot = ATTR_TO_L1(l2prot);

        l2attr = PTE2_ATTR_DEFAULT;
        l1attr = ATTR_TO_L1(l2attr);

        va = *virt;
        /*
         * Does the physical address range's size and alignment permit at
         * least one section mapping to be created?
         */
        pte1_offset = start & PTE1_OFFSET;
        if ((end - start) - ((PTE1_SIZE - pte1_offset) & PTE1_OFFSET) >=
            PTE1_SIZE) {
                /*
                 * Increase the starting virtual address so that its alignment
                 * does not preclude the use of section mappings.
                 */
                if ((va & PTE1_OFFSET) < pte1_offset)
                        va = pte1_trunc(va) + pte1_offset;
                else if ((va & PTE1_OFFSET) > pte1_offset)
                        va = pte1_roundup(va) + pte1_offset;
        }
        sva = va;
        while (start < end) {
                if ((start & PTE1_OFFSET) == 0 && end - start >= PTE1_SIZE) {
                        KASSERT((va & PTE1_OFFSET) == 0,
                            ("%s: misaligned va %#x", __func__, va));
                        npte1 = PTE1_KERN(start, l1prot, l1attr);
                        pmap_kenter_pte1(va, npte1);
                        va += PTE1_SIZE;
                        start += PTE1_SIZE;
                } else {
                        pmap_kenter_prot_attr(va, start, l2prot, l2attr);
                        va += PAGE_SIZE;
                        start += PAGE_SIZE;
                }
        }
        tlb_flush_range(sva, va - sva);
        *virt = va;
        return (sva);
}

/*
 * Make a temporary mapping for a physical address.
 * This is only intended to be used for panic dumps.
 */
void *
pmap_kenter_temporary(vm_paddr_t pa, int i)
{
        vm_offset_t va;

        /* QQQ: 'i' should be less than or equal to MAXDUMPPGS. */

        va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE);
        pmap_kenter(va, pa);
        tlb_flush_local(va);
        return ((void *)crashdumpmap);
}

/*************************************
 *
 * TLB & cache maintenance routines.
 *
 *************************************/

/*
 * We inline these within pmap.c for speed.
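 * A user pmap that is not active on any CPU cannot have live TLB entries,
 * so the flush is skipped for it.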
 */
PMAP_INLINE void
pmap_tlb_flush(pmap_t pmap, vm_offset_t va)
{

        if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
                tlb_flush(va);
}

PMAP_INLINE void
pmap_tlb_flush_range(pmap_t pmap, vm_offset_t sva, vm_size_t size)
{

        if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
                tlb_flush_range(sva, size);
}

/*
 * Abuse the pte2 nodes for unmapped kva to thread a kva freelist through.
 * Requirements:
 *  - Must deal with pages in order to ensure that none of the PTE2_* bits
 *    are ever set, PTE2_V in particular.
 *  - Assumes we can write to pte2s without pte2_store() atomic ops.
 *  - Assumes nothing will ever test these addresses for 0 to indicate
 *    no mapping instead of correctly checking PTE2_V.
 *  - Assumes a vm_offset_t will fit in a pte2 (true for arm).
 * Because PTE2_V is never set, there can be no mappings to invalidate.
 */
static vm_offset_t
pmap_pte2list_alloc(vm_offset_t *head)
{
        pt2_entry_t *pte2p;
        vm_offset_t va;

        va = *head;
        if (va == 0)
                panic("pmap_ptelist_alloc: exhausted ptelist KVA");
        pte2p = pt2map_entry(va);
        *head = *pte2p;
        if (*head & PTE2_V)
                panic("%s: va with PTE2_V set!", __func__);
        *pte2p = 0;
        return (va);
}

static void
pmap_pte2list_free(vm_offset_t *head, vm_offset_t va)
{
        pt2_entry_t *pte2p;

        if (va & PTE2_V)
                panic("%s: freeing va with PTE2_V set!", __func__);
        pte2p = pt2map_entry(va);
        *pte2p = *head; /* virtual! PTE2_V is 0 though */
        *head = va;
}

static void
pmap_pte2list_init(vm_offset_t *head, void *base, int npages)
{
        int i;
        vm_offset_t va;

        *head = 0;
        for (i = npages - 1; i >= 0; i--) {
                va = (vm_offset_t)base + i * PAGE_SIZE;
                pmap_pte2list_free(head, va);
        }
}

/*****************************************************************************
 *
 * PMAP third and final stage initialization.
 *
 * After pmap_init() is called, the PMAP subsystem is fully initialized.
 *
 *****************************************************************************/

SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "VM/pmap parameters");

SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_max, CTLFLAG_RD, &pv_entry_max, 0,
    "Max number of PV entries");
SYSCTL_INT(_vm_pmap, OID_AUTO, shpgperproc, CTLFLAG_RD, &shpgperproc, 0,
    "Page share factor per proc");

static u_long nkpt2pg = NKPT2PG;
SYSCTL_ULONG(_vm_pmap, OID_AUTO, nkpt2pg, CTLFLAG_RD,
    &nkpt2pg, 0, "Pre-allocated pages for kernel PT2s");

static int sp_enabled = 1;
SYSCTL_INT(_vm_pmap, OID_AUTO, sp_enabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &sp_enabled, 0, "Are large page mappings enabled?");

bool
pmap_ps_enabled(pmap_t pmap __unused)
{

        return (sp_enabled != 0);
}

static SYSCTL_NODE(_vm_pmap, OID_AUTO, pte1, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "1MB page mapping counters");

static u_long pmap_pte1_demotions;
SYSCTL_ULONG(_vm_pmap_pte1, OID_AUTO, demotions, CTLFLAG_RD,
    &pmap_pte1_demotions, 0, "1MB page demotions");

static u_long pmap_pte1_mappings;
SYSCTL_ULONG(_vm_pmap_pte1, OID_AUTO, mappings, CTLFLAG_RD,
    &pmap_pte1_mappings, 0, "1MB page mappings");

static u_long pmap_pte1_p_failures;
SYSCTL_ULONG(_vm_pmap_pte1, OID_AUTO, p_failures, CTLFLAG_RD,
    &pmap_pte1_p_failures, 0, "1MB page promotion failures");

static u_long pmap_pte1_promotions;
SYSCTL_ULONG(_vm_pmap_pte1, OID_AUTO, promotions, CTLFLAG_RD,
    &pmap_pte1_promotions, 0, "1MB page promotions");

static u_long pmap_pte1_kern_demotions;
SYSCTL_ULONG(_vm_pmap_pte1, OID_AUTO, kern_demotions, CTLFLAG_RD,
    &pmap_pte1_kern_demotions, 0, "1MB page kernel demotions");

static u_long pmap_pte1_kern_promotions;
SYSCTL_ULONG(_vm_pmap_pte1, OID_AUTO, kern_promotions, CTLFLAG_RD,
    &pmap_pte1_kern_promotions, 0, "1MB page kernel promotions");

static __inline ttb_entry_t
pmap_ttb_get(pmap_t pmap)
{

        return (vtophys(pmap->pm_pt1) | ttb_flags);
}

/*
 * Initialize a vm_page's machine-dependent fields.
 *
 * Variations:
 * 1. Pages for L2 page tables are never managed. So, pv_list and
 *    pt2_wirecount can share the same physical space. However, proper
 *    initialization on a page alloc for page tables and reinitialization
 *    on the page free must be ensured.
 */
void
pmap_page_init(vm_page_t m)
{

        TAILQ_INIT(&m->md.pv_list);
        pt2_wirecount_init(m);
        m->md.pat_mode = VM_MEMATTR_DEFAULT;
}

/*
 * Virtualization hook for a faster way to zero a whole page.
 */
static __inline void
pagezero(void *page)
{

        bzero(page, PAGE_SIZE);
}

/*
 * Zero L2 page table page.
 * Use the same KVA as in pmap_zero_page().
 */
static __inline vm_paddr_t
pmap_pt2pg_zero(vm_page_t m)
{
        pt2_entry_t *cmap2_pte2p;
        vm_paddr_t pa;
        struct pcpu *pc;

        pa = VM_PAGE_TO_PHYS(m);

        /*
         * XXX: For now, we map the whole page even if it's already zero,
         *      to sync it even if the sync is only DSB.
         */
        sched_pin();
        pc = get_pcpu();
        cmap2_pte2p = pc->pc_cmap2_pte2p;
        mtx_lock(&pc->pc_cmap_lock);
        if (pte2_load(cmap2_pte2p) != 0)
                panic("%s: CMAP2 busy", __func__);
        pte2_store(cmap2_pte2p, PTE2_KERN_NG(pa, PTE2_AP_KRW,
            vm_page_pte2_attr(m)));
        /* Even a VM_ALLOC_ZERO request is only advisory. */
        if ((m->flags & PG_ZERO) == 0)
                pagezero(pc->pc_cmap2_addr);
        pte2_sync_range((pt2_entry_t *)pc->pc_cmap2_addr, PAGE_SIZE);
        pte2_clear(cmap2_pte2p);
        tlb_flush((vm_offset_t)pc->pc_cmap2_addr);

        /*
         * Unpin the thread before releasing the lock. Otherwise the thread
         * could be rescheduled while still bound to the current CPU, only
         * to unpin itself immediately upon resuming execution.
         */
        sched_unpin();
        mtx_unlock(&pc->pc_cmap_lock);

        return (pa);
}
1673
1674 /*
1675 * Init just allocated page as L2 page table(s) holder
1676 * and return its physical address.
1677 */
1678 static __inline vm_paddr_t
pmap_pt2pg_init(pmap_t pmap,vm_offset_t va,vm_page_t m)1679 pmap_pt2pg_init(pmap_t pmap, vm_offset_t va, vm_page_t m)
1680 {
1681 vm_paddr_t pa;
1682 pt2_entry_t *pte2p;
1683
1684 /* Check page attributes. */
1685 if (m->md.pat_mode != pt_memattr)
1686 pmap_page_set_memattr(m, pt_memattr);
1687
1688 /* Zero page and init wire counts. */
1689 pa = pmap_pt2pg_zero(m);
1690 pt2_wirecount_init(m);
1691
1692 /*
1693 * Map page to PT2MAP address space for given pmap.
1694 * Note that PT2MAP space is shared with all pmaps.
1695 */
1696 if (pmap == kernel_pmap)
1697 pmap_kenter_pt2tab(va, PTE2_KPT(pa));
1698 else {
1699 pte2p = pmap_pt2tab_entry(pmap, va);
1700 pt2tab_store(pte2p, PTE2_KPT_NG(pa));
1701 }
1702
1703 return (pa);
1704 }
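/*
 * A minimal usage sketch, mirroring the allocation path used later in this
 * file (see _pmap_allocpte2() and pmap_growkernel()):
 *
 *	m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO);
 *	if (m != NULL)
 *		pt2pg_pa = pmap_pt2pg_init(pmap, va, m);
 */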
1705
1706 /*
1707 * Initialize the pmap module.
1708 *
1709 * Called by vm_mem_init(), to initialize any structures that the pmap system
1710 * needs to map virtual memory.
1711 */
1712 void
1713 pmap_init(void)
1714 {
1715 vm_size_t s;
1716 pt2_entry_t *pte2p, pte2;
1717 u_int i, pte1_idx, pv_npg;
1718
1719 PDEBUG(1, printf("%s: phys_start = %#x\n", __func__, PHYSADDR));
1720
1721 /*
1722 * Initialize the vm page array entries for kernel pmap's
1723 * L2 page table pages allocated in advance.
1724 */
1725 pte1_idx = pte1_index(KERNBASE - PT2MAP_SIZE);
1726 pte2p = kern_pt2tab_entry(KERNBASE - PT2MAP_SIZE);
1727 for (i = 0; i < nkpt2pg + NPG_IN_PT2TAB; i++, pte2p++) {
1728 vm_paddr_t pa;
1729 vm_page_t m;
1730
1731 pte2 = pte2_load(pte2p);
1732 KASSERT(pte2_is_valid(pte2), ("%s: no valid entry", __func__));
1733
1734 pa = pte2_pa(pte2);
1735 m = PHYS_TO_VM_PAGE(pa);
1736 KASSERT(m >= vm_page_array &&
1737 m < &vm_page_array[vm_page_array_size],
1738 ("%s: L2 page table page is out of range", __func__));
1739
1740 m->pindex = pte1_idx;
1741 m->phys_addr = pa;
1742 pte1_idx += NPT2_IN_PG;
1743 }
1744
1745 /*
1746 * Initialize the address space (zone) for the pv entries. Set a
1747 * high water mark so that the system can recover from excessive
1748 * numbers of pv entries.
1749 */
1750 TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
1751 pv_entry_max = shpgperproc * maxproc + vm_cnt.v_page_count;
1752 TUNABLE_INT_FETCH("vm.pmap.pv_entry_max", &pv_entry_max);
1753 pv_entry_max = roundup(pv_entry_max, _NPCPV);
1754 pv_entry_high_water = 9 * (pv_entry_max / 10);
1755
1756 /*
1757 * Are large page mappings enabled?
1758 */
1759 TUNABLE_INT_FETCH("vm.pmap.sp_enabled", &sp_enabled);
1760 if (sp_enabled) {
1761 KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0,
1762 ("%s: can't assign to pagesizes[1]", __func__));
1763 pagesizes[1] = PTE1_SIZE;
1764 }
1765
1766 /*
1767 * Calculate the size of the pv head table for sections.
1768 * Handle the possibility that "vm_phys_segs[...].end" is zero.
1769 * Note that the table is only for sections which could be promoted.
1770 */
1771 first_managed_pa = pte1_trunc(vm_phys_segs[0].start);
1772 pv_npg = (pte1_trunc(vm_phys_segs[vm_phys_nsegs - 1].end - PAGE_SIZE)
1773 - first_managed_pa) / PTE1_SIZE + 1;
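	/*
	 * Worked example (hypothetical layout): with a single 256 MB segment
	 * running from 0x80000000 to 0x90000000, first_managed_pa is
	 * 0x80000000, pte1_trunc(0x90000000 - PAGE_SIZE) is 0x8ff00000, and
	 * pv_npg = (0x8ff00000 - 0x80000000) / PTE1_SIZE + 1 = 256, i.e.
	 * one entry per potential 1 MB section.
	 */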
1774
1775 /*
1776 * Allocate memory for the pv head table for sections.
1777 */
1778 s = (vm_size_t)(pv_npg * sizeof(struct md_page));
1779 s = round_page(s);
1780 pv_table = kmem_malloc(s, M_WAITOK | M_ZERO);
1781 for (i = 0; i < pv_npg; i++)
1782 TAILQ_INIT(&pv_table[i].pv_list);
1783
1784 pv_maxchunks = MAX(pv_entry_max / _NPCPV, maxproc);
1785 pv_chunkbase = (struct pv_chunk *)kva_alloc(PAGE_SIZE * pv_maxchunks);
1786 if (pv_chunkbase == NULL)
1787 panic("%s: not enough kvm for pv chunks", __func__);
1788 pmap_pte2list_init(&pv_vafree, pv_chunkbase, pv_maxchunks);
1789 }
1790
1791 /*
1792  * Add a list of wired pages to the KVA. This routine is
1793  * only used for temporary kernel mappings that do not need
1794  * to have page modification or references recorded.
1795  * Note that old mappings are simply written over.
1796  * The page *must* be wired.
1797  *
1798  * Note: SMP coherent. Uses a ranged shootdown IPI.
1799 */
1800 void
1801 pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
1802 {
1803 u_int anychanged;
1804 pt2_entry_t *epte2p, *pte2p, pte2;
1805 vm_page_t m;
1806 vm_paddr_t pa;
1807
1808 anychanged = 0;
1809 pte2p = pt2map_entry(sva);
1810 epte2p = pte2p + count;
1811 while (pte2p < epte2p) {
1812 m = *ma++;
1813 pa = VM_PAGE_TO_PHYS(m);
1814 pte2 = pte2_load(pte2p);
1815 if ((pte2_pa(pte2) != pa) ||
1816 (pte2_attr(pte2) != vm_page_pte2_attr(m))) {
1817 anychanged++;
1818 pte2_store(pte2p, PTE2_KERN(pa, PTE2_AP_KRW,
1819 vm_page_pte2_attr(m)));
1820 }
1821 pte2p++;
1822 }
1823 if (__predict_false(anychanged))
1824 tlb_flush_range(sva, count * PAGE_SIZE);
1825 }
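/*
 * Usage sketch, as done by the pv chunk code later in this file (see
 * get_pv_entry() and free_pv_chunk()): map one freshly allocated page at a
 * reserved KVA slot and tear it down again:
 *
 *	pmap_qenter((vm_offset_t)pc, &m, 1);
 *	...
 *	pmap_qremove((vm_offset_t)pc, 1);
 */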
1826
1827 /*
1828 * This routine tears out page mappings from the
1829 * kernel -- it is meant only for temporary mappings.
1830 * Note: SMP coherent. Uses a ranged shootdown IPI.
1831 */
1832 void
1833 pmap_qremove(vm_offset_t sva, int count)
1834 {
1835 vm_offset_t va;
1836
1837 va = sva;
1838 while (count-- > 0) {
1839 pmap_kremove(va);
1840 va += PAGE_SIZE;
1841 }
1842 tlb_flush_range(sva, va - sva);
1843 }
1844
1845 /*
1846 * Are we current address space or kernel?
1847 */
1848 static __inline int
1849 pmap_is_current(pmap_t pmap)
1850 {
1851
1852 return (pmap == kernel_pmap ||
1853 (pmap == vmspace_pmap(curthread->td_proc->p_vmspace)));
1854 }
1855
1856 /*
1857 * If the given pmap is not the current or kernel pmap, the returned
1858 * pte2 must be released by passing it to pmap_pte2_release().
1859 */
1860 static pt2_entry_t *
1861 pmap_pte2(pmap_t pmap, vm_offset_t va)
1862 {
1863 pt1_entry_t pte1;
1864 vm_paddr_t pt2pg_pa;
1865
1866 pte1 = pte1_load(pmap_pte1(pmap, va));
1867 if (pte1_is_section(pte1))
1868 panic("%s: attempt to map PTE1", __func__);
1869 if (pte1_is_link(pte1)) {
1870 /* Are we current address space or kernel? */
1871 if (pmap_is_current(pmap))
1872 return (pt2map_entry(va));
1873 /* Note that L2 page table size is not equal to PAGE_SIZE. */
1874 pt2pg_pa = trunc_page(pte1_link_pa(pte1));
1875 mtx_lock(&PMAP2mutex);
1876 if (pte2_pa(pte2_load(PMAP2)) != pt2pg_pa) {
1877 pte2_store(PMAP2, PTE2_KPT(pt2pg_pa));
1878 tlb_flush((vm_offset_t)PADDR2);
1879 }
1880 return (PADDR2 + (arm32_btop(va) & (NPTE2_IN_PG - 1)));
1881 }
1882 return (NULL);
1883 }
1884
1885 /*
1886 * Releases a pte2 that was obtained from pmap_pte2().
1887 * Be prepared for the pte2p being NULL.
1888 */
1889 static __inline void
1890 pmap_pte2_release(pt2_entry_t *pte2p)
1891 {
1892
1893 if ((pt2_entry_t *)(trunc_page((vm_offset_t)pte2p)) == PADDR2) {
1894 mtx_unlock(&PMAP2mutex);
1895 }
1896 }
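/*
 * Typical pmap_pte2()/pmap_pte2_release() pairing, as used by
 * pmap_extract() below:
 *
 *	pte2p = pmap_pte2(pmap, va);
 *	pte2 = pte2_load(pte2p);
 *	pmap_pte2_release(pte2p);
 *
 * The release is a no-op unless the lookup went through the PMAP2/PADDR2
 * window, in which case it drops PMAP2mutex.
 */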
1897
1898 /*
1899 * Super fast pmap_pte2 routine best used when scanning
1900 * the pv lists. This eliminates many coarse-grained
1901 * invltlb calls. Note that many of the pv list
1902 * scans are across different pmaps. It is very wasteful
1903 * to do an entire tlb flush for checking a single mapping.
1904 *
1905 * If the given pmap is not the current pmap, pvh_global_lock
1906 * must be held and curthread pinned to a CPU.
1907 */
1908 static pt2_entry_t *
1909 pmap_pte2_quick(pmap_t pmap, vm_offset_t va)
1910 {
1911 pt1_entry_t pte1;
1912 vm_paddr_t pt2pg_pa;
1913
1914 pte1 = pte1_load(pmap_pte1(pmap, va));
1915 if (pte1_is_section(pte1))
1916 panic("%s: attempt to map PTE1", __func__);
1917 if (pte1_is_link(pte1)) {
1918 /* Are we current address space or kernel? */
1919 if (pmap_is_current(pmap))
1920 return (pt2map_entry(va));
1921 rw_assert(&pvh_global_lock, RA_WLOCKED);
1922 KASSERT(curthread->td_pinned > 0,
1923 ("%s: curthread not pinned", __func__));
1924 /* Note that L2 page table size is not equal to PAGE_SIZE. */
1925 pt2pg_pa = trunc_page(pte1_link_pa(pte1));
1926 if (pte2_pa(pte2_load(PMAP1)) != pt2pg_pa) {
1927 pte2_store(PMAP1, PTE2_KPT(pt2pg_pa));
1928 #ifdef SMP
1929 PMAP1cpu = PCPU_GET(cpuid);
1930 #endif
1931 tlb_flush_local((vm_offset_t)PADDR1);
1932 PMAP1changed++;
1933 } else
1934 #ifdef SMP
1935 if (PMAP1cpu != PCPU_GET(cpuid)) {
1936 PMAP1cpu = PCPU_GET(cpuid);
1937 tlb_flush_local((vm_offset_t)PADDR1);
1938 PMAP1changedcpu++;
1939 } else
1940 #endif
1941 PMAP1unchanged++;
1942 return (PADDR1 + (arm32_btop(va) & (NPTE2_IN_PG - 1)));
1943 }
1944 return (NULL);
1945 }
1946
1947 /*
1948 * Routine: pmap_extract
1949 * Function:
1950 * Extract the physical page address associated
1951 * with the given map/virtual_address pair.
1952 */
1953 vm_paddr_t
1954 pmap_extract(pmap_t pmap, vm_offset_t va)
1955 {
1956 vm_paddr_t pa;
1957 pt1_entry_t pte1;
1958 pt2_entry_t *pte2p;
1959
1960 PMAP_LOCK(pmap);
1961 pte1 = pte1_load(pmap_pte1(pmap, va));
1962 if (pte1_is_section(pte1))
1963 pa = pte1_pa(pte1) | (va & PTE1_OFFSET);
1964 else if (pte1_is_link(pte1)) {
1965 pte2p = pmap_pte2(pmap, va);
1966 pa = pte2_pa(pte2_load(pte2p)) | (va & PTE2_OFFSET);
1967 pmap_pte2_release(pte2p);
1968 } else
1969 pa = 0;
1970 PMAP_UNLOCK(pmap);
1971 return (pa);
1972 }
1973
1974 /*
1975 * Routine: pmap_extract_and_hold
1976 * Function:
1977 * Atomically extract and hold the physical page
1978 * with the given pmap and virtual address pair
1979 * if that mapping permits the given protection.
1980 */
1981 vm_page_t
1982 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
1983 {
1984 vm_paddr_t pa;
1985 pt1_entry_t pte1;
1986 pt2_entry_t pte2, *pte2p;
1987 vm_page_t m;
1988
1989 m = NULL;
1990 PMAP_LOCK(pmap);
1991 pte1 = pte1_load(pmap_pte1(pmap, va));
1992 if (pte1_is_section(pte1)) {
1993 if (!(pte1 & PTE1_RO) || !(prot & VM_PROT_WRITE)) {
1994 pa = pte1_pa(pte1) | (va & PTE1_OFFSET);
1995 m = PHYS_TO_VM_PAGE(pa);
1996 if (!vm_page_wire_mapped(m))
1997 m = NULL;
1998 }
1999 } else if (pte1_is_link(pte1)) {
2000 pte2p = pmap_pte2(pmap, va);
2001 pte2 = pte2_load(pte2p);
2002 pmap_pte2_release(pte2p);
2003 if (pte2_is_valid(pte2) &&
2004 (!(pte2 & PTE2_RO) || !(prot & VM_PROT_WRITE))) {
2005 pa = pte2_pa(pte2);
2006 m = PHYS_TO_VM_PAGE(pa);
2007 if (!vm_page_wire_mapped(m))
2008 m = NULL;
2009 }
2010 }
2011 PMAP_UNLOCK(pmap);
2012 return (m);
2013 }
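/*
 * Caller-side sketch (the unwire call is an assumption, not taken from
 * this file): a page returned by pmap_extract_and_hold() is wired and must
 * eventually be released by the caller, e.g.
 *
 *	if ((m = pmap_extract_and_hold(pmap, va, VM_PROT_READ)) != NULL) {
 *		... access the page ...
 *		vm_page_unwire(m, PQ_ACTIVE);
 *	}
 */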
2014
2015 /*
2016 * Grow the number of kernel L2 page table entries, if needed.
2017 */
2018 void
2019 pmap_growkernel(vm_offset_t addr)
2020 {
2021 vm_page_t m;
2022 vm_paddr_t pt2pg_pa, pt2_pa;
2023 pt1_entry_t pte1;
2024 pt2_entry_t pte2;
2025
2026 PDEBUG(1, printf("%s: addr = %#x\n", __func__, addr));
2027 /*
2028 	 * At all times, kernel_vm_end is the first KVA for which the
2029 	 * underlying L2 page table is either not allocated or not linked
2030 	 * from the L1 page table (not considering sections), except for
2031 	 * two possible cases:
2032 	 *
2033 	 *   (1) in the very beginning, as long as pmap_growkernel() has not
2034 	 *       been called, it can be the first unused KVA (which is not
2035 	 *       rounded up to PTE1_SIZE),
2036 	 *
2037 	 *   (2) when all KVA space is mapped and vm_map_max(kernel_map) is
2038 	 *       not rounded up to PTE1_SIZE. (For example, it could be 0xFFFFFFFF.)
2039 */
2040 kernel_vm_end = pte1_roundup(kernel_vm_end);
2041 mtx_assert(&kernel_map->system_mtx, MA_OWNED);
2042 addr = roundup2(addr, PTE1_SIZE);
2043 if (addr - 1 >= vm_map_max(kernel_map))
2044 addr = vm_map_max(kernel_map);
2045 while (kernel_vm_end < addr) {
2046 pte1 = pte1_load(kern_pte1(kernel_vm_end));
2047 if (pte1_is_valid(pte1)) {
2048 kernel_vm_end += PTE1_SIZE;
2049 if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
2050 kernel_vm_end = vm_map_max(kernel_map);
2051 break;
2052 }
2053 continue;
2054 }
2055
2056 /*
2057 		 * kernel_vm_end_new is used in pmap_pinit() when kernel
2058 		 * mappings are entered into a new pmap all at once, to avoid a
2059 		 * race between pmap_kenter_pte1() and the kernel_vm_end increase.
2060 		 * The same applies to pmap_kenter_pt2tab().
2061 */
2062 kernel_vm_end_new = kernel_vm_end + PTE1_SIZE;
2063
2064 pte2 = pt2tab_load(kern_pt2tab_entry(kernel_vm_end));
2065 if (!pte2_is_valid(pte2)) {
2066 /*
2067 * Install new PT2s page into kernel PT2TAB.
2068 */
2069 m = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT |
2070 VM_ALLOC_NOFREE | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
2071 if (m == NULL)
2072 panic("%s: no memory to grow kernel", __func__);
2073 m->pindex = pte1_index(kernel_vm_end) & ~PT2PG_MASK;
2074
2075 /*
2076 			 * QQQ: Linking all the new L2 page tables from the L1
2077 			 *      page table now, i.e. pmap_kenter_pte1()'ing them
2078 			 *      all at once together with pmap_kenter_pt2tab(),
2079 			 *      could be a nice speedup. However,
2080 			 *      pmap_growkernel() does not happen so often...
2081 * QQQ: The other TTBR is another option.
2082 */
2083 pt2pg_pa = pmap_pt2pg_init(kernel_pmap, kernel_vm_end,
2084 m);
2085 } else
2086 pt2pg_pa = pte2_pa(pte2);
2087
2088 pt2_pa = page_pt2pa(pt2pg_pa, pte1_index(kernel_vm_end));
2089 pmap_kenter_pte1(kernel_vm_end, PTE1_LINK(pt2_pa));
2090
2091 kernel_vm_end = kernel_vm_end_new;
2092 if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
2093 kernel_vm_end = vm_map_max(kernel_map);
2094 break;
2095 }
2096 }
2097 }
2098
2099 static int
2100 kvm_size(SYSCTL_HANDLER_ARGS)
2101 {
2102 unsigned long ksize = vm_max_kernel_address - KERNBASE;
2103
2104 return (sysctl_handle_long(oidp, &ksize, 0, req));
2105 }
2106 SYSCTL_PROC(_vm, OID_AUTO, kvm_size,
2107 CTLTYPE_LONG | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 0, 0, kvm_size, "IU",
2108 "Size of KVM");
2109
2110 static int
2111 kvm_free(SYSCTL_HANDLER_ARGS)
2112 {
2113 unsigned long kfree = vm_max_kernel_address - kernel_vm_end;
2114
2115 return (sysctl_handle_long(oidp, &kfree, 0, req));
2116 }
2117 SYSCTL_PROC(_vm, OID_AUTO, kvm_free,
2118 CTLTYPE_LONG | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 0, 0, kvm_free, "IU",
2119 "Amount of KVM free");
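/*
 * Both oids are read-only; from userland they can be inspected with,
 * for example:
 *
 *	$ sysctl vm.kvm_size vm.kvm_free
 */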
2120
2121 /***********************************************
2122 *
2123 * Pmap allocation/deallocation routines.
2124 *
2125 ***********************************************/
2126
2127 /*
2128 * Initialize the pmap for the swapper process.
2129 */
2130 void
2131 pmap_pinit0(pmap_t pmap)
2132 {
2133 PDEBUG(1, printf("%s: pmap = %p\n", __func__, pmap));
2134
2135 PMAP_LOCK_INIT(pmap);
2136
2137 /*
2138 	 * The kernel page table directory and the pmap machinery around it
2139 	 * are already initialized; we are using them right here and now.
2140 	 * So, finish only the PMAP structure initialization for process0 ...
2141 *
2142 * Since the L1 page table and PT2TAB is shared with the kernel pmap,
2143 * which is already included in the list "allpmaps", this pmap does
2144 * not need to be inserted into that list.
2145 */
2146 pmap->pm_pt1 = kern_pt1;
2147 pmap->pm_pt2tab = kern_pt2tab;
2148 CPU_ZERO(&pmap->pm_active);
2149 PCPU_SET(curpmap, pmap);
2150 TAILQ_INIT(&pmap->pm_pvchunk);
2151 bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
2152 CPU_SET(0, &pmap->pm_active);
2153 }
2154
2155 static __inline void
2156 pte1_copy_nosync(pt1_entry_t *spte1p, pt1_entry_t *dpte1p, vm_offset_t sva,
2157 vm_offset_t eva)
2158 {
2159 u_int idx, count;
2160
2161 idx = pte1_index(sva);
2162 count = (pte1_index(eva) - idx + 1) * sizeof(pt1_entry_t);
2163 bcopy(spte1p + idx, dpte1p + idx, count);
2164 }
2165
2166 static __inline void
2167 pt2tab_copy_nosync(pt2_entry_t *spte2p, pt2_entry_t *dpte2p, vm_offset_t sva,
2168 vm_offset_t eva)
2169 {
2170 u_int idx, count;
2171
2172 idx = pt2tab_index(sva);
2173 count = (pt2tab_index(eva) - idx + 1) * sizeof(pt2_entry_t);
2174 bcopy(spte2p + idx, dpte2p + idx, count);
2175 }
2176
2177 /*
2178 * Initialize a preallocated and zeroed pmap structure,
2179 * such as one in a vmspace structure.
2180 */
2181 int
2182 pmap_pinit(pmap_t pmap)
2183 {
2184 pt1_entry_t *pte1p;
2185 pt2_entry_t *pte2p;
2186 vm_paddr_t pa, pt2tab_pa;
2187 u_int i;
2188
2189 PDEBUG(6, printf("%s: pmap = %p, pm_pt1 = %p\n", __func__, pmap,
2190 pmap->pm_pt1));
2191
2192 /*
2193 * No need to allocate L2 page table space yet but we do need
2194 * a valid L1 page table and PT2TAB table.
2195 *
2196 * Install shared kernel mappings to these tables. It's a little
2197 * tricky as some parts of KVA are reserved for vectors, devices,
2198 * and whatever else. These parts are supposed to be above
2199 * vm_max_kernel_address. Thus two regions should be installed:
2200 *
2201 * (1) <KERNBASE, kernel_vm_end),
2202 * (2) <vm_max_kernel_address, 0xFFFFFFFF>.
2203 *
2204 * QQQ: The second region should be stable enough to be installed
2205 * only once in time when the tables are allocated.
2206 	 * QQQ: Maybe copying both regions at once could be faster ...
2207 * QQQ: Maybe the other TTBR is an option.
2208 *
2209 * Finally, install own PT2TAB table to these tables.
2210 */
2211
2212 if (pmap->pm_pt1 == NULL) {
2213 pmap->pm_pt1 = kmem_alloc_contig(NB_IN_PT1,
2214 M_NOWAIT | M_ZERO, 0, -1UL, NB_IN_PT1, 0, pt_memattr);
2215 if (pmap->pm_pt1 == NULL)
2216 return (0);
2217 }
2218 if (pmap->pm_pt2tab == NULL) {
2219 /*
2220 		 * QQQ: (1) PT2TAB must be contiguous. If PT2TAB is only one
2221 		 *      page, which should be the only size on 32-bit systems,
2222 		 *      then we could allocate it with vm_page_alloc() and all
2223 		 *      the supporting machinery, like other L2 page table pages.
2224 * (2) Note that a process PT2TAB is special L2 page table
2225 * page. Its mapping in kernel_arena is permanent and can
2226 * be used no matter which process is current. Its mapping
2227 * in PT2MAP can be used only for current process.
2228 */
2229 pmap->pm_pt2tab = kmem_alloc_attr(NB_IN_PT2TAB,
2230 M_NOWAIT | M_ZERO, 0, -1UL, pt_memattr);
2231 if (pmap->pm_pt2tab == NULL) {
2232 /*
2233 * QQQ: As struct pmap is allocated from UMA with
2234 * UMA_ZONE_NOFREE flag, it's important to leave
2235 * no allocation in pmap if initialization failed.
2236 */
2237 kmem_free(pmap->pm_pt1, NB_IN_PT1);
2238 pmap->pm_pt1 = NULL;
2239 return (0);
2240 }
2241 /*
2242 		 * QQQ: Each L2 page table page vm_page_t has its pindex set to
2243 		 *      the pte1 index of the virtual address mapped by the page.
2244 		 *      This does not hold for non-kernel PT2TABs themselves.
2245 		 *      The pindex of these pages cannot be altered because of
2246 		 *      the way they are allocated now. However, it should not
2247 		 *      be a problem.
2248 */
2249 }
2250
2251 mtx_lock_spin(&allpmaps_lock);
2252 /*
2253 * To avoid race with pmap_kenter_pte1() and pmap_kenter_pt2tab(),
2254 * kernel_vm_end_new is used here instead of kernel_vm_end.
2255 */
2256 pte1_copy_nosync(kern_pt1, pmap->pm_pt1, KERNBASE,
2257 kernel_vm_end_new - 1);
2258 pte1_copy_nosync(kern_pt1, pmap->pm_pt1, vm_max_kernel_address,
2259 0xFFFFFFFF);
2260 pt2tab_copy_nosync(kern_pt2tab, pmap->pm_pt2tab, KERNBASE,
2261 kernel_vm_end_new - 1);
2262 pt2tab_copy_nosync(kern_pt2tab, pmap->pm_pt2tab, vm_max_kernel_address,
2263 0xFFFFFFFF);
2264 LIST_INSERT_HEAD(&allpmaps, pmap, pm_list);
2265 mtx_unlock_spin(&allpmaps_lock);
2266
2267 /*
2268 	 * Store the PT2MAP PT2 pages (a.k.a. PT2TAB) in the PT2TAB itself,
2269 	 * i.e. a self-reference mapping. The PT2TAB is private, but mapped
2270 	 * into the shared PT2MAP space, so the mapping must not be global.
2271 */
2272 pt2tab_pa = vtophys(pmap->pm_pt2tab);
2273 pte2p = pmap_pt2tab_entry(pmap, (vm_offset_t)PT2MAP);
2274 for (pa = pt2tab_pa, i = 0; i < NPG_IN_PT2TAB; i++, pa += PTE2_SIZE) {
2275 pt2tab_store(pte2p++, PTE2_KPT_NG(pa));
2276 }
2277
2278 /* Insert PT2MAP PT2s into pmap PT1. */
2279 pte1p = pmap_pte1(pmap, (vm_offset_t)PT2MAP);
2280 for (pa = pt2tab_pa, i = 0; i < NPT2_IN_PT2TAB; i++, pa += NB_IN_PT2) {
2281 pte1_store(pte1p++, PTE1_LINK(pa));
2282 }
2283
2284 /*
2285 	 * Now synchronize the new mappings made above.
2286 */
2287 pte1_sync_range(pmap->pm_pt1, NB_IN_PT1);
2288 pte2_sync_range(pmap->pm_pt2tab, NB_IN_PT2TAB);
2289
2290 CPU_ZERO(&pmap->pm_active);
2291 TAILQ_INIT(&pmap->pm_pvchunk);
2292 bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
2293
2294 return (1);
2295 }
2296
2297 #ifdef INVARIANTS
2298 static bool
2299 pt2tab_user_is_empty(pt2_entry_t *tab)
2300 {
2301 u_int i, end;
2302
2303 end = pt2tab_index(VM_MAXUSER_ADDRESS);
2304 for (i = 0; i < end; i++)
2305 if (tab[i] != 0) return (false);
2306 return (true);
2307 }
2308 #endif
2309 /*
2310 * Release any resources held by the given physical map.
2311 * Called when a pmap initialized by pmap_pinit is being released.
2312 * Should only be called if the map contains no valid mappings.
2313 */
2314 void
2315 pmap_release(pmap_t pmap)
2316 {
2317 #ifdef INVARIANTS
2318 vm_offset_t start, end;
2319 #endif
2320 KASSERT(pmap->pm_stats.resident_count == 0,
2321 ("%s: pmap resident count %ld != 0", __func__,
2322 pmap->pm_stats.resident_count));
2323 KASSERT(pt2tab_user_is_empty(pmap->pm_pt2tab),
2324 ("%s: has allocated user PT2(s)", __func__));
2325 KASSERT(CPU_EMPTY(&pmap->pm_active),
2326 ("%s: pmap %p is active on some CPU(s)", __func__, pmap));
2327
2328 mtx_lock_spin(&allpmaps_lock);
2329 LIST_REMOVE(pmap, pm_list);
2330 mtx_unlock_spin(&allpmaps_lock);
2331
2332 #ifdef INVARIANTS
2333 start = pte1_index(KERNBASE) * sizeof(pt1_entry_t);
2334 end = (pte1_index(0xFFFFFFFF) + 1) * sizeof(pt1_entry_t);
2335 bzero((char *)pmap->pm_pt1 + start, end - start);
2336
2337 start = pt2tab_index(KERNBASE) * sizeof(pt2_entry_t);
2338 end = (pt2tab_index(0xFFFFFFFF) + 1) * sizeof(pt2_entry_t);
2339 bzero((char *)pmap->pm_pt2tab + start, end - start);
2340 #endif
2341 /*
2342 	 * We leave PT1 and PT2TAB allocated in the released pmap,
2343 	 * so hopefully the UMA vmspace_zone will always be initialized
2344 	 * with the UMA_ZONE_NOFREE flag.
2345 */
2346 }
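/*
 * Lifecycle sketch (the caller is assumed to be the vmspace code, which is
 * not shown in this file): a pmap set up with pmap_pinit() is torn down
 * with pmap_release() only after all of its user mappings are gone:
 *
 *	if (pmap_pinit(pmap) == 0)
 *		... handle allocation failure ...
 *	...
 *	pmap_remove(pmap, VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
 *	pmap_release(pmap);
 */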
2347
2348 /*********************************************************
2349 *
2350 * L2 table pages and their pages management routines.
2351 *
2352 *********************************************************/
2353
2354 /*
2355 * Virtual interface for L2 page table wire counting.
2356 *
2357  * Each L2 page table in a page has its own counter, which counts the
2358  * number of valid mappings in the table. The global page counter counts
2359  * mappings in all tables in the page, plus the page's own PT2TAB mapping.
2360  *
2361  * During a promotion we leave the associated L2 page table counter
2362  * untouched, so the table (strictly, the page holding it) is never freed
2363  * if promoted.
2364  *
2365  * If a page's m->ref_count == 1, then no valid mappings exist in any L2
2366  * page table in the page and the page itself is mapped only in PT2TAB.
2367 */
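/*
 * Worked example of the rules above: a freshly initialized PT2 page has
 * ref_count == 1 (only its own PT2TAB mapping) and all pt2_wirecount[]
 * slots zero. After one 4KB mapping is entered through the i-th L2 table
 * in the page,
 *
 *	pt2_wirecount_inc(m, pte1_idx);
 *
 * leaves pt2_wirecount[i] == 1 and m->ref_count == 2, and
 * pt2pg_is_empty(m) becomes true again only once every such reference
 * has been dropped.
 */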
2368
2369 static __inline void
2370 pt2_wirecount_init(vm_page_t m)
2371 {
2372 u_int i;
2373
2374 /*
2375 	 * Note: The page m is allocated with the VM_ALLOC_WIRED flag and
2376 	 * m->ref_count should already be set correctly.
2377 	 * So, there is no need to set it again here.
2378 */
2379 for (i = 0; i < NPT2_IN_PG; i++)
2380 m->md.pt2_wirecount[i] = 0;
2381 }
2382
2383 static __inline void
2384 pt2_wirecount_inc(vm_page_t m, uint32_t pte1_idx)
2385 {
2386
2387 /*
2388 	 * Note: A just-modified pte2 (i.e. one already allocated)
2389 	 * acquires one extra reference, which must be
2390 	 * explicitly cleared. This influences the KASSERTs herein.
2391 * All L2 page tables in a page always belong to the same
2392 * pmap, so we allow only one extra reference for the page.
2393 */
2394 KASSERT(m->md.pt2_wirecount[pte1_idx & PT2PG_MASK] < (NPTE2_IN_PT2 + 1),
2395 ("%s: PT2 is overflowing ...", __func__));
2396 KASSERT(m->ref_count <= (NPTE2_IN_PG + 1),
2397 ("%s: PT2PG is overflowing ...", __func__));
2398
2399 m->ref_count++;
2400 m->md.pt2_wirecount[pte1_idx & PT2PG_MASK]++;
2401 }
2402
2403 static __inline void
2404 pt2_wirecount_dec(vm_page_t m, uint32_t pte1_idx)
2405 {
2406
2407 KASSERT(m->md.pt2_wirecount[pte1_idx & PT2PG_MASK] != 0,
2408 ("%s: PT2 is underflowing ...", __func__));
2409 KASSERT(m->ref_count > 1,
2410 ("%s: PT2PG is underflowing ...", __func__));
2411
2412 m->ref_count--;
2413 m->md.pt2_wirecount[pte1_idx & PT2PG_MASK]--;
2414 }
2415
2416 static __inline void
2417 pt2_wirecount_set(vm_page_t m, uint32_t pte1_idx, uint16_t count)
2418 {
2419
2420 KASSERT(count <= NPTE2_IN_PT2,
2421 ("%s: invalid count %u", __func__, count));
2422 KASSERT(m->ref_count > m->md.pt2_wirecount[pte1_idx & PT2PG_MASK],
2423 ("%s: PT2PG corrupting (%u, %u) ...", __func__, m->ref_count,
2424 m->md.pt2_wirecount[pte1_idx & PT2PG_MASK]));
2425
2426 m->ref_count -= m->md.pt2_wirecount[pte1_idx & PT2PG_MASK];
2427 m->ref_count += count;
2428 m->md.pt2_wirecount[pte1_idx & PT2PG_MASK] = count;
2429
2430 KASSERT(m->ref_count <= (NPTE2_IN_PG + 1),
2431 ("%s: PT2PG is overflowed (%u) ...", __func__, m->ref_count));
2432 }
2433
2434 static __inline uint32_t
2435 pt2_wirecount_get(vm_page_t m, uint32_t pte1_idx)
2436 {
2437
2438 return (m->md.pt2_wirecount[pte1_idx & PT2PG_MASK]);
2439 }
2440
2441 static __inline bool
2442 pt2_is_empty(vm_page_t m, vm_offset_t va)
2443 {
2444
2445 return (m->md.pt2_wirecount[pte1_index(va) & PT2PG_MASK] == 0);
2446 }
2447
2448 static __inline bool
2449 pt2_is_full(vm_page_t m, vm_offset_t va)
2450 {
2451
2452 return (m->md.pt2_wirecount[pte1_index(va) & PT2PG_MASK] ==
2453 NPTE2_IN_PT2);
2454 }
2455
2456 static __inline bool
2457 pt2pg_is_empty(vm_page_t m)
2458 {
2459
2460 return (m->ref_count == 1);
2461 }
2462
2463 /*
2464 * This routine is called if the L2 page table
2465 * is not mapped correctly.
2466 */
2467 static vm_page_t
2468 _pmap_allocpte2(pmap_t pmap, vm_offset_t va, u_int flags)
2469 {
2470 uint32_t pte1_idx;
2471 pt1_entry_t *pte1p;
2472 pt2_entry_t pte2;
2473 vm_page_t m;
2474 vm_paddr_t pt2pg_pa, pt2_pa;
2475
2476 pte1_idx = pte1_index(va);
2477 pte1p = pmap->pm_pt1 + pte1_idx;
2478
2479 KASSERT(pte1_load(pte1p) == 0,
2480 ("%s: pm_pt1[%#x] is not zero: %#x", __func__, pte1_idx,
2481 pte1_load(pte1p)));
2482
2483 pte2 = pt2tab_load(pmap_pt2tab_entry(pmap, va));
2484 if (!pte2_is_valid(pte2)) {
2485 /*
2486 * Install new PT2s page into pmap PT2TAB.
2487 */
2488 m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO);
2489 if (m == NULL) {
2490 if ((flags & PMAP_ENTER_NOSLEEP) == 0) {
2491 PMAP_UNLOCK(pmap);
2492 rw_wunlock(&pvh_global_lock);
2493 vm_wait(NULL);
2494 rw_wlock(&pvh_global_lock);
2495 PMAP_LOCK(pmap);
2496 }
2497
2498 /*
2499 * Indicate the need to retry. While waiting,
2500 * the L2 page table page may have been allocated.
2501 */
2502 return (NULL);
2503 }
2504 m->pindex = pte1_idx & ~PT2PG_MASK;
2505 pmap->pm_stats.resident_count++;
2506 pt2pg_pa = pmap_pt2pg_init(pmap, va, m);
2507 } else {
2508 pt2pg_pa = pte2_pa(pte2);
2509 m = PHYS_TO_VM_PAGE(pt2pg_pa);
2510 }
2511
2512 pt2_wirecount_inc(m, pte1_idx);
2513 pt2_pa = page_pt2pa(pt2pg_pa, pte1_idx);
2514 pte1_store(pte1p, PTE1_LINK(pt2_pa));
2515
2516 return (m);
2517 }
2518
2519 static vm_page_t
2520 pmap_allocpte2(pmap_t pmap, vm_offset_t va, u_int flags)
2521 {
2522 u_int pte1_idx;
2523 pt1_entry_t *pte1p, pte1;
2524 vm_page_t m;
2525
2526 pte1_idx = pte1_index(va);
2527 retry:
2528 pte1p = pmap->pm_pt1 + pte1_idx;
2529 pte1 = pte1_load(pte1p);
2530
2531 /*
2532 * This supports switching from a 1MB page to a
2533 * normal 4K page.
2534 */
2535 if (pte1_is_section(pte1)) {
2536 (void)pmap_demote_pte1(pmap, pte1p, va);
2537 /*
2538 * Reload pte1 after demotion.
2539 *
2540 		 * Note: Demotion can even fail, as either no PT2 is found for
2541 		 * the virtual address or a PT2PG cannot be allocated.
2542 */
2543 pte1 = pte1_load(pte1p);
2544 }
2545
2546 /*
2547 * If the L2 page table page is mapped, we just increment the
2548 * hold count, and activate it.
2549 */
2550 if (pte1_is_link(pte1)) {
2551 m = PHYS_TO_VM_PAGE(pte1_link_pa(pte1));
2552 pt2_wirecount_inc(m, pte1_idx);
2553 } else {
2554 /*
2555 		 * We get here if the PT2 isn't mapped, or if it
2556 		 * has been deallocated.
2557 */
2558 m = _pmap_allocpte2(pmap, va, flags);
2559 if (m == NULL && (flags & PMAP_ENTER_NOSLEEP) == 0)
2560 goto retry;
2561 }
2562
2563 return (m);
2564 }
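/*
 * Caller-side sketch (the return value is an assumption modeled on
 * pmap_enter(); the required locking is not shown): with PMAP_ENTER_NOSLEEP
 * the caller must cope with a NULL return, otherwise the allocation
 * retries internally:
 *
 *	mpte2 = pmap_allocpte2(pmap, va, flags);
 *	if (mpte2 == NULL && (flags & PMAP_ENTER_NOSLEEP) != 0)
 *		return (KERN_RESOURCE_SHORTAGE);
 */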
2565
2566 /*
2567 * Schedule the specified unused L2 page table page to be freed. Specifically,
2568 * add the page to the specified list of pages that will be released to the
2569 * physical memory manager after the TLB has been updated.
2570 */
2571 static __inline void
2572 pmap_add_delayed_free_list(vm_page_t m, struct spglist *free)
2573 {
2574
2575 /*
2576 	 * Put the page on a list so that it is released only after
2577 	 * *ALL* TLB shootdowns are done.
2578 */
2579 #ifdef PMAP_DEBUG
2580 pmap_zero_page_check(m);
2581 #endif
2582 m->flags |= PG_ZERO;
2583 SLIST_INSERT_HEAD(free, m, plinks.s.ss);
2584 }
2585
2586 /*
2587  * Unwire an L2 page tables page.
2588 */
2589 static void
2590 pmap_unwire_pt2pg(pmap_t pmap, vm_offset_t va, vm_page_t m)
2591 {
2592 pt1_entry_t *pte1p, opte1 __unused;
2593 pt2_entry_t *pte2p;
2594 uint32_t i;
2595
2596 KASSERT(pt2pg_is_empty(m),
2597 ("%s: pmap %p PT2PG %p wired", __func__, pmap, m));
2598
2599 /*
2600 	 * Unmap all L2 page tables in the page from the L1 page table.
2601 	 *
2602 	 * QQQ: Individual L2 page tables (except the last one) could be
2603 	 * unmapped earlier. However, for now we do it this way.
2604 */
2605 KASSERT(m->pindex == (pte1_index(va) & ~PT2PG_MASK),
2606 ("%s: pmap %p va %#x PT2PG %p bad index", __func__, pmap, va, m));
2607 pte1p = pmap->pm_pt1 + m->pindex;
2608 for (i = 0; i < NPT2_IN_PG; i++, pte1p++) {
2609 KASSERT(m->md.pt2_wirecount[i] == 0,
2610 ("%s: pmap %p PT2 %u (PG %p) wired", __func__, pmap, i, m));
2611 opte1 = pte1_load(pte1p);
2612 if (pte1_is_link(opte1)) {
2613 pte1_clear(pte1p);
2614 /*
2615 * Flush intermediate TLB cache.
2616 */
2617 pmap_tlb_flush(pmap, (m->pindex + i) << PTE1_SHIFT);
2618 }
2619 #ifdef INVARIANTS
2620 else
2621 KASSERT((opte1 == 0) || pte1_is_section(opte1),
2622 ("%s: pmap %p va %#x bad pte1 %x at %u", __func__,
2623 pmap, va, opte1, i));
2624 #endif
2625 }
2626
2627 /*
2628 * Unmap the page from PT2TAB.
2629 */
2630 pte2p = pmap_pt2tab_entry(pmap, va);
2631 (void)pt2tab_load_clear(pte2p);
2632 pmap_tlb_flush(pmap, pt2map_pt2pg(va));
2633
2634 m->ref_count = 0;
2635 pmap->pm_stats.resident_count--;
2636
2637 /*
2638 * This barrier is so that the ordinary store unmapping
2639 * the L2 page table page is globally performed before TLB shoot-
2640 * down is begun.
2641 */
2642 wmb();
2643 vm_wire_sub(1);
2644 }
2645
2646 /*
2647  * Decrements an L2 page table page's wire count, which is used to record the
2648 * number of valid page table entries within the page. If the wire count
2649 * drops to zero, then the page table page is unmapped. Returns true if the
2650 * page table page was unmapped and false otherwise.
2651 */
2652 static __inline bool
2653 pmap_unwire_pt2(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
2654 {
2655 pt2_wirecount_dec(m, pte1_index(va));
2656 if (pt2pg_is_empty(m)) {
2657 		 * QQQ: The wire count is zero, so the whole page should be
2658 		 * zeroed and we can set the PG_ZERO flag on it.
2659 		 * Note that when promotion is enabled, it takes somewhat
2660 		 * more effort. See pmap_unwire_pt2_all() below.
2661 * more efforts. See pmap_unwire_pt2_all() below.
2662 */
2663 pmap_unwire_pt2pg(pmap, va, m);
2664 pmap_add_delayed_free_list(m, free);
2665 return (true);
2666 } else
2667 return (false);
2668 }
2669
2670 /*
2671  * Drop an L2 page table page's wire count at once, which is used to record
2672 * the number of valid L2 page table entries within the page. If the wire
2673 * count drops to zero, then the L2 page table page is unmapped.
2674 */
2675 static __inline void
2676 pmap_unwire_pt2_all(pmap_t pmap, vm_offset_t va, vm_page_t m,
2677 struct spglist *free)
2678 {
2679 u_int pte1_idx = pte1_index(va);
2680
2681 KASSERT(m->pindex == (pte1_idx & ~PT2PG_MASK),
2682 ("%s: PT2 page's pindex is wrong", __func__));
2683 KASSERT(m->ref_count > pt2_wirecount_get(m, pte1_idx),
2684 ("%s: bad pt2 wire count %u > %u", __func__, m->ref_count,
2685 pt2_wirecount_get(m, pte1_idx)));
2686
2687 /*
2688 * It's possible that the L2 page table was never used.
2689 	 * This happens when a section was created without promotion.
2690 */
2691 if (pt2_is_full(m, va)) {
2692 pt2_wirecount_set(m, pte1_idx, 0);
2693
2694 /*
2695 		 * QQQ: We clear the L2 page table now, so that when the L2
2696 		 * page table page is about to be freed, we can set its PG_ZERO
2697 		 * flag ... This function is called only on section mappings,
2698 		 * so hopefully it's not too big an overhead.
2699 *
2700 * XXX: If pmap is current, existing PT2MAP mapping could be
2701 * used for zeroing.
2702 */
2703 pmap_zero_page_area(m, page_pt2off(pte1_idx), NB_IN_PT2);
2704 }
2705 #ifdef INVARIANTS
2706 else
2707 KASSERT(pt2_is_empty(m, va), ("%s: PT2 is not empty (%u)",
2708 __func__, pt2_wirecount_get(m, pte1_idx)));
2709 #endif
2710 if (pt2pg_is_empty(m)) {
2711 pmap_unwire_pt2pg(pmap, va, m);
2712 pmap_add_delayed_free_list(m, free);
2713 }
2714 }
2715
2716 /*
2717  * After removing an L2 page table entry, this routine is used to
2718 * conditionally free the page, and manage the hold/wire counts.
2719 */
2720 static bool
2721 pmap_unuse_pt2(pmap_t pmap, vm_offset_t va, struct spglist *free)
2722 {
2723 pt1_entry_t pte1;
2724 vm_page_t mpte;
2725
2726 if (va >= VM_MAXUSER_ADDRESS)
2727 return (false);
2728 pte1 = pte1_load(pmap_pte1(pmap, va));
2729 mpte = PHYS_TO_VM_PAGE(pte1_link_pa(pte1));
2730 return (pmap_unwire_pt2(pmap, va, mpte, free));
2731 }
2732
2733 /*************************************
2734 *
2735 * Page management routines.
2736 *
2737 *************************************/
2738
2739 static const uint32_t pc_freemask[_NPCM] = {
2740 [0 ... _NPCM - 2] = PC_FREEN,
2741 [_NPCM - 1] = PC_FREEL
2742 };
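/*
 * The pc_map[] bits track the free pv entries within a chunk: entry idx
 * maps to bit (idx % 32) of word (idx / 32), exactly as free_pv_entry()
 * below computes. For example, pv entry 40 of a chunk lives at pc_map[1],
 * bit 8, and setting that bit marks the entry free again.
 */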
2743
2744 SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0,
2745 "Current number of pv entries");
2746
2747 #ifdef PV_STATS
2748 static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail;
2749
2750 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0,
2751 "Current number of pv entry chunks");
2752 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0,
2753 "Current number of pv entry chunks allocated");
2754 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0,
2755 "Current number of pv entry chunks frees");
2756 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail,
2757 0, "Number of times tried to get a chunk page but failed.");
2758
2759 static long pv_entry_frees, pv_entry_allocs;
2760 static int pv_entry_spare;
2761
2762 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0,
2763 "Current number of pv entry frees");
2764 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs,
2765 0, "Current number of pv entry allocs");
2766 SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0,
2767 "Current number of spare pv entries");
2768 #endif
2769
2770 /*
2771  * Is the given page managed?
2772 */
2773 static __inline bool
2774 is_managed(vm_paddr_t pa)
2775 {
2776 vm_page_t m;
2777
2778 m = PHYS_TO_VM_PAGE(pa);
2779 if (m == NULL)
2780 return (false);
2781 return ((m->oflags & VPO_UNMANAGED) == 0);
2782 }
2783
2784 static __inline bool
2785 pte1_is_managed(pt1_entry_t pte1)
2786 {
2787
2788 return (is_managed(pte1_pa(pte1)));
2789 }
2790
2791 static __inline bool
2792 pte2_is_managed(pt2_entry_t pte2)
2793 {
2794
2795 return (is_managed(pte2_pa(pte2)));
2796 }
2797
2798 /*
2799 * We are in a serious low memory condition. Resort to
2800 * drastic measures to free some pages so we can allocate
2801 * another pv entry chunk.
2802 */
2803 static vm_page_t
2804 pmap_pv_reclaim(pmap_t locked_pmap)
2805 {
2806 struct pch newtail;
2807 struct pv_chunk *pc;
2808 struct md_page *pvh;
2809 pt1_entry_t *pte1p;
2810 pmap_t pmap;
2811 pt2_entry_t *pte2p, tpte2;
2812 pv_entry_t pv;
2813 vm_offset_t va;
2814 vm_page_t m, m_pc;
2815 struct spglist free;
2816 uint32_t inuse;
2817 int bit, field, freed;
2818
2819 PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
2820 pmap = NULL;
2821 m_pc = NULL;
2822 SLIST_INIT(&free);
2823 TAILQ_INIT(&newtail);
2824 while ((pc = TAILQ_FIRST(&pv_chunks)) != NULL && (pv_vafree == 0 ||
2825 SLIST_EMPTY(&free))) {
2826 TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
2827 if (pmap != pc->pc_pmap) {
2828 if (pmap != NULL) {
2829 if (pmap != locked_pmap)
2830 PMAP_UNLOCK(pmap);
2831 }
2832 pmap = pc->pc_pmap;
2833 /* Avoid deadlock and lock recursion. */
2834 if (pmap > locked_pmap)
2835 PMAP_LOCK(pmap);
2836 else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap)) {
2837 pmap = NULL;
2838 TAILQ_INSERT_TAIL(&newtail, pc, pc_lru);
2839 continue;
2840 }
2841 }
2842
2843 /*
2844 * Destroy every non-wired, 4 KB page mapping in the chunk.
2845 */
2846 freed = 0;
2847 for (field = 0; field < _NPCM; field++) {
2848 for (inuse = ~pc->pc_map[field] & pc_freemask[field];
2849 inuse != 0; inuse &= ~(1UL << bit)) {
2850 bit = ffs(inuse) - 1;
2851 pv = &pc->pc_pventry[field * 32 + bit];
2852 va = pv->pv_va;
2853 pte1p = pmap_pte1(pmap, va);
2854 if (pte1_is_section(pte1_load(pte1p)))
2855 continue;
2856 pte2p = pmap_pte2(pmap, va);
2857 tpte2 = pte2_load(pte2p);
2858 if ((tpte2 & PTE2_W) == 0)
2859 tpte2 = pte2_load_clear(pte2p);
2860 pmap_pte2_release(pte2p);
2861 if ((tpte2 & PTE2_W) != 0)
2862 continue;
2863 KASSERT(tpte2 != 0,
2864 ("pmap_pv_reclaim: pmap %p va %#x zero pte",
2865 pmap, va));
2866 pmap_tlb_flush(pmap, va);
2867 m = PHYS_TO_VM_PAGE(pte2_pa(tpte2));
2868 if (pte2_is_dirty(tpte2))
2869 vm_page_dirty(m);
2870 if ((tpte2 & PTE2_A) != 0)
2871 vm_page_aflag_set(m, PGA_REFERENCED);
2872 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
2873 if (TAILQ_EMPTY(&m->md.pv_list) &&
2874 (m->flags & PG_FICTITIOUS) == 0) {
2875 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
2876 if (TAILQ_EMPTY(&pvh->pv_list)) {
2877 vm_page_aflag_clear(m,
2878 PGA_WRITEABLE);
2879 }
2880 }
2881 pc->pc_map[field] |= 1UL << bit;
2882 pmap_unuse_pt2(pmap, va, &free);
2883 freed++;
2884 }
2885 }
2886 if (freed == 0) {
2887 TAILQ_INSERT_TAIL(&newtail, pc, pc_lru);
2888 continue;
2889 }
2890 /* Every freed mapping is for a 4 KB page. */
2891 pmap->pm_stats.resident_count -= freed;
2892 PV_STAT(pv_entry_frees += freed);
2893 PV_STAT(pv_entry_spare += freed);
2894 pv_entry_count -= freed;
2895 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2896 for (field = 0; field < _NPCM; field++)
2897 if (pc->pc_map[field] != pc_freemask[field]) {
2898 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc,
2899 pc_list);
2900 TAILQ_INSERT_TAIL(&newtail, pc, pc_lru);
2901
2902 /*
2903 * One freed pv entry in locked_pmap is
2904 * sufficient.
2905 */
2906 if (pmap == locked_pmap)
2907 goto out;
2908 break;
2909 }
2910 if (field == _NPCM) {
2911 PV_STAT(pv_entry_spare -= _NPCPV);
2912 PV_STAT(pc_chunk_count--);
2913 PV_STAT(pc_chunk_frees++);
2914 /* Entire chunk is free; return it. */
2915 m_pc = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc));
2916 pmap_qremove((vm_offset_t)pc, 1);
2917 pmap_pte2list_free(&pv_vafree, (vm_offset_t)pc);
2918 break;
2919 }
2920 }
2921 out:
2922 TAILQ_CONCAT(&pv_chunks, &newtail, pc_lru);
2923 if (pmap != NULL) {
2924 if (pmap != locked_pmap)
2925 PMAP_UNLOCK(pmap);
2926 }
2927 if (m_pc == NULL && pv_vafree != 0 && SLIST_EMPTY(&free)) {
2928 m_pc = SLIST_FIRST(&free);
2929 SLIST_REMOVE_HEAD(&free, plinks.s.ss);
2930 /* Recycle a freed page table page. */
2931 m_pc->ref_count = 1;
2932 vm_wire_add(1);
2933 }
2934 vm_page_free_pages_toq(&free, false);
2935 return (m_pc);
2936 }
2937
2938 static void
2939 free_pv_chunk(struct pv_chunk *pc)
2940 {
2941 vm_page_t m;
2942
2943 TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
2944 PV_STAT(pv_entry_spare -= _NPCPV);
2945 PV_STAT(pc_chunk_count--);
2946 PV_STAT(pc_chunk_frees++);
2947 /* entire chunk is free, return it */
2948 m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc));
2949 pmap_qremove((vm_offset_t)pc, 1);
2950 vm_page_unwire_noq(m);
2951 vm_page_free(m);
2952 pmap_pte2list_free(&pv_vafree, (vm_offset_t)pc);
2953 }
2954
2955 /*
2956 * Free the pv_entry back to the free list.
2957 */
2958 static void
2959 free_pv_entry(pmap_t pmap, pv_entry_t pv)
2960 {
2961 struct pv_chunk *pc;
2962 int idx, field, bit;
2963
2964 rw_assert(&pvh_global_lock, RA_WLOCKED);
2965 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2966 PV_STAT(pv_entry_frees++);
2967 PV_STAT(pv_entry_spare++);
2968 pv_entry_count--;
2969 pc = pv_to_chunk(pv);
2970 idx = pv - &pc->pc_pventry[0];
2971 field = idx / 32;
2972 bit = idx % 32;
2973 pc->pc_map[field] |= 1ul << bit;
2974 for (idx = 0; idx < _NPCM; idx++)
2975 if (pc->pc_map[idx] != pc_freemask[idx]) {
2976 /*
2977 * 98% of the time, pc is already at the head of the
2978 * list. If it isn't already, move it to the head.
2979 */
2980 if (__predict_false(TAILQ_FIRST(&pmap->pm_pvchunk) !=
2981 pc)) {
2982 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2983 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc,
2984 pc_list);
2985 }
2986 return;
2987 }
2988 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2989 free_pv_chunk(pc);
2990 }
2991
2992 /*
2993 * Get a new pv_entry, allocating a block from the system
2994 * when needed.
2995 */
2996 static pv_entry_t
2997 get_pv_entry(pmap_t pmap, bool try)
2998 {
2999 static const struct timeval printinterval = { 60, 0 };
3000 static struct timeval lastprint;
3001 int bit, field;
3002 pv_entry_t pv;
3003 struct pv_chunk *pc;
3004 vm_page_t m;
3005
3006 rw_assert(&pvh_global_lock, RA_WLOCKED);
3007 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3008 PV_STAT(pv_entry_allocs++);
3009 pv_entry_count++;
3010 if (pv_entry_count > pv_entry_high_water)
3011 if (ratecheck(&lastprint, &printinterval))
3012 printf("Approaching the limit on PV entries, consider "
3013 "increasing either the vm.pmap.shpgperproc or the "
3014 "vm.pmap.pv_entry_max tunable.\n");
3015 retry:
3016 pc = TAILQ_FIRST(&pmap->pm_pvchunk);
3017 if (pc != NULL) {
3018 for (field = 0; field < _NPCM; field++) {
3019 if (pc->pc_map[field]) {
3020 bit = ffs(pc->pc_map[field]) - 1;
3021 break;
3022 }
3023 }
3024 if (field < _NPCM) {
3025 pv = &pc->pc_pventry[field * 32 + bit];
3026 pc->pc_map[field] &= ~(1ul << bit);
3027 /* If this was the last item, move it to tail */
3028 for (field = 0; field < _NPCM; field++)
3029 if (pc->pc_map[field] != 0) {
3030 PV_STAT(pv_entry_spare--);
3031 return (pv); /* not full, return */
3032 }
3033 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
3034 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
3035 PV_STAT(pv_entry_spare--);
3036 return (pv);
3037 }
3038 }
3039 /*
3040 * Access to the pte2list "pv_vafree" is synchronized by the pvh
3041 * global lock. If "pv_vafree" is currently non-empty, it will
3042 * remain non-empty until pmap_pte2list_alloc() completes.
3043 */
3044 if (pv_vafree == 0 ||
3045 (m = vm_page_alloc_noobj(VM_ALLOC_WIRED)) == NULL) {
3046 if (try) {
3047 pv_entry_count--;
3048 PV_STAT(pc_chunk_tryfail++);
3049 return (NULL);
3050 }
3051 m = pmap_pv_reclaim(pmap);
3052 if (m == NULL)
3053 goto retry;
3054 }
3055 PV_STAT(pc_chunk_count++);
3056 PV_STAT(pc_chunk_allocs++);
3057 pc = (struct pv_chunk *)pmap_pte2list_alloc(&pv_vafree);
3058 pmap_qenter((vm_offset_t)pc, &m, 1);
3059 pc->pc_pmap = pmap;
3060 pc->pc_map[0] = pc_freemask[0] & ~1ul; /* preallocated bit 0 */
3061 for (field = 1; field < _NPCM; field++)
3062 pc->pc_map[field] = pc_freemask[field];
3063 TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
3064 pv = &pc->pc_pventry[0];
3065 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
3066 PV_STAT(pv_entry_spare += _NPCPV - 1);
3067 return (pv);
3068 }
3069
3070 /*
3071 * Create a pv entry for page at pa for
3072 * (pmap, va).
3073 */
3074 static void
3075 pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
3076 {
3077 pv_entry_t pv;
3078
3079 rw_assert(&pvh_global_lock, RA_WLOCKED);
3080 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3081 pv = get_pv_entry(pmap, false);
3082 pv->pv_va = va;
3083 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
3084 }
3085
3086 static __inline pv_entry_t
3087 pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
3088 {
3089 pv_entry_t pv;
3090
3091 rw_assert(&pvh_global_lock, RA_WLOCKED);
3092 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
3093 if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
3094 TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
3095 break;
3096 }
3097 }
3098 return (pv);
3099 }
3100
3101 static void
3102 pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
3103 {
3104 pv_entry_t pv;
3105
3106 pv = pmap_pvh_remove(pvh, pmap, va);
3107 KASSERT(pv != NULL, ("pmap_pvh_free: pv not found"));
3108 free_pv_entry(pmap, pv);
3109 }
3110
3111 static void
3112 pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
3113 {
3114 struct md_page *pvh;
3115
3116 rw_assert(&pvh_global_lock, RA_WLOCKED);
3117 pmap_pvh_free(&m->md, pmap, va);
3118 if (TAILQ_EMPTY(&m->md.pv_list) && (m->flags & PG_FICTITIOUS) == 0) {
3119 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
3120 if (TAILQ_EMPTY(&pvh->pv_list))
3121 vm_page_aflag_clear(m, PGA_WRITEABLE);
3122 }
3123 }
3124
3125 static void
3126 pmap_pv_demote_pte1(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
3127 {
3128 struct md_page *pvh;
3129 pv_entry_t pv;
3130 vm_offset_t va_last;
3131 vm_page_t m;
3132
3133 rw_assert(&pvh_global_lock, RA_WLOCKED);
3134 KASSERT((pa & PTE1_OFFSET) == 0,
3135 ("pmap_pv_demote_pte1: pa is not 1mpage aligned"));
3136
3137 /*
3138 * Transfer the 1mpage's pv entry for this mapping to the first
3139 * page's pv list.
3140 */
3141 pvh = pa_to_pvh(pa);
3142 va = pte1_trunc(va);
3143 pv = pmap_pvh_remove(pvh, pmap, va);
3144 KASSERT(pv != NULL, ("pmap_pv_demote_pte1: pv not found"));
3145 m = PHYS_TO_VM_PAGE(pa);
3146 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
3147 /* Instantiate the remaining NPTE2_IN_PT2 - 1 pv entries. */
3148 va_last = va + PTE1_SIZE - PAGE_SIZE;
3149 do {
3150 m++;
3151 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3152 ("pmap_pv_demote_pte1: page %p is not managed", m));
3153 va += PAGE_SIZE;
3154 pmap_insert_entry(pmap, va, m);
3155 } while (va < va_last);
3156 }
3157
3158 #if VM_NRESERVLEVEL > 0
3159 static void
3160 pmap_pv_promote_pte1(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
3161 {
3162 struct md_page *pvh;
3163 pv_entry_t pv;
3164 vm_offset_t va_last;
3165 vm_page_t m;
3166
3167 rw_assert(&pvh_global_lock, RA_WLOCKED);
3168 KASSERT((pa & PTE1_OFFSET) == 0,
3169 ("pmap_pv_promote_pte1: pa is not 1mpage aligned"));
3170
3171 /*
3172 * Transfer the first page's pv entry for this mapping to the
3173 * 1mpage's pv list. Aside from avoiding the cost of a call
3174 * to get_pv_entry(), a transfer avoids the possibility that
3175 * get_pv_entry() calls pmap_pv_reclaim() and that pmap_pv_reclaim()
3176 * removes one of the mappings that is being promoted.
3177 */
3178 m = PHYS_TO_VM_PAGE(pa);
3179 va = pte1_trunc(va);
3180 pv = pmap_pvh_remove(&m->md, pmap, va);
3181 KASSERT(pv != NULL, ("pmap_pv_promote_pte1: pv not found"));
3182 pvh = pa_to_pvh(pa);
3183 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
3184 /* Free the remaining NPTE2_IN_PT2 - 1 pv entries. */
3185 va_last = va + PTE1_SIZE - PAGE_SIZE;
3186 do {
3187 m++;
3188 va += PAGE_SIZE;
3189 pmap_pvh_free(&m->md, pmap, va);
3190 } while (va < va_last);
3191 }
3192 #endif
3193
3194 /*
3195 * Conditionally create a pv entry.
3196 */
3197 static bool
3198 pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
3199 {
3200 pv_entry_t pv;
3201
3202 rw_assert(&pvh_global_lock, RA_WLOCKED);
3203 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3204 if (pv_entry_count < pv_entry_high_water &&
3205 (pv = get_pv_entry(pmap, true)) != NULL) {
3206 pv->pv_va = va;
3207 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
3208 return (true);
3209 } else
3210 return (false);
3211 }
3212
3213 /*
3214 * Create the pv entries for each of the pages within a section.
3215 */
3216 static bool
3217 pmap_pv_insert_pte1(pmap_t pmap, vm_offset_t va, pt1_entry_t pte1, u_int flags)
3218 {
3219 struct md_page *pvh;
3220 pv_entry_t pv;
3221 bool noreclaim;
3222
3223 rw_assert(&pvh_global_lock, RA_WLOCKED);
3224 noreclaim = (flags & PMAP_ENTER_NORECLAIM) != 0;
3225 if ((noreclaim && pv_entry_count >= pv_entry_high_water) ||
3226 (pv = get_pv_entry(pmap, noreclaim)) == NULL)
3227 return (false);
3228 pv->pv_va = va;
3229 pvh = pa_to_pvh(pte1_pa(pte1));
3230 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
3231 return (true);
3232 }
3233
3234 static inline void
3235 pmap_tlb_flush_pte1(pmap_t pmap, vm_offset_t va, pt1_entry_t npte1)
3236 {
3237
3238 /* Kill all the small mappings or the big one only. */
3239 if (pte1_is_section(npte1))
3240 pmap_tlb_flush_range(pmap, pte1_trunc(va), PTE1_SIZE);
3241 else
3242 pmap_tlb_flush(pmap, pte1_trunc(va));
3243 }
3244
3245 /*
3246 * Update kernel pte1 on all pmaps.
3247 *
3248  * The following function is called on only one cpu with interrupts disabled.
3249  * In the SMP case, smp_rendezvous_cpus() is used to stop the other cpus; this
3250  * way nobody can invoke an explicit hardware table walk while pte1 is being
3251  * updated. Unsolicited hardware table walks can still happen, invoked by
3252  * speculative data or instruction prefetch, or even by a speculative
3253  * hardware table walk itself.
3254  *
3255  * The break-before-make approach should be implemented here. However, it is
3256  * not so easy for kernel mappings, as the kernel would have to unmap itself.
3257 */
3258 static void
3259 pmap_update_pte1_kernel(vm_offset_t va, pt1_entry_t npte1)
3260 {
3261 pmap_t pmap;
3262 pt1_entry_t *pte1p;
3263
3264 /*
3265 * Get current pmap. Interrupts should be disabled here
3266 * so PCPU_GET() is done atomically.
3267 */
3268 pmap = PCPU_GET(curpmap);
3269 if (pmap == NULL)
3270 pmap = kernel_pmap;
3271
3272 /*
3273 * (1) Change pte1 on current pmap.
3274 * (2) Flush all obsolete TLB entries on current CPU.
3275 * (3) Change pte1 on all pmaps.
3276 * (4) Flush all obsolete TLB entries on all CPUs in SMP case.
3277 */
3278
3279 pte1p = pmap_pte1(pmap, va);
3280 pte1_store(pte1p, npte1);
3281
3282 /* Kill all the small mappings or the big one only. */
3283 if (pte1_is_section(npte1)) {
3284 pmap_pte1_kern_promotions++;
3285 tlb_flush_range_local(pte1_trunc(va), PTE1_SIZE);
3286 } else {
3287 pmap_pte1_kern_demotions++;
3288 tlb_flush_local(pte1_trunc(va));
3289 }
3290
3291 /*
3292 	 * In the SMP case, this function is called when all cpus are at an
3293 	 * smp rendezvous, so there is no need to take the 'allpmaps_lock'
3294 	 * here. In the UP case, the function is called with this lock held.
3295 */
3296 LIST_FOREACH(pmap, &allpmaps, pm_list) {
3297 pte1p = pmap_pte1(pmap, va);
3298 pte1_store(pte1p, npte1);
3299 }
3300
3301 #ifdef SMP
3302 /* Kill all the small mappings or the big one only. */
3303 if (pte1_is_section(npte1))
3304 tlb_flush_range(pte1_trunc(va), PTE1_SIZE);
3305 else
3306 tlb_flush(pte1_trunc(va));
3307 #endif
3308 }
3309
3310 #ifdef SMP
3311 struct pte1_action {
3312 vm_offset_t va;
3313 pt1_entry_t npte1;
3314 u_int update; /* CPU that updates the PTE1 */
3315 };
3316
3317 static void
3318 pmap_update_pte1_action(void *arg)
3319 {
3320 struct pte1_action *act = arg;
3321
3322 if (act->update == PCPU_GET(cpuid))
3323 pmap_update_pte1_kernel(act->va, act->npte1);
3324 }
3325
3326 /*
3327 * Change pte1 on current pmap.
3328 * Note that kernel pte1 must be changed on all pmaps.
3329 *
3330 * According to the architecture reference manual published by ARM,
3331 * the behaviour is UNPREDICTABLE when two or more TLB entries map the same VA.
3332 * According to this manual, UNPREDICTABLE behaviours must never happen in
3333  * a viable system. In contrast, on x86 processors it is not specified which
3334  * of the TLB entries mapping the virtual address will be used, but the MMU
3335  * doesn't generate a bogus translation the way it does on Cortex-A8 rev 2
3336  * (Beaglebone Black).
3337  *
3338  * This is a problem when either promotion or demotion is being done: the pte1
3339  * update and the appropriate TLB flush must, in general, be done atomically.
3340 */
3341 static void
3342 pmap_change_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t va,
3343 pt1_entry_t npte1)
3344 {
3345
3346 if (pmap == kernel_pmap) {
3347 struct pte1_action act;
3348
3349 sched_pin();
3350 act.va = va;
3351 act.npte1 = npte1;
3352 act.update = PCPU_GET(cpuid);
3353 smp_rendezvous_cpus(all_cpus, smp_no_rendezvous_barrier,
3354 pmap_update_pte1_action, NULL, &act);
3355 sched_unpin();
3356 } else {
3357 register_t cspr;
3358
3359 /*
3360 * Use break-before-make approach for changing userland
3361 		 * mappings. It can cause L1 translation aborts on other
3362 		 * cores in the SMP case. So, special treatment is implemented
3363 * in pmap_fault(). To reduce the likelihood that another core
3364 * will be affected by the broken mapping, disable interrupts
3365 * until the mapping change is completed.
3366 */
3367 cspr = disable_interrupts(PSR_I);
3368 pte1_clear(pte1p);
3369 pmap_tlb_flush_pte1(pmap, va, npte1);
3370 pte1_store(pte1p, npte1);
3371 restore_interrupts(cspr);
3372 }
3373 }
3374 #else
3375 static void
3376 pmap_change_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t va,
3377 pt1_entry_t npte1)
3378 {
3379
3380 if (pmap == kernel_pmap) {
3381 mtx_lock_spin(&allpmaps_lock);
3382 pmap_update_pte1_kernel(va, npte1);
3383 mtx_unlock_spin(&allpmaps_lock);
3384 } else {
3385 register_t cspr;
3386
3387 /*
3388 * Use break-before-make approach for changing userland
3389 		 * mappings. It's absolutely safe in the UP case when
3390 		 * interrupts are disabled.
3391 */
3392 cspr = disable_interrupts(PSR_I);
3393 pte1_clear(pte1p);
3394 pmap_tlb_flush_pte1(pmap, va, npte1);
3395 pte1_store(pte1p, npte1);
3396 restore_interrupts(cspr);
3397 }
3398 }
3399 #endif
3400
3401 #if VM_NRESERVLEVEL > 0
3402 /*
3403 * Tries to promote the NPTE2_IN_PT2, contiguous 4KB page mappings that are
3404 * within a single page table page (PT2) to a single 1MB page mapping.
3405 * For promotion to occur, two conditions must be met: (1) the 4KB page
3406 * mappings must map aligned, contiguous physical memory and (2) the 4KB page
3407 * mappings must have identical characteristics.
3408 *
3409 * Managed (PG_MANAGED) mappings within the kernel address space are not
3410 * promoted. The reason is that kernel PTE1s are replicated in each pmap but
3411 * pmap_remove_write(), pmap_clear_modify(), and pmap_clear_reference() only
3412 * read the PTE1 from the kernel pmap.
3413 */
3414 static void
3415 pmap_promote_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t va)
3416 {
3417 pt1_entry_t npte1;
3418 pt2_entry_t *fpte2p, fpte2, fpte2_fav;
3419 pt2_entry_t *pte2p, pte2;
3420 vm_offset_t pteva __unused;
3421 vm_page_t m __unused;
3422
3423 PDEBUG(6, printf("%s(%p): try for va %#x pte1 %#x at %p\n", __func__,
3424 pmap, va, pte1_load(pte1p), pte1p));
3425
3426 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3427
3428 /*
3429 * Examine the first PTE2 in the specified PT2. Abort if this PTE2 is
3430 * either invalid, unused, or does not map the first 4KB physical page
3431 * within a 1MB page.
3432 */
3433 fpte2p = pmap_pte2_quick(pmap, pte1_trunc(va));
3434 fpte2 = pte2_load(fpte2p);
3435 if ((fpte2 & ((PTE2_FRAME & PTE1_OFFSET) | PTE2_A | PTE2_V)) !=
3436 (PTE2_A | PTE2_V)) {
3437 pmap_pte1_p_failures++;
3438 CTR3(KTR_PMAP, "%s: failure(1) for va %#x in pmap %p",
3439 __func__, va, pmap);
3440 return;
3441 }
3442 if (pte2_is_managed(fpte2) && pmap == kernel_pmap) {
3443 pmap_pte1_p_failures++;
3444 CTR3(KTR_PMAP, "%s: failure(2) for va %#x in pmap %p",
3445 __func__, va, pmap);
3446 return;
3447 }
3448 if ((fpte2 & (PTE2_NM | PTE2_RO)) == PTE2_NM) {
3449 /*
3450 * When page is not modified, PTE2_RO can be set without
3451 * a TLB invalidation.
3452 */
3453 fpte2 |= PTE2_RO;
3454 pte2_store(fpte2p, fpte2);
3455 }
3456
3457 /*
3458 * Examine each of the other PTE2s in the specified PT2. Abort if this
3459 * PTE2 maps an unexpected 4KB physical page or does not have identical
3460 * characteristics to the first PTE2.
3461 */
3462 fpte2_fav = (fpte2 & (PTE2_FRAME | PTE2_A | PTE2_V));
3463 fpte2_fav += PTE1_SIZE - PTE2_SIZE; /* examine from the end */
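/*
 * Example (illustrative): with PTE1_SIZE = 1 MB and PTE2_SIZE = 4 KB,
 * NPTE2_IN_PT2 is 256 and the i-th PTE2 must map the physical frame
 * pte2_pa(fpte2) + i * PTE2_SIZE. The loop below therefore starts with
 * the value expected in the last entry and decrements it by PTE2_SIZE
 * on each iteration.
 */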
3464 for (pte2p = fpte2p + NPTE2_IN_PT2 - 1; pte2p > fpte2p; pte2p--) {
3465 pte2 = pte2_load(pte2p);
3466 if ((pte2 & (PTE2_FRAME | PTE2_A | PTE2_V)) != fpte2_fav) {
3467 pmap_pte1_p_failures++;
3468 CTR3(KTR_PMAP, "%s: failure(3) for va %#x in pmap %p",
3469 __func__, va, pmap);
3470 return;
3471 }
3472 if ((pte2 & (PTE2_NM | PTE2_RO)) == PTE2_NM) {
3473 /*
3474 * When page is not modified, PTE2_RO can be set
3475 * without a TLB invalidation. See note above.
3476 */
3477 pte2 |= PTE2_RO;
3478 pte2_store(pte2p, pte2);
3479 pteva = pte1_trunc(va) | (pte2 & PTE1_OFFSET &
3480 PTE2_FRAME);
3481 CTR3(KTR_PMAP, "%s: protect for va %#x in pmap %p",
3482 __func__, pteva, pmap);
3483 }
3484 if ((pte2 & PTE2_PROMOTE) != (fpte2 & PTE2_PROMOTE)) {
3485 pmap_pte1_p_failures++;
3486 CTR3(KTR_PMAP, "%s: failure(4) for va %#x in pmap %p",
3487 __func__, va, pmap);
3488 return;
3489 }
3490
3491 fpte2_fav -= PTE2_SIZE;
3492 }
3493 /*
3494 * The page table page in its current state will stay in PT2TAB
3495 * until the PTE1 mapping the section is demoted by pmap_demote_pte1()
3496 * or destroyed by pmap_remove_pte1().
3497 *
3498 * Note that L2 page table size is not equal to PAGE_SIZE.
3499 */
3500 m = PHYS_TO_VM_PAGE(trunc_page(pte1_link_pa(pte1_load(pte1p))));
3501 KASSERT(m >= vm_page_array && m < &vm_page_array[vm_page_array_size],
3502 ("%s: PT2 page is out of range", __func__));
3503 KASSERT(m->pindex == (pte1_index(va) & ~PT2PG_MASK),
3504 ("%s: PT2 page's pindex is wrong", __func__));
3505
3506 /*
3507 * Get pte1 from pte2 format.
3508 */
3509 npte1 = (fpte2 & PTE1_FRAME) | ATTR_TO_L1(fpte2) | PTE1_V;
3510
3511 /*
3512 * Promote the pv entries.
3513 */
3514 if (pte2_is_managed(fpte2))
3515 pmap_pv_promote_pte1(pmap, va, pte1_pa(npte1));
3516
3517 /*
3518 * Promote the mappings.
3519 */
3520 pmap_change_pte1(pmap, pte1p, va, npte1);
3521
3522 pmap_pte1_promotions++;
3523 CTR3(KTR_PMAP, "%s: success for va %#x in pmap %p",
3524 __func__, va, pmap);
3525
3526 PDEBUG(6, printf("%s(%p): success for va %#x pte1 %#x(%#x) at %p\n",
3527 __func__, pmap, va, npte1, pte1_load(pte1p), pte1p));
3528 }
3529 #endif /* VM_NRESERVLEVEL > 0 */
3530
3531 /*
3532 * Zero L2 page table page.
3533 */
3534 static __inline void
3535 pmap_clear_pt2(pt2_entry_t *fpte2p)
3536 {
3537 pt2_entry_t *pte2p;
3538
3539 for (pte2p = fpte2p; pte2p < fpte2p + NPTE2_IN_PT2; pte2p++)
3540 pte2_clear(pte2p);
3541
3542 }
3543
3544 /*
3545 * Removes a 1MB page mapping from the kernel pmap.
3546 */
3547 static void
3548 pmap_remove_kernel_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t va)
3549 {
3550 vm_page_t m;
3551 uint32_t pte1_idx;
3552 pt2_entry_t *fpte2p;
3553 vm_paddr_t pt2_pa;
3554
3555 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3556 m = pmap_pt2_page(pmap, va);
3557 if (m == NULL)
3558 /*
3559 * QQQ: Is this function called only on promoted pte1?
3560 * We certainly create section mappings directly
3561 * (without promotion) in the kernel!
3562 */
3563 panic("%s: missing pt2 page", __func__);
3564
3565 pte1_idx = pte1_index(va);
3566
3567 /*
3568 * Initialize the L2 page table.
3569 */
3570 fpte2p = page_pt2(pt2map_pt2pg(va), pte1_idx);
3571 pmap_clear_pt2(fpte2p);
3572
3573 /*
3574 * Remove the mapping.
3575 */
3576 pt2_pa = page_pt2pa(VM_PAGE_TO_PHYS(m), pte1_idx);
3577 pmap_kenter_pte1(va, PTE1_LINK(pt2_pa));
3578
3579 /*
3580 * QQQ: We do not need to invalidate PT2MAP mapping
3581 * as we did not change it. I.e. the L2 page table page
3582 * was and still is mapped the same way.
3583 */
3584 }
3585
3586 /*
3587 * Do the things to unmap a section in a process
3588 */
3589 static void
3590 pmap_remove_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t sva,
3591 struct spglist *free)
3592 {
3593 pt1_entry_t opte1;
3594 struct md_page *pvh;
3595 vm_offset_t eva, va;
3596 vm_page_t m;
3597
3598 PDEBUG(6, printf("%s(%p): va %#x pte1 %#x at %p\n", __func__, pmap, sva,
3599 pte1_load(pte1p), pte1p));
3600
3601 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3602 KASSERT((sva & PTE1_OFFSET) == 0,
3603 ("%s: sva is not 1mpage aligned", __func__));
3604
3605 /*
3606 * Clear and invalidate the mapping. It should occupy one and only
3607 * one TLB entry, so pmap_tlb_flush() called with the aligned address
3608 * should be sufficient.
3609 */
3610 opte1 = pte1_load_clear(pte1p);
3611 pmap_tlb_flush(pmap, sva);
3612
3613 if (pte1_is_wired(opte1))
3614 pmap->pm_stats.wired_count -= PTE1_SIZE / PAGE_SIZE;
3615 pmap->pm_stats.resident_count -= PTE1_SIZE / PAGE_SIZE;
3616 if (pte1_is_managed(opte1)) {
3617 pvh = pa_to_pvh(pte1_pa(opte1));
3618 pmap_pvh_free(pvh, pmap, sva);
3619 eva = sva + PTE1_SIZE;
3620 for (va = sva, m = PHYS_TO_VM_PAGE(pte1_pa(opte1));
3621 va < eva; va += PAGE_SIZE, m++) {
3622 if (pte1_is_dirty(opte1))
3623 vm_page_dirty(m);
3624 if (opte1 & PTE1_A)
3625 vm_page_aflag_set(m, PGA_REFERENCED);
3626 if (TAILQ_EMPTY(&m->md.pv_list) &&
3627 TAILQ_EMPTY(&pvh->pv_list))
3628 vm_page_aflag_clear(m, PGA_WRITEABLE);
3629 }
3630 }
3631 if (pmap == kernel_pmap) {
3632 /*
3633 * L2 page table(s) can't be removed from kernel map as
3634 * kernel counts on it (stuff around pmap_growkernel()).
3635 */
3636 pmap_remove_kernel_pte1(pmap, pte1p, sva);
3637 } else {
3638 /*
3639 * Get associated L2 page table page.
3640 * It's possible that the page was never allocated.
3641 */
3642 m = pmap_pt2_page(pmap, sva);
3643 if (m != NULL)
3644 pmap_unwire_pt2_all(pmap, sva, m, free);
3645 }
3646 }
3647
3648 /*
3649 * Fills L2 page table page with mappings to consecutive physical pages.
3650 */
3651 static __inline void
3652 pmap_fill_pt2(pt2_entry_t *fpte2p, pt2_entry_t npte2)
3653 {
3654 pt2_entry_t *pte2p;
3655
3656 for (pte2p = fpte2p; pte2p < fpte2p + NPTE2_IN_PT2; pte2p++) {
3657 pte2_store(pte2p, npte2);
3658 npte2 += PTE2_SIZE;
3659 }
3660 }
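/*
 * Illustrative sketch (not compiled; example_demote_fill() is a
 * hypothetical helper, not part of this file): how pmap_fill_pt2()
 * recreates the 4 KB view of a 1 MB section during demotion, assuming
 * npte2 is derived from the old pte1 as done in pmap_demote_pte1().
 */
#if 0
static void
example_demote_fill(pt2_entry_t *fpte2p, pt1_entry_t opte1)
{
pt2_entry_t npte2;

/* Convert the section's frame and attributes to pte2 format. */
npte2 = pte1_pa(opte1) | ATTR_TO_L2(opte1) | PTE2_V;

/* NPTE2_IN_PT2 (256) consecutive 4 KB mappings. */
pmap_fill_pt2(fpte2p, npte2);
}
#endif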
3661
3662 /*
3663 * Tries to demote a 1MB page mapping. If demotion fails, the
3664 * 1MB page mapping is invalidated.
3665 */
3666 static bool
3667 pmap_demote_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t va)
3668 {
3669 pt1_entry_t opte1, npte1;
3670 pt2_entry_t *fpte2p, npte2;
3671 vm_paddr_t pt2pg_pa, pt2_pa;
3672 vm_page_t m;
3673 struct spglist free;
3674 uint32_t pte1_idx, isnew = 0;
3675
3676 PDEBUG(6, printf("%s(%p): try for va %#x pte1 %#x at %p\n", __func__,
3677 pmap, va, pte1_load(pte1p), pte1p));
3678
3679 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3680
3681 opte1 = pte1_load(pte1p);
3682 KASSERT(pte1_is_section(opte1), ("%s: opte1 not a section", __func__));
3683
3684 if ((opte1 & PTE1_A) == 0 || (m = pmap_pt2_page(pmap, va)) == NULL) {
3685 KASSERT(!pte1_is_wired(opte1),
3686 ("%s: PT2 page for a wired mapping is missing", __func__));
3687
3688 /*
3689 * Invalidate the 1MB page mapping and return
3690 * "failure" if the mapping was never accessed or the
3691 * allocation of the new page table page fails.
3692 */
3693 if ((opte1 & PTE1_A) == 0 ||
3694 (m = vm_page_alloc_noobj(VM_ALLOC_WIRED)) == NULL) {
3695 SLIST_INIT(&free);
3696 pmap_remove_pte1(pmap, pte1p, pte1_trunc(va), &free);
3697 vm_page_free_pages_toq(&free, false);
3698 CTR3(KTR_PMAP, "%s: failure for va %#x in pmap %p",
3699 __func__, va, pmap);
3700 return (false);
3701 }
3702 m->pindex = pte1_index(va) & ~PT2PG_MASK;
3703 if (va < VM_MAXUSER_ADDRESS)
3704 pmap->pm_stats.resident_count++;
3705
3706 isnew = 1;
3707
3708 /*
3709 * We init all L2 page tables in the page even if
3710 * we are going to change everything for one L2 page
3711 * table in a while.
3712 */
3713 pt2pg_pa = pmap_pt2pg_init(pmap, va, m);
3714 } else {
3715 if (va < VM_MAXUSER_ADDRESS) {
3716 if (pt2_is_empty(m, va))
3717 isnew = 1; /* Demoting section w/o promotion. */
3718 #ifdef INVARIANTS
3719 else
3720 KASSERT(pt2_is_full(m, va), ("%s: bad PT2 wire"
3721 " count %u", __func__,
3722 pt2_wirecount_get(m, pte1_index(va))));
3723 #endif
3724 }
3725 }
3726
3727 pt2pg_pa = VM_PAGE_TO_PHYS(m);
3728 pte1_idx = pte1_index(va);
3729 /*
3730 * If the pmap is current, then the PT2MAP can provide access to
3731 * the page table page (promoted L2 page tables are not unmapped).
3732 * Otherwise, temporarily map the L2 page table page (m) into
3733 * the kernel's address space at either PADDR1 or PADDR2.
3734 *
3735 * Note that L2 page table size is not equal to PAGE_SIZE.
3736 */
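/*
 * (Descriptive note: the PADDR1/PMAP1 slot is presumably safe to use only
 * while the thread is pinned and pvh_global_lock is write-owned, which is
 * exactly what the second branch below checks; otherwise the PADDR2/PMAP2
 * slot, serialized by PMAP2mutex, is used instead.)
 */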
3737 if (pmap_is_current(pmap))
3738 fpte2p = page_pt2(pt2map_pt2pg(va), pte1_idx);
3739 else if (curthread->td_pinned > 0 && rw_wowned(&pvh_global_lock)) {
3740 if (pte2_pa(pte2_load(PMAP1)) != pt2pg_pa) {
3741 pte2_store(PMAP1, PTE2_KPT(pt2pg_pa));
3742 #ifdef SMP
3743 PMAP1cpu = PCPU_GET(cpuid);
3744 #endif
3745 tlb_flush_local((vm_offset_t)PADDR1);
3746 PMAP1changed++;
3747 } else
3748 #ifdef SMP
3749 if (PMAP1cpu != PCPU_GET(cpuid)) {
3750 PMAP1cpu = PCPU_GET(cpuid);
3751 tlb_flush_local((vm_offset_t)PADDR1);
3752 PMAP1changedcpu++;
3753 } else
3754 #endif
3755 PMAP1unchanged++;
3756 fpte2p = page_pt2((vm_offset_t)PADDR1, pte1_idx);
3757 } else {
3758 mtx_lock(&PMAP2mutex);
3759 if (pte2_pa(pte2_load(PMAP2)) != pt2pg_pa) {
3760 pte2_store(PMAP2, PTE2_KPT(pt2pg_pa));
3761 tlb_flush((vm_offset_t)PADDR2);
3762 }
3763 fpte2p = page_pt2((vm_offset_t)PADDR2, pte1_idx);
3764 }
3765 pt2_pa = page_pt2pa(pt2pg_pa, pte1_idx);
3766 npte1 = PTE1_LINK(pt2_pa);
3767
3768 KASSERT((opte1 & PTE1_A) != 0,
3769 ("%s: opte1 is missing PTE1_A", __func__));
3770 KASSERT((opte1 & (PTE1_NM | PTE1_RO)) != PTE1_NM,
3771 ("%s: opte1 has PTE1_NM", __func__));
3772
3773 /*
3774 * Get pte2 from pte1 format.
3775 */
3776 npte2 = pte1_pa(opte1) | ATTR_TO_L2(opte1) | PTE2_V;
3777
3778 /*
3779 * If the L2 page table page is new, initialize it. If the mapping
3780 * has changed attributes, update the page table entries.
3781 */
3782 if (isnew != 0) {
3783 pt2_wirecount_set(m, pte1_idx, NPTE2_IN_PT2);
3784 pmap_fill_pt2(fpte2p, npte2);
3785 } else if ((pte2_load(fpte2p) & PTE2_PROMOTE) !=
3786 (npte2 & PTE2_PROMOTE))
3787 pmap_fill_pt2(fpte2p, npte2);
3788
3789 KASSERT(pte2_pa(pte2_load(fpte2p)) == pte2_pa(npte2),
3790 ("%s: fpte2p and npte2 map different physical addresses",
3791 __func__));
3792
3793 if (fpte2p == PADDR2)
3794 mtx_unlock(&PMAP2mutex);
3795
3796 /*
3797 * Demote the mapping. This pmap is locked. The old PTE1 has
3798 * PTE1_A set. If the old PTE1 does not have PTE1_RO set, it
3799 * also does not have PTE1_NM set. Thus, there is no danger of
3800 * a race with another processor changing the setting of PTE1_A
3801 * and/or PTE1_NM between the read above and the store below.
3802 */
3803 pmap_change_pte1(pmap, pte1p, va, npte1);
3804
3805 /*
3806 * Demote the pv entry. This depends on the earlier demotion
3807 * of the mapping. Specifically, the (re)creation of a per-
3808 * page pv entry might trigger the execution of pmap_pv_reclaim(),
3809 * which might reclaim a newly (re)created per-page pv entry
3810 * and destroy the associated mapping. In order to destroy
3811 * the mapping, the PTE1 must have already changed from mapping
3812 * the 1mpage to referencing the page table page.
3813 */
3814 if (pte1_is_managed(opte1))
3815 pmap_pv_demote_pte1(pmap, va, pte1_pa(opte1));
3816
3817 pmap_pte1_demotions++;
3818 CTR3(KTR_PMAP, "%s: success for va %#x in pmap %p",
3819 __func__, va, pmap);
3820
3821 PDEBUG(6, printf("%s(%p): success for va %#x pte1 %#x(%#x) at %p\n",
3822 __func__, pmap, va, npte1, pte1_load(pte1p), pte1p));
3823 return (true);
3824 }
3825
3826 /*
3827 * Insert the given physical page (p) at
3828 * the specified virtual address (v) in the
3829 * target physical map with the protection requested.
3830 *
3831 * If specified, the page will be wired down, meaning
3832 * that the related pte can not be reclaimed.
3833 *
3834 * NB: This is the only routine which MAY NOT lazy-evaluate
3835 * or lose information. That is, this routine must actually
3836 * insert this page into the given map NOW.
3837 */
3838 int
3839 pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
3840 u_int flags, int8_t psind)
3841 {
3842 pt1_entry_t *pte1p;
3843 pt2_entry_t *pte2p;
3844 pt2_entry_t npte2, opte2;
3845 pv_entry_t pv;
3846 vm_paddr_t opa, pa;
3847 vm_page_t mpte2, om;
3848 int rv;
3849
3850 va = trunc_page(va);
3851 KASSERT(va <= vm_max_kernel_address, ("%s: toobig", __func__));
3852 KASSERT(va < UPT2V_MIN_ADDRESS || va >= UPT2V_MAX_ADDRESS,
3853 ("%s: invalid to pmap_enter page table pages (va: 0x%x)", __func__,
3854 va));
3855 KASSERT((m->oflags & VPO_UNMANAGED) != 0 || !VA_IS_CLEANMAP(va),
3856 ("%s: managed mapping within the clean submap", __func__));
3857 if ((m->oflags & VPO_UNMANAGED) == 0)
3858 VM_PAGE_OBJECT_BUSY_ASSERT(m);
3859 KASSERT((flags & PMAP_ENTER_RESERVED) == 0,
3860 ("%s: flags %u has reserved bits set", __func__, flags));
3861 pa = VM_PAGE_TO_PHYS(m);
3862 npte2 = PTE2(pa, PTE2_A, vm_page_pte2_attr(m));
3863 if ((flags & VM_PROT_WRITE) == 0)
3864 npte2 |= PTE2_NM;
3865 if ((prot & VM_PROT_WRITE) == 0)
3866 npte2 |= PTE2_RO;
3867 KASSERT((npte2 & (PTE2_NM | PTE2_RO)) != PTE2_RO,
3868 ("%s: flags includes VM_PROT_WRITE but prot doesn't", __func__));
3869 if ((prot & VM_PROT_EXECUTE) == 0)
3870 npte2 |= PTE2_NX;
3871 if ((flags & PMAP_ENTER_WIRED) != 0)
3872 npte2 |= PTE2_W;
3873 if (va < VM_MAXUSER_ADDRESS)
3874 npte2 |= PTE2_U;
3875 if (pmap != kernel_pmap)
3876 npte2 |= PTE2_NG;
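/*
 * (Descriptive note: npte2 now carries the access bits used by this pmap:
 * PTE2_NM marks the page not-modified so that the first write aborts and
 * the dirty state can be emulated in software, PTE2_RO makes the mapping
 * read-only, PTE2_NX non-executable, PTE2_U user-accessible, and PTE2_NG
 * non-global for per-process mappings.)
 */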
3877
3878 rw_wlock(&pvh_global_lock);
3879 PMAP_LOCK(pmap);
3880 sched_pin();
3881 if (psind == 1) {
3882 /* Assert the required virtual and physical alignment. */
3883 KASSERT((va & PTE1_OFFSET) == 0,
3884 ("%s: va unaligned", __func__));
3885 KASSERT(m->psind > 0, ("%s: m->psind < psind", __func__));
3886 rv = pmap_enter_pte1(pmap, va, PTE1_PA(pa) | ATTR_TO_L1(npte2) |
3887 PTE1_V, flags, m);
3888 goto out;
3889 }
3890
3891 /*
3892 * In the case that a page table page is not
3893 * resident, we are creating it here.
3894 */
3895 if (va < VM_MAXUSER_ADDRESS) {
3896 mpte2 = pmap_allocpte2(pmap, va, flags);
3897 if (mpte2 == NULL) {
3898 KASSERT((flags & PMAP_ENTER_NOSLEEP) != 0,
3899 ("pmap_allocpte2 failed with sleep allowed"));
3900 rv = KERN_RESOURCE_SHORTAGE;
3901 goto out;
3902 }
3903 } else
3904 mpte2 = NULL;
3905 pte1p = pmap_pte1(pmap, va);
3906 if (pte1_is_section(pte1_load(pte1p)))
3907 panic("%s: attempted on 1MB page", __func__);
3908 pte2p = pmap_pte2_quick(pmap, va);
3909 if (pte2p == NULL)
3910 panic("%s: invalid L1 page table entry va=%#x", __func__, va);
3911
3912 om = NULL;
3913 opte2 = pte2_load(pte2p);
3914 opa = pte2_pa(opte2);
3915 /*
3916 * Mapping has not changed, must be protection or wiring change.
3917 */
3918 if (pte2_is_valid(opte2) && (opa == pa)) {
3919 /*
3920 * Wiring change, just update stats. We don't worry about
3921 * wiring PT2 pages as they remain resident as long as there
3922 * are valid mappings in them. Hence, if a user page is wired,
3923 * the PT2 page will be also.
3924 */
3925 if (pte2_is_wired(npte2) && !pte2_is_wired(opte2))
3926 pmap->pm_stats.wired_count++;
3927 else if (!pte2_is_wired(npte2) && pte2_is_wired(opte2))
3928 pmap->pm_stats.wired_count--;
3929
3930 /*
3931 * Remove extra pte2 reference
3932 */
3933 if (mpte2)
3934 pt2_wirecount_dec(mpte2, pte1_index(va));
3935 if ((m->oflags & VPO_UNMANAGED) == 0)
3936 om = m;
3937 goto validate;
3938 }
3939
3940 /*
3941 * QQQ: We think that changing the physical address of a writeable
3942 * mapping is not safe. Well, maybe on kernel address space with
3943 * correct locking it could make sense. However, we have no idea why
3944 * anyone would do that on user address space. Are we wrong?
3945 */
3946 KASSERT((opa == 0) || (opa == pa) ||
3947 !pte2_is_valid(opte2) || ((opte2 & PTE2_RO) != 0),
3948 ("%s: pmap %p va %#x(%#x) opa %#x pa %#x - gotcha %#x %#x!",
3949 __func__, pmap, va, opte2, opa, pa, flags, prot));
3950
3951 pv = NULL;
3952
3953 /*
3954 * Mapping has changed, invalidate old range and fall through to
3955 * handle validating new mapping.
3956 */
3957 if (opa) {
3958 if (pte2_is_wired(opte2))
3959 pmap->pm_stats.wired_count--;
3960 om = PHYS_TO_VM_PAGE(opa);
3961 if (om != NULL && (om->oflags & VPO_UNMANAGED) != 0)
3962 om = NULL;
3963 if (om != NULL)
3964 pv = pmap_pvh_remove(&om->md, pmap, va);
3965
3966 /*
3967 * Remove extra pte2 reference
3968 */
3969 if (mpte2 != NULL)
3970 pt2_wirecount_dec(mpte2, va >> PTE1_SHIFT);
3971 } else
3972 pmap->pm_stats.resident_count++;
3973
3974 /*
3975 * Enter on the PV list if part of our managed memory.
3976 */
3977 if ((m->oflags & VPO_UNMANAGED) == 0) {
3978 if (pv == NULL) {
3979 pv = get_pv_entry(pmap, false);
3980 pv->pv_va = va;
3981 }
3982 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
3983 } else if (pv != NULL)
3984 free_pv_entry(pmap, pv);
3985
3986 /*
3987 * Increment counters
3988 */
3989 if (pte2_is_wired(npte2))
3990 pmap->pm_stats.wired_count++;
3991
3992 validate:
3993 /*
3994 * Now validate mapping with desired protection/wiring.
3995 */
3996 if (prot & VM_PROT_WRITE) {
3997 if ((m->oflags & VPO_UNMANAGED) == 0)
3998 vm_page_aflag_set(m, PGA_WRITEABLE);
3999 }
4000
4001 /*
4002 * If the mapping or permission bits are different, we need
4003 * to update the pte2.
4004 *
4005 * QQQ: Think again and again what to do
4006 * if the mapping is going to be changed!
4007 */
4008 if ((opte2 & ~(PTE2_NM | PTE2_A)) != (npte2 & ~(PTE2_NM | PTE2_A))) {
4009 /*
4010 * Sync icache if exec permission and attribute VM_MEMATTR_WB_WA
4011 * is set. Do it now, before the mapping is stored and made
4012 * valid for hardware table walk. If done later, there is a race
4013 * for other threads of current process in lazy loading case.
4014 * Don't do it for kernel memory which is mapped with exec
4015 * permission even if the memory isn't going to hold executable
4016 * code. The only time when icache sync is needed is after
4017 * kernel module is loaded and the relocation info is processed.
4018 * And it's done in elf_cpu_load_file().
4019 *
4020 * QQQ: (1) Is there a better way or place
4021 * to sync the icache?
4022 * (2) Now, we do it on a page basis.
4023 */
4024 if ((prot & VM_PROT_EXECUTE) && pmap != kernel_pmap &&
4025 m->md.pat_mode == VM_MEMATTR_WB_WA &&
4026 (opa != pa || (opte2 & PTE2_NX)))
4027 cache_icache_sync_fresh(va, pa, PAGE_SIZE);
4028
4029 if (opte2 & PTE2_V) {
4030 /* Change mapping with break-before-make approach. */
4031 opte2 = pte2_load_clear(pte2p);
4032 pmap_tlb_flush(pmap, va);
4033 pte2_store(pte2p, npte2);
4034 if (om != NULL) {
4035 KASSERT((om->oflags & VPO_UNMANAGED) == 0,
4036 ("%s: om %p unmanaged", __func__, om));
4037 if ((opte2 & PTE2_A) != 0)
4038 vm_page_aflag_set(om, PGA_REFERENCED);
4039 if (pte2_is_dirty(opte2))
4040 vm_page_dirty(om);
4041 if (TAILQ_EMPTY(&om->md.pv_list) &&
4042 ((om->flags & PG_FICTITIOUS) != 0 ||
4043 TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
4044 vm_page_aflag_clear(om, PGA_WRITEABLE);
4045 }
4046 } else
4047 pte2_store(pte2p, npte2);
4048 }
4049 #if 0
4050 else {
4051 /*
4052 * QQQ: As long as both the access and the not-modified bits
4053 * are emulated by software, this should not happen. Some
4054 * analysis is needed if it really does. A missing TLB
4055 * flush somewhere could be the reason.
4056 */
4057 panic("%s: pmap %p va %#x opte2 %x npte2 %x !!", __func__, pmap,
4058 va, opte2, npte2);
4059 }
4060 #endif
4061
4062 #if VM_NRESERVLEVEL > 0
4063 /*
4064 * If both the L2 page table page and the reservation are fully
4065 * populated, then attempt promotion.
4066 */
4067 if ((mpte2 == NULL || pt2_is_full(mpte2, va)) &&
4068 sp_enabled && (m->flags & PG_FICTITIOUS) == 0 &&
4069 vm_reserv_level_iffullpop(m) == 0)
4070 pmap_promote_pte1(pmap, pte1p, va);
4071 #endif
4072
4073 rv = KERN_SUCCESS;
4074 out:
4075 sched_unpin();
4076 rw_wunlock(&pvh_global_lock);
4077 PMAP_UNLOCK(pmap);
4078 return (rv);
4079 }
4080
4081 /*
4082 * Do the things to unmap a page in a process.
4083 */
4084 static int
4085 pmap_remove_pte2(pmap_t pmap, pt2_entry_t *pte2p, vm_offset_t va,
4086 struct spglist *free)
4087 {
4088 pt2_entry_t opte2;
4089 vm_page_t m;
4090
4091 rw_assert(&pvh_global_lock, RA_WLOCKED);
4092 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4093
4094 /* Clear and invalidate the mapping. */
4095 opte2 = pte2_load_clear(pte2p);
4096 pmap_tlb_flush(pmap, va);
4097
4098 KASSERT(pte2_is_valid(opte2), ("%s: pmap %p va %#x not link pte2 %#x",
4099 __func__, pmap, va, opte2));
4100
4101 if (opte2 & PTE2_W)
4102 pmap->pm_stats.wired_count -= 1;
4103 pmap->pm_stats.resident_count -= 1;
4104 if (pte2_is_managed(opte2)) {
4105 m = PHYS_TO_VM_PAGE(pte2_pa(opte2));
4106 if (pte2_is_dirty(opte2))
4107 vm_page_dirty(m);
4108 if (opte2 & PTE2_A)
4109 vm_page_aflag_set(m, PGA_REFERENCED);
4110 pmap_remove_entry(pmap, m, va);
4111 }
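/*
 * (Descriptive note: pmap_unuse_pt2() presumably returns nonzero when the
 * removed pte2 was the last mapping in its L2 page table and the table's
 * page was freed; callers use this to stop scanning the defunct table.)
 */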
4112 return (pmap_unuse_pt2(pmap, va, free));
4113 }
4114
4115 /*
4116 * Remove a single page from a process address space.
4117 */
4118 static void
4119 pmap_remove_page(pmap_t pmap, vm_offset_t va, struct spglist *free)
4120 {
4121 pt2_entry_t *pte2p;
4122
4123 rw_assert(&pvh_global_lock, RA_WLOCKED);
4124 KASSERT(curthread->td_pinned > 0,
4125 ("%s: curthread not pinned", __func__));
4126 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4127 if ((pte2p = pmap_pte2_quick(pmap, va)) == NULL ||
4128 !pte2_is_valid(pte2_load(pte2p)))
4129 return;
4130 pmap_remove_pte2(pmap, pte2p, va, free);
4131 }
4132
4133 /*
4134 * Remove the given range of addresses from the specified map.
4135 *
4136 * It is assumed that the start and end are properly
4137 * rounded to the page size.
4138 */
4139 void
4140 pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
4141 {
4142 vm_offset_t nextva;
4143 pt1_entry_t *pte1p, pte1;
4144 pt2_entry_t *pte2p, pte2;
4145 struct spglist free;
4146
4147 /*
4148 * Perform an unsynchronized read. This is, however, safe.
4149 */
4150 if (pmap->pm_stats.resident_count == 0)
4151 return;
4152
4153 SLIST_INIT(&free);
4154
4155 rw_wlock(&pvh_global_lock);
4156 sched_pin();
4157 PMAP_LOCK(pmap);
4158
4159 /*
4160 * Special handling of removing one page. This is a very common
4161 * operation and it is easy to short-circuit some code.
4162 */
4163 if (sva + PAGE_SIZE == eva) {
4164 pte1 = pte1_load(pmap_pte1(pmap, sva));
4165 if (pte1_is_link(pte1)) {
4166 pmap_remove_page(pmap, sva, &free);
4167 goto out;
4168 }
4169 }
4170
4171 for (; sva < eva; sva = nextva) {
4172 /*
4173 * Calculate address for next L2 page table.
4174 */
4175 nextva = pte1_trunc(sva + PTE1_SIZE);
4176 if (nextva < sva)
4177 nextva = eva;
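/* (nextva < sva can happen only on wraparound at the top of VA space.) */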
4178 if (pmap->pm_stats.resident_count == 0)
4179 break;
4180
4181 pte1p = pmap_pte1(pmap, sva);
4182 pte1 = pte1_load(pte1p);
4183
4184 /*
4185 * Weed out invalid mappings. Note: we assume that the L1 page
4186 * table is always allocated, and in kernel virtual.
4187 */
4188 if (pte1 == 0)
4189 continue;
4190
4191 if (pte1_is_section(pte1)) {
4192 /*
4193 * Are we removing the entire large page? If not,
4194 * demote the mapping and fall through.
4195 */
4196 if (sva + PTE1_SIZE == nextva && eva >= nextva) {
4197 pmap_remove_pte1(pmap, pte1p, sva, &free);
4198 continue;
4199 } else if (!pmap_demote_pte1(pmap, pte1p, sva)) {
4200 /* The large page mapping was destroyed. */
4201 continue;
4202 }
4203 #ifdef INVARIANTS
4204 else {
4205 /* Update pte1 after demotion. */
4206 pte1 = pte1_load(pte1p);
4207 }
4208 #endif
4209 }
4210
4211 KASSERT(pte1_is_link(pte1), ("%s: pmap %p va %#x pte1 %#x at %p"
4212 " is not link", __func__, pmap, sva, pte1, pte1p));
4213
4214 /*
4215 * Limit our scan to either the end of the va represented
4216 * by the current L2 page table page, or to the end of the
4217 * range being removed.
4218 */
4219 if (nextva > eva)
4220 nextva = eva;
4221
4222 for (pte2p = pmap_pte2_quick(pmap, sva); sva != nextva;
4223 pte2p++, sva += PAGE_SIZE) {
4224 pte2 = pte2_load(pte2p);
4225 if (!pte2_is_valid(pte2))
4226 continue;
4227 if (pmap_remove_pte2(pmap, pte2p, sva, &free))
4228 break;
4229 }
4230 }
4231 out:
4232 sched_unpin();
4233 rw_wunlock(&pvh_global_lock);
4234 PMAP_UNLOCK(pmap);
4235 vm_page_free_pages_toq(&free, false);
4236 }
4237
4238 /*
4239 * Routine: pmap_remove_all
4240 * Function:
4241 * Removes this physical page from
4242 * all physical maps in which it resides.
4243 * Reflects back modify bits to the pager.
4244 *
4245 * Notes:
4246 * Original versions of this routine were very
4247 * inefficient because they iteratively called
4248 * pmap_remove (slow...)
4249 */
4250
4251 void
4252 pmap_remove_all(vm_page_t m)
4253 {
4254 struct md_page *pvh;
4255 pv_entry_t pv;
4256 pmap_t pmap;
4257 pt2_entry_t *pte2p, opte2;
4258 pt1_entry_t *pte1p;
4259 vm_offset_t va;
4260 struct spglist free;
4261
4262 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4263 ("%s: page %p is not managed", __func__, m));
4264 SLIST_INIT(&free);
4265 rw_wlock(&pvh_global_lock);
4266 sched_pin();
4267 if ((m->flags & PG_FICTITIOUS) != 0)
4268 goto small_mappings;
4269 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
4270 while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
4271 va = pv->pv_va;
4272 pmap = PV_PMAP(pv);
4273 PMAP_LOCK(pmap);
4274 pte1p = pmap_pte1(pmap, va);
4275 (void)pmap_demote_pte1(pmap, pte1p, va);
4276 PMAP_UNLOCK(pmap);
4277 }
4278 small_mappings:
4279 while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
4280 pmap = PV_PMAP(pv);
4281 PMAP_LOCK(pmap);
4282 pmap->pm_stats.resident_count--;
4283 pte1p = pmap_pte1(pmap, pv->pv_va);
4284 KASSERT(!pte1_is_section(pte1_load(pte1p)), ("%s: found "
4285 "a 1mpage in page %p's pv list", __func__, m));
4286 pte2p = pmap_pte2_quick(pmap, pv->pv_va);
4287 opte2 = pte2_load_clear(pte2p);
4288 pmap_tlb_flush(pmap, pv->pv_va);
4289 KASSERT(pte2_is_valid(opte2), ("%s: pmap %p va %x zero pte2",
4290 __func__, pmap, pv->pv_va));
4291 if (pte2_is_wired(opte2))
4292 pmap->pm_stats.wired_count--;
4293 if (opte2 & PTE2_A)
4294 vm_page_aflag_set(m, PGA_REFERENCED);
4295
4296 /*
4297 * Update the vm_page_t clean and reference bits.
4298 */
4299 if (pte2_is_dirty(opte2))
4300 vm_page_dirty(m);
4301 pmap_unuse_pt2(pmap, pv->pv_va, &free);
4302 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
4303 free_pv_entry(pmap, pv);
4304 PMAP_UNLOCK(pmap);
4305 }
4306 vm_page_aflag_clear(m, PGA_WRITEABLE);
4307 sched_unpin();
4308 rw_wunlock(&pvh_global_lock);
4309 vm_page_free_pages_toq(&free, false);
4310 }
4311
4312 /*
4313 * Just a subroutine for pmap_remove_pages() to reasonably satisfy
4314 * good coding style, a.k.a. 80 character line width limit hell.
4315 */
4316 static __inline void
4317 pmap_remove_pte1_quick(pmap_t pmap, pt1_entry_t pte1, pv_entry_t pv,
4318 struct spglist *free)
4319 {
4320 vm_paddr_t pa;
4321 vm_page_t m, mt, mpt2pg;
4322 struct md_page *pvh;
4323
4324 pa = pte1_pa(pte1);
4325 m = PHYS_TO_VM_PAGE(pa);
4326
4327 KASSERT(m->phys_addr == pa, ("%s: vm_page_t %p addr mismatch %#x %#x",
4328 __func__, m, m->phys_addr, pa));
4329 KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
4330 m < &vm_page_array[vm_page_array_size],
4331 ("%s: bad pte1 %#x", __func__, pte1));
4332
4333 if (pte1_is_dirty(pte1)) {
4334 for (mt = m; mt < &m[PTE1_SIZE / PAGE_SIZE]; mt++)
4335 vm_page_dirty(mt);
4336 }
4337
4338 pmap->pm_stats.resident_count -= PTE1_SIZE / PAGE_SIZE;
4339 pvh = pa_to_pvh(pa);
4340 TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
4341 if (TAILQ_EMPTY(&pvh->pv_list)) {
4342 for (mt = m; mt < &m[PTE1_SIZE / PAGE_SIZE]; mt++)
4343 if (TAILQ_EMPTY(&mt->md.pv_list))
4344 vm_page_aflag_clear(mt, PGA_WRITEABLE);
4345 }
4346 mpt2pg = pmap_pt2_page(pmap, pv->pv_va);
4347 if (mpt2pg != NULL)
4348 pmap_unwire_pt2_all(pmap, pv->pv_va, mpt2pg, free);
4349 }
4350
4351 /*
4352 * Just a subroutine for pmap_remove_pages() to reasonably satisfy
4353 * good coding style, a.k.a. 80 character line width limit hell.
4354 */
4355 static __inline void
4356 pmap_remove_pte2_quick(pmap_t pmap, pt2_entry_t pte2, pv_entry_t pv,
4357 struct spglist *free)
4358 {
4359 vm_paddr_t pa;
4360 vm_page_t m;
4361 struct md_page *pvh;
4362
4363 pa = pte2_pa(pte2);
4364 m = PHYS_TO_VM_PAGE(pa);
4365
4366 KASSERT(m->phys_addr == pa, ("%s: vm_page_t %p addr mismatch %#x %#x",
4367 __func__, m, m->phys_addr, pa));
4368 KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
4369 m < &vm_page_array[vm_page_array_size],
4370 ("%s: bad pte2 %#x", __func__, pte2));
4371
4372 if (pte2_is_dirty(pte2))
4373 vm_page_dirty(m);
4374
4375 pmap->pm_stats.resident_count--;
4376 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
4377 if (TAILQ_EMPTY(&m->md.pv_list) && (m->flags & PG_FICTITIOUS) == 0) {
4378 pvh = pa_to_pvh(pa);
4379 if (TAILQ_EMPTY(&pvh->pv_list))
4380 vm_page_aflag_clear(m, PGA_WRITEABLE);
4381 }
4382 pmap_unuse_pt2(pmap, pv->pv_va, free);
4383 }
4384
4385 /*
4386 * Remove all pages from the specified address space; this aids process
4387 * exit speeds. Also, this code is special-cased for the current process
4388 * only, but can have the more generic (and slightly slower) mode enabled.
4389 * This is much faster than pmap_remove in the case of running down
4390 * an entire address space.
4391 */
4392 void
4393 pmap_remove_pages(pmap_t pmap)
4394 {
4395 pt1_entry_t *pte1p, pte1;
4396 pt2_entry_t *pte2p, pte2;
4397 pv_entry_t pv;
4398 struct pv_chunk *pc, *npc;
4399 struct spglist free;
4400 int field, idx;
4401 int32_t bit;
4402 uint32_t inuse, bitmask;
4403 bool allfree;
4404
4405 /*
4406 * Assert that the given pmap is only active on the current
4407 * CPU. Unfortunately, we cannot block another CPU from
4408 * activating the pmap while this function is executing.
4409 */
4410 KASSERT(pmap == vmspace_pmap(curthread->td_proc->p_vmspace),
4411 ("%s: non-current pmap %p", __func__, pmap));
4412 #if defined(SMP) && defined(INVARIANTS)
4413 {
4414 cpuset_t other_cpus;
4415
4416 sched_pin();
4417 other_cpus = pmap->pm_active;
4418 CPU_CLR(PCPU_GET(cpuid), &other_cpus);
4419 sched_unpin();
4420 KASSERT(CPU_EMPTY(&other_cpus),
4421 ("%s: pmap %p active on other cpus", __func__, pmap));
4422 }
4423 #endif
4424 SLIST_INIT(&free);
4425 rw_wlock(&pvh_global_lock);
4426 PMAP_LOCK(pmap);
4427 sched_pin();
4428 TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
4429 KASSERT(pc->pc_pmap == pmap, ("%s: wrong pmap %p %p",
4430 __func__, pmap, pc->pc_pmap));
4431 allfree = true;
4432 for (field = 0; field < _NPCM; field++) {
4433 inuse = (~(pc->pc_map[field])) & pc_freemask[field];
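/*
 * (Descriptive note: pc_map bits are set for free pv entries, so
 * inverting the field and masking with pc_freemask yields the
 * in-use entries of this 32-bit field.)
 */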
4434 while (inuse != 0) {
4435 bit = ffs(inuse) - 1;
4436 bitmask = 1UL << bit;
4437 idx = field * 32 + bit;
4438 pv = &pc->pc_pventry[idx];
4439 inuse &= ~bitmask;
4440
4441 /*
4442 * Note that we cannot remove wired pages
4443 * from a process' mapping at this time
4444 */
4445 pte1p = pmap_pte1(pmap, pv->pv_va);
4446 pte1 = pte1_load(pte1p);
4447 if (pte1_is_section(pte1)) {
4448 if (pte1_is_wired(pte1)) {
4449 allfree = false;
4450 continue;
4451 }
4452 pte1_clear(pte1p);
4453 pmap_remove_pte1_quick(pmap, pte1, pv,
4454 &free);
4455 }
4456 else if (pte1_is_link(pte1)) {
4457 pte2p = pt2map_entry(pv->pv_va);
4458 pte2 = pte2_load(pte2p);
4459
4460 if (!pte2_is_valid(pte2)) {
4461 printf("%s: pmap %p va %#x "
4462 "pte2 %#x\n", __func__,
4463 pmap, pv->pv_va, pte2);
4464 panic("bad pte2");
4465 }
4466
4467 if (pte2_is_wired(pte2)) {
4468 allfree = false;
4469 continue;
4470 }
4471 pte2_clear(pte2p);
4472 pmap_remove_pte2_quick(pmap, pte2, pv,
4473 &free);
4474 } else {
4475 printf("%s: pmap %p va %#x pte1 %#x\n",
4476 __func__, pmap, pv->pv_va, pte1);
4477 panic("bad pte1");
4478 }
4479
4480 /* Mark free */
4481 PV_STAT(pv_entry_frees++);
4482 PV_STAT(pv_entry_spare++);
4483 pv_entry_count--;
4484 pc->pc_map[field] |= bitmask;
4485 }
4486 }
4487 if (allfree) {
4488 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
4489 free_pv_chunk(pc);
4490 }
4491 }
4492 tlb_flush_all_ng_local();
4493 sched_unpin();
4494 rw_wunlock(&pvh_global_lock);
4495 PMAP_UNLOCK(pmap);
4496 vm_page_free_pages_toq(&free, false);
4497 }
4498
4499 /*
4500 * This code makes some *MAJOR* assumptions:
4501 * 1. Current pmap & pmap exists.
4502 * 2. Not wired.
4503 * 3. Read access.
4504 * 4. No L2 page table pages.
4505 * but is *MUCH* faster than pmap_enter...
4506 */
4507 static vm_page_t
4508 pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
4509 vm_prot_t prot, vm_page_t mpt2pg)
4510 {
4511 pt2_entry_t *pte2p, pte2;
4512 vm_paddr_t pa;
4513 struct spglist free;
4514 uint32_t l2prot;
4515
4516 KASSERT(!VA_IS_CLEANMAP(va) ||
4517 (m->oflags & VPO_UNMANAGED) != 0,
4518 ("%s: managed mapping within the clean submap", __func__));
4519 rw_assert(&pvh_global_lock, RA_WLOCKED);
4520 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4521
4522 /*
4523 * In the case that a L2 page table page is not
4524 * resident, we are creating it here.
4525 */
4526 if (va < VM_MAXUSER_ADDRESS) {
4527 u_int pte1_idx;
4528 pt1_entry_t pte1, *pte1p;
4529 vm_paddr_t pt2_pa;
4530
4531 /*
4532 * Get L1 page table things.
4533 */
4534 pte1_idx = pte1_index(va);
4535 pte1p = pmap_pte1(pmap, va);
4536 pte1 = pte1_load(pte1p);
4537
4538 if (mpt2pg && (mpt2pg->pindex == (pte1_idx & ~PT2PG_MASK))) {
4539 /*
4540 * Each of NPT2_IN_PG L2 page tables on the page can
4541 * come here. Make sure that associated L1 page table
4542 * link is established.
4543 *
4544 * QQQ: It turns out that we don't establish all links
4545 * to L2 page tables for a newly allocated L2 page
4546 * table page.
4547 */
4548 KASSERT(!pte1_is_section(pte1),
4549 ("%s: pte1 %#x is section", __func__, pte1));
4550 if (!pte1_is_link(pte1)) {
4551 pt2_pa = page_pt2pa(VM_PAGE_TO_PHYS(mpt2pg),
4552 pte1_idx);
4553 pte1_store(pte1p, PTE1_LINK(pt2_pa));
4554 }
4555 pt2_wirecount_inc(mpt2pg, pte1_idx);
4556 } else {
4557 /*
4558 * If the L2 page table page is mapped, we just
4559 * increment the hold count, and activate it.
4560 */
4561 if (pte1_is_section(pte1)) {
4562 return (NULL);
4563 } else if (pte1_is_link(pte1)) {
4564 mpt2pg = PHYS_TO_VM_PAGE(pte1_link_pa(pte1));
4565 pt2_wirecount_inc(mpt2pg, pte1_idx);
4566 } else {
4567 mpt2pg = _pmap_allocpte2(pmap, va,
4568 PMAP_ENTER_NOSLEEP);
4569 if (mpt2pg == NULL)
4570 return (NULL);
4571 }
4572 }
4573 } else {
4574 mpt2pg = NULL;
4575 }
4576
4577 /*
4578 * This call to pt2map_entry() makes the assumption that we are
4579 * entering the page into the current pmap. In order to support
4580 * quick entry into any pmap, one would likely use pmap_pte2_quick().
4581 * But that isn't as quick as pt2map_entry().
4582 */
4583 pte2p = pt2map_entry(va);
4584 pte2 = pte2_load(pte2p);
4585 if (pte2_is_valid(pte2)) {
4586 if (mpt2pg != NULL) {
4587 /*
4588 * Remove extra pte2 reference
4589 */
4590 pt2_wirecount_dec(mpt2pg, pte1_index(va));
4591 mpt2pg = NULL;
4592 }
4593 return (NULL);
4594 }
4595
4596 /*
4597 * Enter on the PV list if part of our managed memory.
4598 */
4599 if ((m->oflags & VPO_UNMANAGED) == 0 &&
4600 !pmap_try_insert_pv_entry(pmap, va, m)) {
4601 if (mpt2pg != NULL) {
4602 SLIST_INIT(&free);
4603 if (pmap_unwire_pt2(pmap, va, mpt2pg, &free)) {
4604 pmap_tlb_flush(pmap, va);
4605 vm_page_free_pages_toq(&free, false);
4606 }
4607
4608 mpt2pg = NULL;
4609 }
4610 return (NULL);
4611 }
4612
4613 /*
4614 * Increment counters
4615 */
4616 pmap->pm_stats.resident_count++;
4617
4618 /*
4619 * Now validate mapping with RO protection
4620 */
4621 pa = VM_PAGE_TO_PHYS(m);
4622 l2prot = PTE2_RO | PTE2_NM;
4623 if (va < VM_MAXUSER_ADDRESS)
4624 l2prot |= PTE2_U | PTE2_NG;
4625 if ((prot & VM_PROT_EXECUTE) == 0)
4626 l2prot |= PTE2_NX;
4627 else if (m->md.pat_mode == VM_MEMATTR_WB_WA && pmap != kernel_pmap) {
4628 /*
4629 * Sync icache if exec permission and attribute VM_MEMATTR_WB_WA
4630 * is set. QQQ: For more info, see comments in pmap_enter().
4631 */
4632 cache_icache_sync_fresh(va, pa, PAGE_SIZE);
4633 }
4634 pte2_store(pte2p, PTE2(pa, l2prot, vm_page_pte2_attr(m)));
4635
4636 return (mpt2pg);
4637 }
4638
4639 void
4640 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
4641 {
4642
4643 rw_wlock(&pvh_global_lock);
4644 PMAP_LOCK(pmap);
4645 (void)pmap_enter_quick_locked(pmap, va, m, prot, NULL);
4646 rw_wunlock(&pvh_global_lock);
4647 PMAP_UNLOCK(pmap);
4648 }
4649
4650 /*
4651 * Tries to create a read- and/or execute-only 1 MB page mapping. Returns
4652 * true if successful. Returns false if (1) a mapping already exists at the
4653 * specified virtual address or (2) a PV entry cannot be allocated without
4654 * reclaiming another PV entry.
4655 */
4656 static bool
4657 pmap_enter_1mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
4658 {
4659 pt1_entry_t pte1;
4660 vm_paddr_t pa;
4661
4662 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4663 pa = VM_PAGE_TO_PHYS(m);
4664 pte1 = PTE1(pa, PTE1_NM | PTE1_RO, ATTR_TO_L1(vm_page_pte2_attr(m)));
4665 if ((prot & VM_PROT_EXECUTE) == 0)
4666 pte1 |= PTE1_NX;
4667 if (va < VM_MAXUSER_ADDRESS)
4668 pte1 |= PTE1_U;
4669 if (pmap != kernel_pmap)
4670 pte1 |= PTE1_NG;
4671 return (pmap_enter_pte1(pmap, va, pte1, PMAP_ENTER_NOSLEEP |
4672 PMAP_ENTER_NOREPLACE | PMAP_ENTER_NORECLAIM, m) == KERN_SUCCESS);
4673 }
4674
4675 /*
4676 * Tries to create the specified 1 MB page mapping. Returns KERN_SUCCESS if
4677 * the mapping was created, and either KERN_FAILURE or KERN_RESOURCE_SHORTAGE
4678 * otherwise. Returns KERN_FAILURE if PMAP_ENTER_NOREPLACE was specified and
4679 * a mapping already exists at the specified virtual address. Returns
4680 * KERN_RESOURCE_SHORTAGE if PMAP_ENTER_NORECLAIM was specified and PV entry
4681 * allocation failed.
4682 */
4683 static int
4684 pmap_enter_pte1(pmap_t pmap, vm_offset_t va, pt1_entry_t pte1, u_int flags,
4685 vm_page_t m)
4686 {
4687 struct spglist free;
4688 pt1_entry_t opte1, *pte1p;
4689 pt2_entry_t pte2, *pte2p;
4690 vm_offset_t cur, end;
4691 vm_page_t mt;
4692
4693 rw_assert(&pvh_global_lock, RA_WLOCKED);
4694 KASSERT((pte1 & (PTE1_NM | PTE1_RO)) == 0 ||
4695 (pte1 & (PTE1_NM | PTE1_RO)) == (PTE1_NM | PTE1_RO),
4696 ("%s: pte1 has inconsistent NM and RO attributes", __func__));
4697 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4698 pte1p = pmap_pte1(pmap, va);
4699 opte1 = pte1_load(pte1p);
4700 if (pte1_is_valid(opte1)) {
4701 if ((flags & PMAP_ENTER_NOREPLACE) != 0) {
4702 CTR3(KTR_PMAP, "%s: failure for va %#lx in pmap %p",
4703 __func__, va, pmap);
4704 return (KERN_FAILURE);
4705 }
4706 /* Break the existing mapping(s). */
4707 SLIST_INIT(&free);
4708 if (pte1_is_section(opte1)) {
4709 /*
4710 * If the section resulted from a promotion, then a
4711 * reserved PT page could be freed.
4712 */
4713 pmap_remove_pte1(pmap, pte1p, va, &free);
4714 } else {
4715 sched_pin();
4716 end = va + PTE1_SIZE;
4717 for (cur = va, pte2p = pmap_pte2_quick(pmap, va);
4718 cur != end; cur += PAGE_SIZE, pte2p++) {
4719 pte2 = pte2_load(pte2p);
4720 if (!pte2_is_valid(pte2))
4721 continue;
4722 if (pmap_remove_pte2(pmap, pte2p, cur, &free))
4723 break;
4724 }
4725 sched_unpin();
4726 }
4727 vm_page_free_pages_toq(&free, false);
4728 }
4729 if ((m->oflags & VPO_UNMANAGED) == 0) {
4730 /*
4731 * Abort this mapping if its PV entry could not be created.
4732 */
4733 if (!pmap_pv_insert_pte1(pmap, va, pte1, flags)) {
4734 CTR3(KTR_PMAP, "%s: failure for va %#lx in pmap %p",
4735 __func__, va, pmap);
4736 return (KERN_RESOURCE_SHORTAGE);
4737 }
4738 if ((pte1 & PTE1_RO) == 0) {
4739 for (mt = m; mt < &m[PTE1_SIZE / PAGE_SIZE]; mt++)
4740 vm_page_aflag_set(mt, PGA_WRITEABLE);
4741 }
4742 }
4743
4744 /*
4745 * Increment counters.
4746 */
4747 if (pte1_is_wired(pte1))
4748 pmap->pm_stats.wired_count += PTE1_SIZE / PAGE_SIZE;
4749 pmap->pm_stats.resident_count += PTE1_SIZE / PAGE_SIZE;
4750
4751 /*
4752 * Sync icache if exec permission and attribute VM_MEMATTR_WB_WA
4753 * is set. QQQ: For more info, see comments in pmap_enter().
4754 */
4755 if ((pte1 & PTE1_NX) == 0 && m->md.pat_mode == VM_MEMATTR_WB_WA &&
4756 pmap != kernel_pmap && (!pte1_is_section(opte1) ||
4757 pte1_pa(opte1) != VM_PAGE_TO_PHYS(m) || (opte1 & PTE2_NX) != 0))
4758 cache_icache_sync_fresh(va, VM_PAGE_TO_PHYS(m), PTE1_SIZE);
4759
4760 /*
4761 * Map the section.
4762 */
4763 pte1_store(pte1p, pte1);
4764
4765 pmap_pte1_mappings++;
4766 CTR3(KTR_PMAP, "%s: success for va %#lx in pmap %p", __func__, va,
4767 pmap);
4768 return (KERN_SUCCESS);
4769 }
4770
4771 /*
4772 * Maps a sequence of resident pages belonging to the same object.
4773 * The sequence begins with the given page m_start. This page is
4774 * mapped at the given virtual address start. Each subsequent page is
4775 * mapped at a virtual address that is offset from start by the same
4776 * amount as the page is offset from m_start within the object. The
4777 * last page in the sequence is the page with the largest offset from
4778 * m_start that can be mapped at a virtual address less than the given
4779 * virtual address end. Not every virtual page between start and end
4780 * is mapped; only those for which a resident page exists with the
4781 * corresponding offset from m_start are mapped.
4782 */
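/*
 * (Descriptive note: when superpages are enabled, a 1 MB-aligned virtual
 * run backed by a fully populated reservation (m->psind == 1) is entered
 * via pmap_enter_1mpage() and the loop then skips over the remaining 255
 * constituent 4 KB pages.)
 */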
4783 void
4784 pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
4785 vm_page_t m_start, vm_prot_t prot)
4786 {
4787 vm_offset_t va;
4788 vm_page_t m, mpt2pg;
4789 vm_pindex_t diff, psize;
4790
4791 PDEBUG(6, printf("%s: pmap %p start %#x end %#x m %p prot %#x\n",
4792 __func__, pmap, start, end, m_start, prot));
4793
4794 VM_OBJECT_ASSERT_LOCKED(m_start->object);
4795 psize = atop(end - start);
4796 mpt2pg = NULL;
4797 m = m_start;
4798 rw_wlock(&pvh_global_lock);
4799 PMAP_LOCK(pmap);
4800 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
4801 va = start + ptoa(diff);
4802 if ((va & PTE1_OFFSET) == 0 && va + PTE1_SIZE <= end &&
4803 m->psind == 1 && sp_enabled &&
4804 pmap_enter_1mpage(pmap, va, m, prot))
4805 m = &m[PTE1_SIZE / PAGE_SIZE - 1];
4806 else
4807 mpt2pg = pmap_enter_quick_locked(pmap, va, m, prot,
4808 mpt2pg);
4809 m = TAILQ_NEXT(m, listq);
4810 }
4811 rw_wunlock(&pvh_global_lock);
4812 PMAP_UNLOCK(pmap);
4813 }
4814
4815 /*
4816 * This code maps large physical mmap regions into the
4817 * processor address space. Note that some shortcuts
4818 * are taken, but the code works.
4819 */
4820 void
4821 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
4822 vm_pindex_t pindex, vm_size_t size)
4823 {
4824 pt1_entry_t *pte1p;
4825 vm_paddr_t pa, pte2_pa;
4826 vm_page_t p;
4827 vm_memattr_t pat_mode;
4828 u_int l1attr, l1prot;
4829
4830 VM_OBJECT_ASSERT_WLOCKED(object);
4831 KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
4832 ("%s: non-device object", __func__));
4833 if ((addr & PTE1_OFFSET) == 0 && (size & PTE1_OFFSET) == 0) {
4834 if (!vm_object_populate(object, pindex, pindex + atop(size)))
4835 return;
4836 p = vm_page_lookup(object, pindex);
4837 KASSERT(p->valid == VM_PAGE_BITS_ALL,
4838 ("%s: invalid page %p", __func__, p));
4839 pat_mode = p->md.pat_mode;
4840
4841 /*
4842 * Abort the mapping if the first page is not physically
4843 * aligned to a 1MB page boundary.
4844 */
4845 pte2_pa = VM_PAGE_TO_PHYS(p);
4846 if (pte2_pa & PTE1_OFFSET)
4847 return;
4848
4849 /*
4850 * Skip the first page. Abort the mapping if the rest of
4851 * the pages are not physically contiguous or have differing
4852 * memory attributes.
4853 */
4854 p = TAILQ_NEXT(p, listq);
4855 for (pa = pte2_pa + PAGE_SIZE; pa < pte2_pa + size;
4856 pa += PAGE_SIZE) {
4857 KASSERT(p->valid == VM_PAGE_BITS_ALL,
4858 ("%s: invalid page %p", __func__, p));
4859 if (pa != VM_PAGE_TO_PHYS(p) ||
4860 pat_mode != p->md.pat_mode)
4861 return;
4862 p = TAILQ_NEXT(p, listq);
4863 }
4864
4865 /*
4866 * Map using 1MB pages.
4867 *
4868 * QQQ: Well, we are mapping a section, so the same conditions
4869 * must hold as during promotion. It looks like only RW mappings
4870 * are created here, so read-only mappings must be done elsewhere.
4871 */
4872 l1prot = PTE1_U | PTE1_NG | PTE1_RW | PTE1_M | PTE1_A;
4873 l1attr = ATTR_TO_L1(vm_memattr_to_pte2(pat_mode));
4874 PMAP_LOCK(pmap);
4875 for (pa = pte2_pa; pa < pte2_pa + size; pa += PTE1_SIZE) {
4876 pte1p = pmap_pte1(pmap, addr);
4877 if (!pte1_is_valid(pte1_load(pte1p))) {
4878 pte1_store(pte1p, PTE1(pa, l1prot, l1attr));
4879 pmap->pm_stats.resident_count += PTE1_SIZE /
4880 PAGE_SIZE;
4881 pmap_pte1_mappings++;
4882 }
4883 /* Else continue on if the PTE1 is already valid. */
4884 addr += PTE1_SIZE;
4885 }
4886 PMAP_UNLOCK(pmap);
4887 }
4888 }
4889
4890 /*
4891 * Do the things to protect a 1mpage in a process.
4892 */
4893 static void
4894 pmap_protect_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t sva,
4895 vm_prot_t prot)
4896 {
4897 pt1_entry_t npte1, opte1;
4898 vm_offset_t eva, va;
4899 vm_page_t m;
4900
4901 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4902 KASSERT((sva & PTE1_OFFSET) == 0,
4903 ("%s: sva is not 1mpage aligned", __func__));
4904
4905 opte1 = npte1 = pte1_load(pte1p);
4906 if (pte1_is_managed(opte1) && pte1_is_dirty(opte1)) {
4907 eva = sva + PTE1_SIZE;
4908 for (va = sva, m = PHYS_TO_VM_PAGE(pte1_pa(opte1));
4909 va < eva; va += PAGE_SIZE, m++)
4910 vm_page_dirty(m);
4911 }
4912 if ((prot & VM_PROT_WRITE) == 0)
4913 npte1 |= PTE1_RO | PTE1_NM;
4914 if ((prot & VM_PROT_EXECUTE) == 0)
4915 npte1 |= PTE1_NX;
4916
4917 /*
4918 * QQQ: Herein, execute permission is never set.
4919 * It can only be cleared. So, no icache
4920 * syncing is needed.
4921 */
4922
4923 if (npte1 != opte1) {
4924 pte1_store(pte1p, npte1);
4925 pmap_tlb_flush(pmap, sva);
4926 }
4927 }
4928
4929 /*
4930 * Set the physical protection on the
4931 * specified range of this map as requested.
4932 */
4933 void
4934 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
4935 {
4936 bool pv_lists_locked;
4937 vm_offset_t nextva;
4938 pt1_entry_t *pte1p, pte1;
4939 pt2_entry_t *pte2p, opte2, npte2;
4940
4941 KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot));
4942 if (prot == VM_PROT_NONE) {
4943 pmap_remove(pmap, sva, eva);
4944 return;
4945 }
4946
4947 if ((prot & (VM_PROT_WRITE | VM_PROT_EXECUTE)) ==
4948 (VM_PROT_WRITE | VM_PROT_EXECUTE))
4949 return;
4950
4951 if (pmap_is_current(pmap))
4952 pv_lists_locked = false;
4953 else {
4954 pv_lists_locked = true;
4955 resume:
4956 rw_wlock(&pvh_global_lock);
4957 sched_pin();
4958 }
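/*
 * (Descriptive note: pvh_global_lock is taken before the pmap lock
 * throughout this file; the resume label lets the demotion path below
 * retry from here when rw_try_wlock() fails while the pmap lock is
 * already held.)
 */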
4959
4960 PMAP_LOCK(pmap);
4961 for (; sva < eva; sva = nextva) {
4962 /*
4963 * Calculate address for next L2 page table.
4964 */
4965 nextva = pte1_trunc(sva + PTE1_SIZE);
4966 if (nextva < sva)
4967 nextva = eva;
4968
4969 pte1p = pmap_pte1(pmap, sva);
4970 pte1 = pte1_load(pte1p);
4971
4972 /*
4973 * Weed out invalid mappings. Note: we assume that the L1 page
4974 * table is always allocated, and in kernel virtual.
4975 */
4976 if (pte1 == 0)
4977 continue;
4978
4979 if (pte1_is_section(pte1)) {
4980 /*
4981 * Are we protecting the entire large page? If not,
4982 * demote the mapping and fall through.
4983 */
4984 if (sva + PTE1_SIZE == nextva && eva >= nextva) {
4985 pmap_protect_pte1(pmap, pte1p, sva, prot);
4986 continue;
4987 } else {
4988 if (!pv_lists_locked) {
4989 pv_lists_locked = true;
4990 if (!rw_try_wlock(&pvh_global_lock)) {
4991 PMAP_UNLOCK(pmap);
4992 goto resume;
4993 }
4994 sched_pin();
4995 }
4996 if (!pmap_demote_pte1(pmap, pte1p, sva)) {
4997 /*
4998 * The large page mapping
4999 * was destroyed.
5000 */
5001 continue;
5002 }
5003 #ifdef INVARIANTS
5004 else {
5005 /* Update pte1 after demotion */
5006 pte1 = pte1_load(pte1p);
5007 }
5008 #endif
5009 }
5010 }
5011
5012 KASSERT(pte1_is_link(pte1), ("%s: pmap %p va %#x pte1 %#x at %p"
5013 " is not link", __func__, pmap, sva, pte1, pte1p));
5014
5015 /*
5016 * Limit our scan to either the end of the va represented
5017 * by the current L2 page table page, or to the end of the
5018 * range being protected.
5019 */
5020 if (nextva > eva)
5021 nextva = eva;
5022
5023 for (pte2p = pmap_pte2_quick(pmap, sva); sva != nextva; pte2p++,
5024 sva += PAGE_SIZE) {
5025 vm_page_t m;
5026
5027 opte2 = npte2 = pte2_load(pte2p);
5028 if (!pte2_is_valid(opte2))
5029 continue;
5030
5031 if ((prot & VM_PROT_WRITE) == 0) {
5032 if (pte2_is_managed(opte2) &&
5033 pte2_is_dirty(opte2)) {
5034 m = PHYS_TO_VM_PAGE(pte2_pa(opte2));
5035 vm_page_dirty(m);
5036 }
5037 npte2 |= PTE2_RO | PTE2_NM;
5038 }
5039
5040 if ((prot & VM_PROT_EXECUTE) == 0)
5041 npte2 |= PTE2_NX;
5042
5043 /*
5044 * QQQ: Herein, execute permission is never set.
5045 * It can only be cleared. So, no icache
5046 * syncing is needed.
5047 */
5048
5049 if (npte2 != opte2) {
5050 pte2_store(pte2p, npte2);
5051 pmap_tlb_flush(pmap, sva);
5052 }
5053 }
5054 }
5055 if (pv_lists_locked) {
5056 sched_unpin();
5057 rw_wunlock(&pvh_global_lock);
5058 }
5059 PMAP_UNLOCK(pmap);
5060 }
5061
5062 /*
5063 * pmap_pvh_wired_mappings:
5064 *
5065 * Return the updated number "count" of managed mappings that are wired.
5066 */
5067 static int
5068 pmap_pvh_wired_mappings(struct md_page *pvh, int count)
5069 {
5070 pmap_t pmap;
5071 pt1_entry_t pte1;
5072 pt2_entry_t pte2;
5073 pv_entry_t pv;
5074
5075 rw_assert(&pvh_global_lock, RA_WLOCKED);
5076 sched_pin();
5077 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
5078 pmap = PV_PMAP(pv);
5079 PMAP_LOCK(pmap);
5080 pte1 = pte1_load(pmap_pte1(pmap, pv->pv_va));
5081 if (pte1_is_section(pte1)) {
5082 if (pte1_is_wired(pte1))
5083 count++;
5084 } else {
5085 KASSERT(pte1_is_link(pte1),
5086 ("%s: pte1 %#x is not link", __func__, pte1));
5087 pte2 = pte2_load(pmap_pte2_quick(pmap, pv->pv_va));
5088 if (pte2_is_wired(pte2))
5089 count++;
5090 }
5091 PMAP_UNLOCK(pmap);
5092 }
5093 sched_unpin();
5094 return (count);
5095 }
5096
5097 /*
5098 * pmap_page_wired_mappings:
5099 *
5100 * Return the number of managed mappings to the given physical page
5101 * that are wired.
5102 */
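/*
 * (Descriptive note: both the page's own pv list and, for non-fictitious
 * pages, the pv list of the containing 1 MB page are consulted below, so
 * wired 4 KB and wired 1 MB mappings are both counted.)
 */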
5103 int
5104 pmap_page_wired_mappings(vm_page_t m)
5105 {
5106 int count;
5107
5108 count = 0;
5109 if ((m->oflags & VPO_UNMANAGED) != 0)
5110 return (count);
5111 rw_wlock(&pvh_global_lock);
5112 count = pmap_pvh_wired_mappings(&m->md, count);
5113 if ((m->flags & PG_FICTITIOUS) == 0) {
5114 count = pmap_pvh_wired_mappings(pa_to_pvh(VM_PAGE_TO_PHYS(m)),
5115 count);
5116 }
5117 rw_wunlock(&pvh_global_lock);
5118 return (count);
5119 }
5120
5121 /*
5122 * Returns true if any of the given mappings were used to modify
5123 * physical memory. Otherwise, returns false. Both page and 1mpage
5124 * mappings are supported.
5125 */
5126 static bool
5127 pmap_is_modified_pvh(struct md_page *pvh)
5128 {
5129 pv_entry_t pv;
5130 pt1_entry_t pte1;
5131 pt2_entry_t pte2;
5132 pmap_t pmap;
5133 bool rv;
5134
5135 rw_assert(&pvh_global_lock, RA_WLOCKED);
5136 rv = false;
5137 sched_pin();
5138 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
5139 pmap = PV_PMAP(pv);
5140 PMAP_LOCK(pmap);
5141 pte1 = pte1_load(pmap_pte1(pmap, pv->pv_va));
5142 if (pte1_is_section(pte1)) {
5143 rv = pte1_is_dirty(pte1);
5144 } else {
5145 KASSERT(pte1_is_link(pte1),
5146 ("%s: pte1 %#x is not link", __func__, pte1));
5147 pte2 = pte2_load(pmap_pte2_quick(pmap, pv->pv_va));
5148 rv = pte2_is_dirty(pte2);
5149 }
5150 PMAP_UNLOCK(pmap);
5151 if (rv)
5152 break;
5153 }
5154 sched_unpin();
5155 return (rv);
5156 }
5157
5158 /*
5159 * pmap_is_modified:
5160 *
5161 * Return whether or not the specified physical page was modified
5162 * in any physical maps.
5163 */
5164 bool
5165 pmap_is_modified(vm_page_t m)
5166 {
5167 bool rv;
5168
5169 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
5170 ("%s: page %p is not managed", __func__, m));
5171
5172 /*
5173 * If the page is not busied then this check is racy.
5174 */
5175 if (!pmap_page_is_write_mapped(m))
5176 return (false);
5177 rw_wlock(&pvh_global_lock);
5178 rv = pmap_is_modified_pvh(&m->md) ||
5179 ((m->flags & PG_FICTITIOUS) == 0 &&
5180 pmap_is_modified_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
5181 rw_wunlock(&pvh_global_lock);
5182 return (rv);
5183 }
5184
5185 /*
5186 * pmap_is_prefaultable:
5187 *
5188 * Return whether or not the specified virtual address is eligible
5189 * for prefault.
5190 */
5191 bool
5192 pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
5193 {
5194 pt1_entry_t pte1;
5195 pt2_entry_t pte2;
5196 bool rv;
5197
5198 rv = false;
5199 PMAP_LOCK(pmap);
5200 pte1 = pte1_load(pmap_pte1(pmap, addr));
5201 if (pte1_is_link(pte1)) {
5202 pte2 = pte2_load(pt2map_entry(addr));
5203 rv = !pte2_is_valid(pte2);
5204 }
5205 PMAP_UNLOCK(pmap);
5206 return (rv);
5207 }
5208
5209 /*
5210 * Returns true if any of the given mappings were referenced and false
5211 * otherwise. Both page and 1mpage mappings are supported.
5212 */
5213 static bool
5214 pmap_is_referenced_pvh(struct md_page *pvh)
5215 {
5216
5217 pv_entry_t pv;
5218 pt1_entry_t pte1;
5219 pt2_entry_t pte2;
5220 pmap_t pmap;
5221 bool rv;
5222
5223 rw_assert(&pvh_global_lock, RA_WLOCKED);
5224 rv = false;
5225 sched_pin();
5226 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
5227 pmap = PV_PMAP(pv);
5228 PMAP_LOCK(pmap);
5229 pte1 = pte1_load(pmap_pte1(pmap, pv->pv_va));
5230 if (pte1_is_section(pte1)) {
5231 rv = (pte1 & (PTE1_A | PTE1_V)) == (PTE1_A | PTE1_V);
5232 } else {
5233 pte2 = pte2_load(pmap_pte2_quick(pmap, pv->pv_va));
5234 rv = (pte2 & (PTE2_A | PTE2_V)) == (PTE2_A | PTE2_V);
5235 }
5236 PMAP_UNLOCK(pmap);
5237 if (rv)
5238 break;
5239 }
5240 sched_unpin();
5241 return (rv);
5242 }
5243
5244 /*
5245 * pmap_is_referenced:
5246 *
5247 * Return whether or not the specified physical page was referenced
5248 * in any physical maps.
5249 */
5250 bool
5251 pmap_is_referenced(vm_page_t m)
5252 {
5253 bool rv;
5254
5255 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
5256 ("%s: page %p is not managed", __func__, m));
5257 rw_wlock(&pvh_global_lock);
5258 rv = pmap_is_referenced_pvh(&m->md) ||
5259 ((m->flags & PG_FICTITIOUS) == 0 &&
5260 pmap_is_referenced_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
5261 rw_wunlock(&pvh_global_lock);
5262 return (rv);
5263 }
5264
5265 /*
5266 * pmap_ts_referenced:
5267 *
5268 * Return a count of reference bits for a page, clearing those bits.
5269 * It is not necessary for every reference bit to be cleared, but it
5270 * is necessary that 0 only be returned when there are truly no
5271 * reference bits set.
5272 *
5273 * As an optimization, update the page's dirty field if a modified bit is
5274 * found while counting reference bits. This opportunistic update can be
5275 * performed at low cost and can eliminate the need for some future calls
5276 * to pmap_is_modified(). However, since this function stops after
5277 * finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some
5278 * dirty pages. Those dirty pages will only be detected by a future call
5279 * to pmap_is_modified().
5280 */
5281 int
5282 pmap_ts_referenced(vm_page_t m)
5283 {
5284 struct md_page *pvh;
5285 pv_entry_t pv, pvf;
5286 pmap_t pmap;
5287 pt1_entry_t *pte1p, opte1;
5288 pt2_entry_t *pte2p, opte2;
5289 vm_paddr_t pa;
5290 int rtval = 0;
5291
5292 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
5293 ("%s: page %p is not managed", __func__, m));
5294 pa = VM_PAGE_TO_PHYS(m);
5295 pvh = pa_to_pvh(pa);
5296 rw_wlock(&pvh_global_lock);
5297 sched_pin();
5298 if ((m->flags & PG_FICTITIOUS) != 0 ||
5299 (pvf = TAILQ_FIRST(&pvh->pv_list)) == NULL)
5300 goto small_mappings;
5301 pv = pvf;
5302 do {
5303 pmap = PV_PMAP(pv);
5304 PMAP_LOCK(pmap);
5305 pte1p = pmap_pte1(pmap, pv->pv_va);
5306 opte1 = pte1_load(pte1p);
5307 if (pte1_is_dirty(opte1)) {
5308 /*
5309 * Although "opte1" is mapping a 1MB page, because
5310 * this function is called at a 4KB page granularity,
5311 * we only update the 4KB page under test.
5312 */
5313 vm_page_dirty(m);
5314 }
5315 if ((opte1 & PTE1_A) != 0) {
5316 /*
5317 * Since this reference bit is shared by 256 4KB pages,
5318 * it should not be cleared every time it is tested.
5319 * Apply a simple "hash" function on the physical page
5320 * number, the virtual section number, and the pmap
5321 * address to select one 4KB page out of the 256
5322 * on which testing the reference bit will result
5323 * in clearing that bit. This function is designed
5324 * to avoid the selection of the same 4KB page
5325 * for every 1MB page mapping.
5326 *
5327 * On demotion, a mapping that hasn't been referenced
5328 * is simply destroyed. To avoid the possibility of a
5329 * subsequent page fault on a demoted wired mapping,
5330 * always leave its reference bit set. Moreover,
5331 * since the section is wired, the current state of
5332 * its reference bit won't affect page replacement.
5333 */
5334 if ((((pa >> PAGE_SHIFT) ^ (pv->pv_va >> PTE1_SHIFT) ^
5335 (uintptr_t)pmap) & (NPTE2_IN_PG - 1)) == 0 &&
5336 !pte1_is_wired(opte1)) {
5337 pte1_clear_bit(pte1p, PTE1_A);
5338 pmap_tlb_flush(pmap, pv->pv_va);
5339 }
5340 rtval++;
5341 }
5342 PMAP_UNLOCK(pmap);
5343 /* Rotate the PV list if it has more than one entry. */
5344 if (TAILQ_NEXT(pv, pv_next) != NULL) {
5345 TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
5346 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
5347 }
5348 if (rtval >= PMAP_TS_REFERENCED_MAX)
5349 goto out;
5350 } while ((pv = TAILQ_FIRST(&pvh->pv_list)) != pvf);
5351 small_mappings:
5352 if ((pvf = TAILQ_FIRST(&m->md.pv_list)) == NULL)
5353 goto out;
5354 pv = pvf;
5355 do {
5356 pmap = PV_PMAP(pv);
5357 PMAP_LOCK(pmap);
5358 pte1p = pmap_pte1(pmap, pv->pv_va);
5359 KASSERT(pte1_is_link(pte1_load(pte1p)),
5360 ("%s: not found a link in page %p's pv list", __func__, m));
5361
5362 pte2p = pmap_pte2_quick(pmap, pv->pv_va);
5363 opte2 = pte2_load(pte2p);
5364 if (pte2_is_dirty(opte2))
5365 vm_page_dirty(m);
5366 if ((opte2 & PTE2_A) != 0) {
5367 pte2_clear_bit(pte2p, PTE2_A);
5368 pmap_tlb_flush(pmap, pv->pv_va);
5369 rtval++;
5370 }
5371 PMAP_UNLOCK(pmap);
5372 /* Rotate the PV list if it has more than one entry. */
5373 if (TAILQ_NEXT(pv, pv_next) != NULL) {
5374 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
5375 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
5376 }
5377 } while ((pv = TAILQ_FIRST(&m->md.pv_list)) != pvf && rtval <
5378 PMAP_TS_REFERENCED_MAX);
5379 out:
5380 sched_unpin();
5381 rw_wunlock(&pvh_global_lock);
5382 return (rtval);
5383 }

/*
 * Clear the wired attribute from the mappings for the specified range of
 * addresses in the given pmap. Every valid mapping within that range
 * must have the wired attribute set. In contrast, invalid mappings
 * cannot have the wired attribute set, so they are ignored.
 *
 * The wired attribute of the page table entry is not a hardware feature,
 * so there is no need to invalidate any TLB entries.
 */
void
pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
	vm_offset_t nextva;
	pt1_entry_t *pte1p, pte1;
	pt2_entry_t *pte2p, pte2;
	bool pv_lists_locked;

	if (pmap_is_current(pmap))
		pv_lists_locked = false;
	else {
		pv_lists_locked = true;
resume:
		rw_wlock(&pvh_global_lock);
		sched_pin();
	}
	PMAP_LOCK(pmap);
	for (; sva < eva; sva = nextva) {
		nextva = pte1_trunc(sva + PTE1_SIZE);
		if (nextva < sva)
			nextva = eva;

		pte1p = pmap_pte1(pmap, sva);
		pte1 = pte1_load(pte1p);

		/*
		 * Weed out invalid mappings. Note: we assume that the L1
		 * page table is always allocated and resides in kernel
		 * virtual address space.
		 */
		if (pte1 == 0)
			continue;

		if (pte1_is_section(pte1)) {
			if (!pte1_is_wired(pte1))
				panic("%s: pte1 %#x not wired", __func__, pte1);

			/*
			 * Are we unwiring the entire large page? If not,
			 * demote the mapping and fall through.
			 */
			if (sva + PTE1_SIZE == nextva && eva >= nextva) {
				pte1_clear_bit(pte1p, PTE1_W);
				pmap->pm_stats.wired_count -= PTE1_SIZE /
				    PAGE_SIZE;
				continue;
			} else {
				if (!pv_lists_locked) {
					pv_lists_locked = true;
					if (!rw_try_wlock(&pvh_global_lock)) {
						PMAP_UNLOCK(pmap);
						/* Repeat sva. */
						goto resume;
					}
					sched_pin();
				}
				if (!pmap_demote_pte1(pmap, pte1p, sva))
					panic("%s: demotion failed", __func__);
#ifdef INVARIANTS
				else {
					/* Update pte1 after demotion. */
					pte1 = pte1_load(pte1p);
				}
#endif
			}
		}

		KASSERT(pte1_is_link(pte1), ("%s: pmap %p va %#x pte1 %#x at %p"
		    " is not a link", __func__, pmap, sva, pte1, pte1p));

		/*
		 * Limit our scan to either the end of the va represented
		 * by the current L2 page table page, or to the end of the
		 * range being unwired.
		 */
		if (nextva > eva)
			nextva = eva;

		for (pte2p = pmap_pte2_quick(pmap, sva); sva != nextva; pte2p++,
		    sva += PAGE_SIZE) {
			pte2 = pte2_load(pte2p);
			if (!pte2_is_valid(pte2))
				continue;
			if (!pte2_is_wired(pte2))
				panic("%s: pte2 %#x is missing PTE2_W",
				    __func__, pte2);

			/*
			 * PTE2_W must be cleared atomically. Although the pmap
			 * lock synchronizes access to PTE2_W, another processor
			 * could be changing PTE2_NM and/or PTE2_A concurrently.
			 */
			pte2_clear_bit(pte2p, PTE2_W);
			pmap->pm_stats.wired_count--;
		}
	}
	if (pv_lists_locked) {
		sched_unpin();
		rw_wunlock(&pvh_global_lock);
	}
	PMAP_UNLOCK(pmap);
}

/*
 * Clear the write and modified bits in each of the given page's mappings.
 */
void
pmap_remove_write(vm_page_t m)
{
	struct md_page *pvh;
	pv_entry_t next_pv, pv;
	pmap_t pmap;
	pt1_entry_t *pte1p;
	pt2_entry_t *pte2p, opte2;
	vm_offset_t va;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("%s: page %p is not managed", __func__, m));
	vm_page_assert_busied(m);

	if (!pmap_page_is_write_mapped(m))
		return;
	rw_wlock(&pvh_global_lock);
	sched_pin();
	if ((m->flags & PG_FICTITIOUS) != 0)
		goto small_mappings;
	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
		va = pv->pv_va;
		pmap = PV_PMAP(pv);
		PMAP_LOCK(pmap);
		pte1p = pmap_pte1(pmap, va);
		if (!(pte1_load(pte1p) & PTE1_RO))
			(void)pmap_demote_pte1(pmap, pte1p, va);
		PMAP_UNLOCK(pmap);
	}
small_mappings:
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
		pmap = PV_PMAP(pv);
		PMAP_LOCK(pmap);
		pte1p = pmap_pte1(pmap, pv->pv_va);
		KASSERT(!pte1_is_section(pte1_load(pte1p)), ("%s: found"
		    " a section in page %p's pv list", __func__, m));
		pte2p = pmap_pte2_quick(pmap, pv->pv_va);
		opte2 = pte2_load(pte2p);
		if (!(opte2 & PTE2_RO)) {
			pte2_store(pte2p, opte2 | PTE2_RO | PTE2_NM);
			if (pte2_is_dirty(opte2))
				vm_page_dirty(m);
			pmap_tlb_flush(pmap, pv->pv_va);
		}
		PMAP_UNLOCK(pmap);
	}
	vm_page_aflag_clear(m, PGA_WRITEABLE);
	sched_unpin();
	rw_wunlock(&pvh_global_lock);
}

/*
 * Apply the given advice to the specified range of addresses within the
 * given pmap. Depending on the advice, clear the referenced and/or
 * modified flags in each mapping and set the mapped page's dirty field.
 */
void
pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
{
	pt1_entry_t *pte1p, opte1;
	pt2_entry_t *pte2p, pte2;
	vm_offset_t pdnxt;
	vm_page_t m;
	bool pv_lists_locked;

	if (advice != MADV_DONTNEED && advice != MADV_FREE)
		return;
	if (pmap_is_current(pmap))
		pv_lists_locked = false;
	else {
		pv_lists_locked = true;
resume:
		rw_wlock(&pvh_global_lock);
		sched_pin();
	}
	PMAP_LOCK(pmap);
	for (; sva < eva; sva = pdnxt) {
		pdnxt = pte1_trunc(sva + PTE1_SIZE);
		if (pdnxt < sva)
			pdnxt = eva;
		pte1p = pmap_pte1(pmap, sva);
		opte1 = pte1_load(pte1p);
		if (!pte1_is_valid(opte1)) /* XXX */
			continue;
		else if (pte1_is_section(opte1)) {
			if (!pte1_is_managed(opte1))
				continue;
			if (!pv_lists_locked) {
				pv_lists_locked = true;
				if (!rw_try_wlock(&pvh_global_lock)) {
					PMAP_UNLOCK(pmap);
					goto resume;
				}
				sched_pin();
			}
			if (!pmap_demote_pte1(pmap, pte1p, sva)) {
				/*
				 * The large page mapping was destroyed.
				 */
				continue;
			}

			/*
			 * Unless the page mappings are wired, remove the
			 * mapping to a single page so that a subsequent
			 * access may repromote. Since the underlying L2 page
			 * table is fully populated, this removal never
			 * frees an L2 page table page.
			 */
			if (!pte1_is_wired(opte1)) {
				pte2p = pmap_pte2_quick(pmap, sva);
				KASSERT(pte2_is_valid(pte2_load(pte2p)),
				    ("%s: invalid PTE2", __func__));
				pmap_remove_pte2(pmap, pte2p, sva, NULL);
			}
		}
		if (pdnxt > eva)
			pdnxt = eva;
		for (pte2p = pmap_pte2_quick(pmap, sva); sva != pdnxt; pte2p++,
		    sva += PAGE_SIZE) {
			pte2 = pte2_load(pte2p);
			if (!pte2_is_valid(pte2) || !pte2_is_managed(pte2))
				continue;
			else if (pte2_is_dirty(pte2)) {
				if (advice == MADV_DONTNEED) {
					/*
					 * Future calls to pmap_is_modified()
					 * can be avoided by making the page
					 * dirty now.
					 */
					m = PHYS_TO_VM_PAGE(pte2_pa(pte2));
					vm_page_dirty(m);
				}
				pte2_set_bit(pte2p, PTE2_NM);
				pte2_clear_bit(pte2p, PTE2_A);
			} else if ((pte2 & PTE2_A) != 0)
				pte2_clear_bit(pte2p, PTE2_A);
			else
				continue;
			pmap_tlb_flush(pmap, sva);
		}
	}
	if (pv_lists_locked) {
		sched_unpin();
		rw_wunlock(&pvh_global_lock);
	}
	PMAP_UNLOCK(pmap);
}

/*
 * Clear the modify bits on the specified physical page.
 */
void
pmap_clear_modify(vm_page_t m)
{
	struct md_page *pvh;
	pv_entry_t next_pv, pv;
	pmap_t pmap;
	pt1_entry_t *pte1p, opte1;
	pt2_entry_t *pte2p, opte2;
	vm_offset_t va;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("%s: page %p is not managed", __func__, m));
	vm_page_assert_busied(m);

	if (!pmap_page_is_write_mapped(m))
		return;
	rw_wlock(&pvh_global_lock);
	sched_pin();
	if ((m->flags & PG_FICTITIOUS) != 0)
		goto small_mappings;
	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
		va = pv->pv_va;
		pmap = PV_PMAP(pv);
		PMAP_LOCK(pmap);
		pte1p = pmap_pte1(pmap, va);
		opte1 = pte1_load(pte1p);
		if (!(opte1 & PTE1_RO)) {
			if (pmap_demote_pte1(pmap, pte1p, va) &&
			    !pte1_is_wired(opte1)) {
				/*
				 * Write protect the mapping to a
				 * single page so that a subsequent
				 * write access may repromote.
				 */
				va += VM_PAGE_TO_PHYS(m) - pte1_pa(opte1);
				pte2p = pmap_pte2_quick(pmap, va);
				opte2 = pte2_load(pte2p);
				if ((opte2 & PTE2_V) != 0) {
					pte2_set_bit(pte2p, PTE2_NM | PTE2_RO);
					vm_page_dirty(m);
					pmap_tlb_flush(pmap, va);
				}
			}
		}
		PMAP_UNLOCK(pmap);
	}
small_mappings:
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
		pmap = PV_PMAP(pv);
		PMAP_LOCK(pmap);
		pte1p = pmap_pte1(pmap, pv->pv_va);
		KASSERT(!pte1_is_section(pte1_load(pte1p)), ("%s: found"
		    " a section in page %p's pv list", __func__, m));
		pte2p = pmap_pte2_quick(pmap, pv->pv_va);
		if (pte2_is_dirty(pte2_load(pte2p))) {
			pte2_set_bit(pte2p, PTE2_NM);
			pmap_tlb_flush(pmap, pv->pv_va);
		}
		PMAP_UNLOCK(pmap);
	}
	sched_unpin();
	rw_wunlock(&pvh_global_lock);
}

/*
 * Sets the memory attribute for the specified page.
 */
void
pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
{
	pt2_entry_t *cmap2_pte2p;
	vm_memattr_t oma;
	vm_paddr_t pa;
	struct pcpu *pc;

	oma = m->md.pat_mode;
	m->md.pat_mode = ma;

	CTR5(KTR_PMAP, "%s: page %p - 0x%08X oma: %d, ma: %d", __func__, m,
	    VM_PAGE_TO_PHYS(m), oma, ma);
	if ((m->flags & PG_FICTITIOUS) != 0)
		return;
#if 0
	/*
	 * If "m" is a normal page, flush it from the cache.
	 *
	 * First, try to find an existing mapping of the page by an sf
	 * buffer. sf_buf_invalidate_cache() modifies the mapping and
	 * flushes the cache.
	 */
	if (sf_buf_invalidate_cache(m, oma))
		return;
#endif
	/*
	 * If the page is not mapped by an sf buffer, map it transiently
	 * and perform the invalidation.
	 */
	if (ma != oma) {
		pa = VM_PAGE_TO_PHYS(m);
		sched_pin();
		pc = get_pcpu();
		cmap2_pte2p = pc->pc_cmap2_pte2p;
		mtx_lock(&pc->pc_cmap_lock);
		if (pte2_load(cmap2_pte2p) != 0)
			panic("%s: CMAP2 busy", __func__);
		pte2_store(cmap2_pte2p, PTE2_KERN_NG(pa, PTE2_AP_KRW,
		    vm_memattr_to_pte2(ma)));
		dcache_wbinv_poc((vm_offset_t)pc->pc_cmap2_addr, pa, PAGE_SIZE);
		pte2_clear(cmap2_pte2p);
		tlb_flush((vm_offset_t)pc->pc_cmap2_addr);
		sched_unpin();
		mtx_unlock(&pc->pc_cmap_lock);
	}
}

/*
 * Miscellaneous support routines follow.
 */

/*
 * Returns true if the given page is mapped individually or as part of
 * a 1mpage. Otherwise, returns false.
 */
bool
pmap_page_is_mapped(vm_page_t m)
{
	bool rv;

	if ((m->oflags & VPO_UNMANAGED) != 0)
		return (false);
	rw_wlock(&pvh_global_lock);
	rv = !TAILQ_EMPTY(&m->md.pv_list) ||
	    ((m->flags & PG_FICTITIOUS) == 0 &&
	    !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list));
	rw_wunlock(&pvh_global_lock);
	return (rv);
}

/*
 * Returns true if the pmap's pv is one of the first
 * 16 pvs linked to from this page. This count may
 * be changed upwards or downwards in the future; it
 * is only necessary that true be returned for a small
 * subset of pmaps for proper page aging.
 */
bool
pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{
	struct md_page *pvh;
	pv_entry_t pv;
	int loops = 0;
	bool rv;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("%s: page %p is not managed", __func__, m));
	rv = false;
	rw_wlock(&pvh_global_lock);
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
		if (PV_PMAP(pv) == pmap) {
			rv = true;
			break;
		}
		loops++;
		if (loops >= 16)
			break;
	}
	if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) {
		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
		TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
			if (PV_PMAP(pv) == pmap) {
				rv = true;
				break;
			}
			loops++;
			if (loops >= 16)
				break;
		}
	}
	rw_wunlock(&pvh_global_lock);
	return (rv);
}

/*
 * pmap_zero_page zeros the specified hardware page by mapping
 * the page into KVM and clearing its contents.
 */
void
pmap_zero_page(vm_page_t m)
{
	pt2_entry_t *cmap2_pte2p;
	struct pcpu *pc;

	sched_pin();
	pc = get_pcpu();
	cmap2_pte2p = pc->pc_cmap2_pte2p;
	mtx_lock(&pc->pc_cmap_lock);
	if (pte2_load(cmap2_pte2p) != 0)
		panic("%s: CMAP2 busy", __func__);
	pte2_store(cmap2_pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(m), PTE2_AP_KRW,
	    vm_page_pte2_attr(m)));
	pagezero(pc->pc_cmap2_addr);
	pte2_clear(cmap2_pte2p);
	tlb_flush((vm_offset_t)pc->pc_cmap2_addr);
	sched_unpin();
	mtx_unlock(&pc->pc_cmap_lock);
}

/*
 * pmap_zero_page_area zeros the specified hardware page by mapping
 * the page into KVM and using bzero to clear its contents.
 *
 * off and size may not cover an area beyond a single hardware page.
 */
void
pmap_zero_page_area(vm_page_t m, int off, int size)
{
	pt2_entry_t *cmap2_pte2p;
	struct pcpu *pc;

	sched_pin();
	pc = get_pcpu();
	cmap2_pte2p = pc->pc_cmap2_pte2p;
	mtx_lock(&pc->pc_cmap_lock);
	if (pte2_load(cmap2_pte2p) != 0)
		panic("%s: CMAP2 busy", __func__);
	pte2_store(cmap2_pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(m), PTE2_AP_KRW,
	    vm_page_pte2_attr(m)));
	if (off == 0 && size == PAGE_SIZE)
		pagezero(pc->pc_cmap2_addr);
	else
		bzero(pc->pc_cmap2_addr + off, size);
	pte2_clear(cmap2_pte2p);
	tlb_flush((vm_offset_t)pc->pc_cmap2_addr);
	sched_unpin();
	mtx_unlock(&pc->pc_cmap_lock);
}

/*
 * pmap_copy_page copies the specified (machine independent)
 * page by mapping the page into virtual memory and using
 * bcopy to copy the page, one machine dependent page at a
 * time.
 */
void
pmap_copy_page(vm_page_t src, vm_page_t dst)
{
	pt2_entry_t *cmap1_pte2p, *cmap2_pte2p;
	struct pcpu *pc;

	sched_pin();
	pc = get_pcpu();
	cmap1_pte2p = pc->pc_cmap1_pte2p;
	cmap2_pte2p = pc->pc_cmap2_pte2p;
	mtx_lock(&pc->pc_cmap_lock);
	if (pte2_load(cmap1_pte2p) != 0)
		panic("%s: CMAP1 busy", __func__);
	if (pte2_load(cmap2_pte2p) != 0)
		panic("%s: CMAP2 busy", __func__);
	pte2_store(cmap1_pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(src),
	    PTE2_AP_KR | PTE2_NM, vm_page_pte2_attr(src)));
	pte2_store(cmap2_pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(dst),
	    PTE2_AP_KRW, vm_page_pte2_attr(dst)));
	bcopy(pc->pc_cmap1_addr, pc->pc_cmap2_addr, PAGE_SIZE);
	pte2_clear(cmap1_pte2p);
	tlb_flush((vm_offset_t)pc->pc_cmap1_addr);
	pte2_clear(cmap2_pte2p);
	tlb_flush((vm_offset_t)pc->pc_cmap2_addr);
	sched_unpin();
	mtx_unlock(&pc->pc_cmap_lock);
}

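/*
 * Unmapped I/O buffers are allowed: pmap_copy_pages() below can copy page
 * lists directly through the per-CPU CMAP windows, so no permanent kernel
 * mapping of the buffer pages is required.
 */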
int unmapped_buf_allowed = 1;

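/*
 * Copy "xfersize" bytes between two, possibly unmapped, page lists: each
 * source page is mapped read-only through the per-CPU CMAP1 window and each
 * destination page read-write through CMAP2, one page pair at a time.
 */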
void
pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
    vm_offset_t b_offset, int xfersize)
{
	pt2_entry_t *cmap1_pte2p, *cmap2_pte2p;
	vm_page_t a_pg, b_pg;
	char *a_cp, *b_cp;
	vm_offset_t a_pg_offset, b_pg_offset;
	struct pcpu *pc;
	int cnt;

	sched_pin();
	pc = get_pcpu();
	cmap1_pte2p = pc->pc_cmap1_pte2p;
	cmap2_pte2p = pc->pc_cmap2_pte2p;
	mtx_lock(&pc->pc_cmap_lock);
	if (pte2_load(cmap1_pte2p) != 0)
		panic("pmap_copy_pages: CMAP1 busy");
	if (pte2_load(cmap2_pte2p) != 0)
		panic("pmap_copy_pages: CMAP2 busy");
	while (xfersize > 0) {
		a_pg = ma[a_offset >> PAGE_SHIFT];
		a_pg_offset = a_offset & PAGE_MASK;
		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
		b_pg = mb[b_offset >> PAGE_SHIFT];
		b_pg_offset = b_offset & PAGE_MASK;
		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
		pte2_store(cmap1_pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(a_pg),
		    PTE2_AP_KR | PTE2_NM, vm_page_pte2_attr(a_pg)));
		tlb_flush_local((vm_offset_t)pc->pc_cmap1_addr);
		pte2_store(cmap2_pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(b_pg),
		    PTE2_AP_KRW, vm_page_pte2_attr(b_pg)));
		tlb_flush_local((vm_offset_t)pc->pc_cmap2_addr);
		a_cp = pc->pc_cmap1_addr + a_pg_offset;
		b_cp = pc->pc_cmap2_addr + b_pg_offset;
		bcopy(a_cp, b_cp, cnt);
		a_offset += cnt;
		b_offset += cnt;
		xfersize -= cnt;
	}
	pte2_clear(cmap1_pte2p);
	tlb_flush((vm_offset_t)pc->pc_cmap1_addr);
	pte2_clear(cmap2_pte2p);
	tlb_flush((vm_offset_t)pc->pc_cmap2_addr);
	sched_unpin();
	mtx_unlock(&pc->pc_cmap_lock);
}

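/*
 * Map a single page transiently through the per-CPU "quick" PTE2 slot.
 * The mapping is private to the current CPU, hence the critical section,
 * and it stays valid only until pmap_quick_remove_page() is called.
 * A minimal usage sketch:
 *
 *	va = pmap_quick_enter_page(m);
 *	... access the page through va ...
 *	pmap_quick_remove_page(va);
 */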
vm_offset_t
pmap_quick_enter_page(vm_page_t m)
{
	struct pcpu *pc;
	pt2_entry_t *pte2p;

	critical_enter();
	pc = get_pcpu();
	pte2p = pc->pc_qmap_pte2p;

	KASSERT(pte2_load(pte2p) == 0, ("%s: PTE2 busy", __func__));

	pte2_store(pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(m), PTE2_AP_KRW,
	    vm_page_pte2_attr(m)));
	return (pc->pc_qmap_addr);
}

void
pmap_quick_remove_page(vm_offset_t addr)
{
	struct pcpu *pc;
	pt2_entry_t *pte2p;

	pc = get_pcpu();
	pte2p = pc->pc_qmap_pte2p;

	KASSERT(addr == pc->pc_qmap_addr, ("%s: invalid address", __func__));
	KASSERT(pte2_load(pte2p) != 0, ("%s: PTE2 not in use", __func__));

	pte2_clear(pte2p);
	tlb_flush(pc->pc_qmap_addr);
	critical_exit();
}

/*
 * Copy the range specified by src_addr/len
 * from the source map to the range dst_addr/len
 * in the destination map.
 *
 * This routine is only advisory and need not do anything.
 */
void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
    vm_offset_t src_addr)
{
	struct spglist free;
	vm_offset_t addr;
	vm_offset_t end_addr = src_addr + len;
	vm_offset_t nextva;

	if (dst_addr != src_addr)
		return;

	if (!pmap_is_current(src_pmap))
		return;

	rw_wlock(&pvh_global_lock);
	if (dst_pmap < src_pmap) {
		PMAP_LOCK(dst_pmap);
		PMAP_LOCK(src_pmap);
	} else {
		PMAP_LOCK(src_pmap);
		PMAP_LOCK(dst_pmap);
	}
	sched_pin();
	for (addr = src_addr; addr < end_addr; addr = nextva) {
		pt2_entry_t *src_pte2p, *dst_pte2p;
		vm_page_t dst_mpt2pg, src_mpt2pg;
		pt1_entry_t src_pte1;
		u_int pte1_idx;

		KASSERT(addr < VM_MAXUSER_ADDRESS,
		    ("%s: invalid to pmap_copy page tables", __func__));

		nextva = pte1_trunc(addr + PTE1_SIZE);
		if (nextva < addr)
			nextva = end_addr;

		pte1_idx = pte1_index(addr);
		src_pte1 = src_pmap->pm_pt1[pte1_idx];
		if (pte1_is_section(src_pte1)) {
			if ((addr & PTE1_OFFSET) != 0 ||
			    (addr + PTE1_SIZE) > end_addr)
				continue;
			if (dst_pmap->pm_pt1[pte1_idx] == 0 &&
			    (!pte1_is_managed(src_pte1) ||
			    pmap_pv_insert_pte1(dst_pmap, addr, src_pte1,
			    PMAP_ENTER_NORECLAIM))) {
				dst_pmap->pm_pt1[pte1_idx] = src_pte1 &
				    ~PTE1_W;
				dst_pmap->pm_stats.resident_count +=
				    PTE1_SIZE / PAGE_SIZE;
				pmap_pte1_mappings++;
			}
			continue;
		} else if (!pte1_is_link(src_pte1))
			continue;

		src_mpt2pg = PHYS_TO_VM_PAGE(pte1_link_pa(src_pte1));

		/*
		 * We leave PT2s linked from the PT1 even when they are no
		 * longer referenced, until all PT2s in a page are without
		 * reference.
		 *
		 * QQQ: It could be changed ...
		 */
#if 0 /* single_pt2_link_is_cleared */
		KASSERT(pt2_wirecount_get(src_mpt2pg, pte1_idx) > 0,
		    ("%s: source page table page is unused", __func__));
#else
		if (pt2_wirecount_get(src_mpt2pg, pte1_idx) == 0)
			continue;
#endif
		if (nextva > end_addr)
			nextva = end_addr;

		src_pte2p = pt2map_entry(addr);
		while (addr < nextva) {
			pt2_entry_t temp_pte2;

			temp_pte2 = pte2_load(src_pte2p);
			/*
			 * We only virtually copy managed pages.
			 */
			if (pte2_is_managed(temp_pte2)) {
				dst_mpt2pg = pmap_allocpte2(dst_pmap, addr,
				    PMAP_ENTER_NOSLEEP);
				if (dst_mpt2pg == NULL)
					goto out;
				dst_pte2p = pmap_pte2_quick(dst_pmap, addr);
				if (!pte2_is_valid(pte2_load(dst_pte2p)) &&
				    pmap_try_insert_pv_entry(dst_pmap, addr,
				    PHYS_TO_VM_PAGE(pte2_pa(temp_pte2)))) {
					/*
					 * Clear the wired, modified, and
					 * accessed (referenced) bits
					 * during the copy.
					 */
					temp_pte2 &= ~(PTE2_W | PTE2_A);
					temp_pte2 |= PTE2_NM;
					pte2_store(dst_pte2p, temp_pte2);
					dst_pmap->pm_stats.resident_count++;
				} else {
					SLIST_INIT(&free);
					if (pmap_unwire_pt2(dst_pmap, addr,
					    dst_mpt2pg, &free)) {
						pmap_tlb_flush(dst_pmap, addr);
						vm_page_free_pages_toq(&free,
						    false);
					}
					goto out;
				}
				if (pt2_wirecount_get(dst_mpt2pg, pte1_idx) >=
				    pt2_wirecount_get(src_mpt2pg, pte1_idx))
					break;
			}
			addr += PAGE_SIZE;
			src_pte2p++;
		}
	}
out:
	sched_unpin();
	rw_wunlock(&pvh_global_lock);
	PMAP_UNLOCK(src_pmap);
	PMAP_UNLOCK(dst_pmap);
}

/*
 * Increase the starting virtual address of the given mapping if a
 * different alignment might result in more section mappings.
 */
void
pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr, vm_size_t size)
{
	vm_offset_t pte1_offset;

	if (size < PTE1_SIZE)
		return;
	if (object != NULL && (object->flags & OBJ_COLORED) != 0)
		offset += ptoa(object->pg_color);
	pte1_offset = offset & PTE1_OFFSET;
	if (size - ((PTE1_SIZE - pte1_offset) & PTE1_OFFSET) < PTE1_SIZE ||
	    (*addr & PTE1_OFFSET) == pte1_offset)
		return;
	if ((*addr & PTE1_OFFSET) < pte1_offset)
		*addr = pte1_trunc(*addr) + pte1_offset;
	else
		*addr = pte1_roundup(*addr) + pte1_offset;
}

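/*
 * Make the given thread's pmap the active one on this CPU: update the
 * pmap's active CPU set, record the translation table base in the PCB,
 * and load it into TTBR.
 */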
void
pmap_activate(struct thread *td)
{
	pmap_t pmap, oldpmap;
	u_int cpuid, ttb;

	PDEBUG(9, printf("%s: td = %08x\n", __func__, (uint32_t)td));

	critical_enter();
	pmap = vmspace_pmap(td->td_proc->p_vmspace);
	oldpmap = PCPU_GET(curpmap);
	cpuid = PCPU_GET(cpuid);

#if defined(SMP)
	CPU_CLR_ATOMIC(cpuid, &oldpmap->pm_active);
	CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
#else
	CPU_CLR(cpuid, &oldpmap->pm_active);
	CPU_SET(cpuid, &pmap->pm_active);
#endif

	ttb = pmap_ttb_get(pmap);

	/*
	 * pmap_activate is for the current thread on the current cpu.
	 */
	td->td_pcb->pcb_pagedir = ttb;
	cp15_ttbr_set(ttb);
	PCPU_SET(curpmap, pmap);
	critical_exit();
}

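/*
 * Return the set of CPUs on which the given pmap is currently active.
 */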
void
pmap_active_cpus(pmap_t pmap, cpuset_t *res)
{
	*res = pmap->pm_active;
}

/*
 * Perform the pmap work for mincore(2). If the page is not both referenced
 * and modified by this pmap, returns its physical address so that the
 * caller can find other mappings.
 */
int
pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap)
{
	pt1_entry_t *pte1p, pte1;
	pt2_entry_t *pte2p, pte2;
	vm_paddr_t pa;
	bool managed;
	int val;

	PMAP_LOCK(pmap);
	pte1p = pmap_pte1(pmap, addr);
	pte1 = pte1_load(pte1p);
	if (pte1_is_section(pte1)) {
		pa = trunc_page(pte1_pa(pte1) | (addr & PTE1_OFFSET));
		managed = pte1_is_managed(pte1);
		val = MINCORE_PSIND(1) | MINCORE_INCORE;
		if (pte1_is_dirty(pte1))
			val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
		if (pte1 & PTE1_A)
			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
	} else if (pte1_is_link(pte1)) {
		pte2p = pmap_pte2(pmap, addr);
		pte2 = pte2_load(pte2p);
		pmap_pte2_release(pte2p);
		pa = pte2_pa(pte2);
		managed = pte2_is_managed(pte2);
		val = MINCORE_INCORE;
		if (pte2_is_dirty(pte2))
			val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
		if (pte2 & PTE2_A)
			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
	} else {
		managed = false;
		val = 0;
	}
	if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
	    (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && managed) {
		*pap = pa;
	}
	PMAP_UNLOCK(pmap);
	return (val);
}

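/*
 * Map a contiguous range of device memory into kernel virtual address
 * space, page by page, with device memory attributes. The whole range is
 * TLB-flushed once at the end.
 */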
void
pmap_kenter_device(vm_offset_t va, vm_size_t size, vm_paddr_t pa)
{
	vm_offset_t sva;
	uint32_t l2attr;

	KASSERT((size & PAGE_MASK) == 0,
	    ("%s: device mapping not page-sized", __func__));

	sva = va;
	l2attr = vm_memattr_to_pte2(VM_MEMATTR_DEVICE);
	while (size != 0) {
		pmap_kenter_prot_attr(va, pa, PTE2_AP_KRW, l2attr);
		va += PAGE_SIZE;
		pa += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	tlb_flush_range(sva, va - sva);
}

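/*
 * Tear down a device mapping previously created by pmap_kenter_device().
 */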
void
pmap_kremove_device(vm_offset_t va, vm_size_t size)
{
	vm_offset_t sva;

	KASSERT((size & PAGE_MASK) == 0,
	    ("%s: device mapping not page-sized", __func__));

	sva = va;
	while (size != 0) {
		pmap_kremove(va);
		va += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	tlb_flush_range(sva, va - sva);
}

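/*
 * Record the pmap's translation table base in the given PCB, where the
 * context switch code expects to find it.
 */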
void
pmap_set_pcb_pagedir(pmap_t pmap, struct pcb *pcb)
{

	pcb->pcb_pagedir = pmap_ttb_get(pmap);
}

/*
 * Clean the L1 data cache range by physical address.
 * The range must be within a single page.
 */
static void
pmap_dcache_wb_pou(vm_paddr_t pa, vm_size_t size, uint32_t attr)
{
	pt2_entry_t *cmap2_pte2p;
	struct pcpu *pc;

	KASSERT(((pa & PAGE_MASK) + size) <= PAGE_SIZE,
	    ("%s: not on single page", __func__));

	sched_pin();
	pc = get_pcpu();
	cmap2_pte2p = pc->pc_cmap2_pte2p;
	mtx_lock(&pc->pc_cmap_lock);
	if (pte2_load(cmap2_pte2p) != 0)
		panic("%s: CMAP2 busy", __func__);
	pte2_store(cmap2_pte2p, PTE2_KERN_NG(pa, PTE2_AP_KRW, attr));
	dcache_wb_pou((vm_offset_t)pc->pc_cmap2_addr + (pa & PAGE_MASK), size);
	pte2_clear(cmap2_pte2p);
	tlb_flush((vm_offset_t)pc->pc_cmap2_addr);
	sched_unpin();
	mtx_unlock(&pc->pc_cmap_lock);
}

/*
 * Sync an instruction cache range which is not mapped yet.
 */
void
cache_icache_sync_fresh(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{
	uint32_t len, offset;
	vm_page_t m;

	/* Write back the D-cache on the given address range. */
	offset = pa & PAGE_MASK;
	for ( ; size != 0; size -= len, pa += len, offset = 0) {
		len = min(PAGE_SIZE - offset, size);
		m = PHYS_TO_VM_PAGE(pa);
		KASSERT(m != NULL, ("%s: vm_page_t is null for %#x",
		    __func__, pa));
		pmap_dcache_wb_pou(pa, len, vm_page_pte2_attr(m));
	}
	/*
	 * The I-cache is VIPT. The only way to flush all virtual mappings
	 * of a given physical address is to invalidate the whole I-cache.
	 */
	icache_inv_all();
}

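/*
 * Synchronize the I-cache with the D-cache after code has been written
 * into the given range of the given pmap.
 */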
void
pmap_sync_icache(pmap_t pmap, vm_offset_t va, vm_size_t size)
{

	/* Write back the D-cache on the given address range. */
	if (va >= VM_MIN_KERNEL_ADDRESS) {
		dcache_wb_pou(va, size);
	} else {
		uint32_t len, offset;
		vm_paddr_t pa;
		vm_page_t m;

		offset = va & PAGE_MASK;
		for ( ; size != 0; size -= len, va += len, offset = 0) {
			pa = pmap_extract(pmap, va); /* offset is preserved */
			len = min(PAGE_SIZE - offset, size);
			m = PHYS_TO_VM_PAGE(pa);
			KASSERT(m != NULL, ("%s: vm_page_t is null for %#x",
			    __func__, pa));
			pmap_dcache_wb_pou(pa, len, vm_page_pte2_attr(m));
		}
	}
	/*
	 * The I-cache is VIPT. The only way to flush all virtual mappings
	 * of a given physical address is to invalidate the whole I-cache.
	 */
	icache_inv_all();
}

/*
 * The implementation of pmap_fault() uses the IN_RANGE2() macro, which
 * depends on the fact that the given range size is a power of 2.
 */
CTASSERT(powerof2(NB_IN_PT1));
CTASSERT(powerof2(PT2MAP_SIZE));

#define IN_RANGE2(addr, start, size)	\
    ((vm_offset_t)(start) == ((vm_offset_t)(addr) & ~((size) - 1)))
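
/*
 * For example, IN_RANGE2(far, PT2MAP, PT2MAP_SIZE) is true exactly when
 * "far" lies within [PT2MAP, PT2MAP + PT2MAP_SIZE): masking the address
 * with ~(size - 1) rounds it down to a size-aligned boundary, which can
 * equal "start" only when "start" itself is size-aligned and the address
 * falls inside the power-of-2 sized range.
 */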

/*
 * Handle access and R/W emulation faults.
 */
int
pmap_fault(pmap_t pmap, vm_offset_t far, uint32_t fsr, int idx, bool usermode)
{
	pt1_entry_t *pte1p, pte1;
	pt2_entry_t *pte2p, pte2;

	if (pmap == NULL)
		pmap = kernel_pmap;

	/*
	 * In the kernel, we should never get an abort with a FAR that lies
	 * within the pmap->pm_pt1 or PT2MAP address spaces. If it happens,
	 * stop here, print out a useful abort message, and possibly enter
	 * the debugger; otherwise it likely ends in a never-ending loop of
	 * aborts.
	 */
	if (__predict_false(IN_RANGE2(far, pmap->pm_pt1, NB_IN_PT1))) {
		/*
		 * All L1 tables should always be mapped and present.
		 * However, we check only the current one here. For user
		 * mode, only a permission abort from a malicious user is
		 * not fatal, as well as an alignment abort, which may have
		 * higher priority.
		 */
		if (!usermode || (idx != FAULT_ALIGN && idx != FAULT_PERM_L2)) {
			CTR4(KTR_PMAP, "%s: pmap %#x pm_pt1 %#x far %#x",
			    __func__, pmap, pmap->pm_pt1, far);
			panic("%s: pm_pt1 abort", __func__);
		}
		return (KERN_INVALID_ADDRESS);
	}
	if (__predict_false(IN_RANGE2(far, PT2MAP, PT2MAP_SIZE))) {
		/*
		 * PT2MAP should always be mapped and present in the current
		 * L1 table. However, only existing L2 tables are mapped
		 * in PT2MAP. For user mode, only an L2 translation abort
		 * and a permission abort from a malicious user are not
		 * fatal, as well as an alignment abort, which may have
		 * higher priority.
		 */
		if (!usermode || (idx != FAULT_ALIGN &&
		    idx != FAULT_TRAN_L2 && idx != FAULT_PERM_L2)) {
			CTR4(KTR_PMAP, "%s: pmap %#x PT2MAP %#x far %#x",
			    __func__, pmap, PT2MAP, far);
			panic("%s: PT2MAP abort", __func__);
		}
		return (KERN_INVALID_ADDRESS);
	}

	/*
	 * A pmap lock is used below to handle access and R/W emulation
	 * aborts. They were handled by atomic operations before, so some
	 * analysis of the new situation is needed to answer the following
	 * question: Is it safe to use the lock even for these aborts?
	 *
	 * There may happen two cases in general:
	 *
	 * (1) Aborts while the pmap lock is locked already - this should
	 * not happen as the pmap lock is not recursive. However, under the
	 * pmap lock only internal kernel data should be accessed and such
	 * data should be mapped with the A bit set and the NM bit cleared.
	 * If a double abort happens, then the mapping of the data which
	 * caused it must be fixed. Further, all new mappings are always
	 * made with the A bit set and the bit can be cleared only on
	 * managed mappings.
	 *
	 * (2) Aborts while other locks are held - this can already happen.
	 * However, there is no difference here whether it's an access or
	 * R/W emulation abort, or some other abort.
	 */

	PMAP_LOCK(pmap);
#ifdef INVARIANTS
	pte1 = pte1_load(pmap_pte1(pmap, far));
	if (pte1_is_link(pte1)) {
		/*
		 * Check in advance that the associated L2 page table is
		 * mapped into PT2MAP space. Note that a faulty access to
		 * an unmapped L2 page table is caught by the more general
		 * check above, where "far" is checked not to lie in PT2MAP
		 * space. Note also that the L1 page table and PT2TAB always
		 * exist and are mapped.
		 */
		pte2 = pt2tab_load(pmap_pt2tab_entry(pmap, far));
		if (!pte2_is_valid(pte2))
			panic("%s: missing L2 page table (%p, %#x)",
			    __func__, pmap, far);
	}
#endif
#ifdef SMP
	/*
	 * Special treatment is required due to the break-before-make
	 * approach used when a pte1 is updated for a userland mapping
	 * during section promotion or demotion. If not caught here,
	 * pmap_enter() can find a section mapping on the faulting address.
	 * That is not allowed.
	 */
	if (idx == FAULT_TRAN_L1 && usermode && cp15_ats1cur_check(far) == 0) {
		PMAP_UNLOCK(pmap);
		return (KERN_SUCCESS);
	}
#endif
	/*
	 * Access bits for page and section. Note that the entry
	 * is not in the TLB yet, so a TLB flush is not necessary.
	 *
	 * QQQ: This is hardware emulation, we do not call userret()
	 * for aborts from user mode.
	 */
	if (idx == FAULT_ACCESS_L2) {
		pte1 = pte1_load(pmap_pte1(pmap, far));
		if (pte1_is_link(pte1)) {
			/* The L2 page table should exist and be mapped. */
			pte2p = pt2map_entry(far);
			pte2 = pte2_load(pte2p);
			if (pte2_is_valid(pte2)) {
				pte2_store(pte2p, pte2 | PTE2_A);
				PMAP_UNLOCK(pmap);
				return (KERN_SUCCESS);
			}
		} else {
			/*
			 * We got an L2 access fault but PTE1 is not a link.
			 * Probably some race happened; do nothing.
			 */
			CTR3(KTR_PMAP, "%s: FAULT_ACCESS_L2 - pmap %#x far %#x",
			    __func__, pmap, far);
			PMAP_UNLOCK(pmap);
			return (KERN_SUCCESS);
		}
	}
	if (idx == FAULT_ACCESS_L1) {
		pte1p = pmap_pte1(pmap, far);
		pte1 = pte1_load(pte1p);
		if (pte1_is_section(pte1)) {
			pte1_store(pte1p, pte1 | PTE1_A);
			PMAP_UNLOCK(pmap);
			return (KERN_SUCCESS);
		} else {
			/*
			 * We got an L1 access fault but PTE1 is not a
			 * section mapping. Probably some race happened;
			 * do nothing.
			 */
			CTR3(KTR_PMAP, "%s: FAULT_ACCESS_L1 - pmap %#x far %#x",
			    __func__, pmap, far);
			PMAP_UNLOCK(pmap);
			return (KERN_SUCCESS);
		}
	}

	/*
	 * Handle modify bits for page and section. Note that the modify
	 * bit is emulated by software, so PTEx_RO is the software read-only
	 * bit and the PTEx_NM flag is the real hardware read-only bit.
	 *
	 * QQQ: This is hardware emulation, we do not call userret()
	 * for aborts from user mode.
	 */
	if ((fsr & FSR_WNR) && (idx == FAULT_PERM_L2)) {
		pte1 = pte1_load(pmap_pte1(pmap, far));
		if (pte1_is_link(pte1)) {
			/* The L2 page table should exist and be mapped. */
			pte2p = pt2map_entry(far);
			pte2 = pte2_load(pte2p);
			if (pte2_is_valid(pte2) && !(pte2 & PTE2_RO) &&
			    (pte2 & PTE2_NM)) {
				pte2_store(pte2p, pte2 & ~PTE2_NM);
				tlb_flush(trunc_page(far));
				PMAP_UNLOCK(pmap);
				return (KERN_SUCCESS);
			}
		} else {
			/*
			 * We got an L2 permission fault but PTE1 is not a
			 * link. Probably some race happened; do nothing.
			 */
			CTR3(KTR_PMAP, "%s: FAULT_PERM_L2 - pmap %#x far %#x",
			    __func__, pmap, far);
			PMAP_UNLOCK(pmap);
			return (KERN_SUCCESS);
		}
	}
	if ((fsr & FSR_WNR) && (idx == FAULT_PERM_L1)) {
		pte1p = pmap_pte1(pmap, far);
		pte1 = pte1_load(pte1p);
		if (pte1_is_section(pte1)) {
			if (!(pte1 & PTE1_RO) && (pte1 & PTE1_NM)) {
				pte1_store(pte1p, pte1 & ~PTE1_NM);
				tlb_flush(pte1_trunc(far));
				PMAP_UNLOCK(pmap);
				return (KERN_SUCCESS);
			}
		} else {
			/*
			 * We got an L1 permission fault but PTE1 is not a
			 * section mapping. Probably some race happened;
			 * do nothing.
			 */
			CTR3(KTR_PMAP, "%s: FAULT_PERM_L1 - pmap %#x far %#x",
			    __func__, pmap, far);
			PMAP_UNLOCK(pmap);
			return (KERN_SUCCESS);
		}
	}

	/*
	 * QQQ: The previous code, mainly the fast handling of access and
	 * modify bit aborts, could be moved to ASM. From here on we deal
	 * with the aborts that are not fast.
	 */
	PMAP_UNLOCK(pmap);
	return (KERN_FAILURE);
}

#if defined(PMAP_DEBUG)
/*
 * Note: this reuses the KVA used by the pmap_zero_page() function!
 */
static void
pmap_zero_page_check(vm_page_t m)
{
	pt2_entry_t *cmap2_pte2p;
	uint32_t *p, *end;
	struct pcpu *pc;

	sched_pin();
	pc = get_pcpu();
	cmap2_pte2p = pc->pc_cmap2_pte2p;
	mtx_lock(&pc->pc_cmap_lock);
	if (pte2_load(cmap2_pte2p) != 0)
		panic("%s: CMAP2 busy", __func__);
	pte2_store(cmap2_pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(m), PTE2_AP_KRW,
	    vm_page_pte2_attr(m)));
	end = (uint32_t *)(pc->pc_cmap2_addr + PAGE_SIZE);
	for (p = (uint32_t *)pc->pc_cmap2_addr; p < end; p++)
		if (*p != 0)
			panic("%s: page %p not zero, va: %p", __func__, m,
			    pc->pc_cmap2_addr);
	pte2_clear(cmap2_pte2p);
	tlb_flush((vm_offset_t)pc->pc_cmap2_addr);
	sched_unpin();
	mtx_unlock(&pc->pc_cmap_lock);
}

int
pmap_pid_dump(int pid)
{
	pmap_t pmap;
	struct proc *p;
	int npte2 = 0;
	int i, j, index;

	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		if (p->p_pid != pid || p->p_vmspace == NULL)
			continue;
		index = 0;
		pmap = vmspace_pmap(p->p_vmspace);
		for (i = 0; i < NPTE1_IN_PT1; i++) {
			pt1_entry_t pte1;
			pt2_entry_t *pte2p, pte2;
			vm_offset_t base, va;
			vm_paddr_t pa;
			vm_page_t m;

			base = i << PTE1_SHIFT;
			pte1 = pte1_load(&pmap->pm_pt1[i]);

			if (pte1_is_section(pte1)) {
				/*
				 * QQQ: Do something here!
				 */
			} else if (pte1_is_link(pte1)) {
				for (j = 0; j < NPTE2_IN_PT2; j++) {
					va = base + (j << PAGE_SHIFT);
					if (va >= VM_MIN_KERNEL_ADDRESS) {
						if (index) {
							index = 0;
							printf("\n");
						}
						sx_sunlock(&allproc_lock);
						return (npte2);
					}
					pte2p = pmap_pte2(pmap, va);
					pte2 = pte2_load(pte2p);
					pmap_pte2_release(pte2p);
					if (!pte2_is_valid(pte2))
						continue;

					pa = pte2_pa(pte2);
					m = PHYS_TO_VM_PAGE(pa);
					printf("va: 0x%x, pa: 0x%x, w: %d, "
					    "f: 0x%x", va, pa,
					    m->ref_count, m->flags);
					npte2++;
					index++;
					if (index >= 2) {
						index = 0;
						printf("\n");
					} else {
						printf(" ");
					}
				}
			}
		}
	}
	sx_sunlock(&allproc_lock);
	return (npte2);
}

#endif

#ifdef DDB
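/*
 * Return a pointer to the PTE2 for "va" within the given pmap, for use
 * from DDB. If the pmap is not the current one, its L2 page table page is
 * temporarily mapped through the PMAP3/PADDR3 slot.
 */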
static pt2_entry_t *
pmap_pte2_ddb(pmap_t pmap, vm_offset_t va)
{
	pt1_entry_t pte1;
	vm_paddr_t pt2pg_pa;

	pte1 = pte1_load(pmap_pte1(pmap, va));
	if (!pte1_is_link(pte1))
		return (NULL);

	if (pmap_is_current(pmap))
		return (pt2map_entry(va));

	/* Note that the L2 page table size is not equal to PAGE_SIZE. */
	pt2pg_pa = trunc_page(pte1_link_pa(pte1));
	if (pte2_pa(pte2_load(PMAP3)) != pt2pg_pa) {
		pte2_store(PMAP3, PTE2_KPT(pt2pg_pa));
#ifdef SMP
		PMAP3cpu = PCPU_GET(cpuid);
#endif
		tlb_flush_local((vm_offset_t)PADDR3);
	}
#ifdef SMP
	else if (PMAP3cpu != PCPU_GET(cpuid)) {
		PMAP3cpu = PCPU_GET(cpuid);
		tlb_flush_local((vm_offset_t)PADDR3);
	}
#endif
	return (PADDR3 + (arm32_btop(va) & (NPTE2_IN_PG - 1)));
}

static void
dump_pmap(pmap_t pmap)
{

	printf("pmap %p\n", pmap);
	printf(" pm_pt1: %p\n", pmap->pm_pt1);
	printf(" pm_pt2tab: %p\n", pmap->pm_pt2tab);
	printf(" pm_active: 0x%08lX\n", pmap->pm_active.__bits[0]);
}

DB_SHOW_COMMAND(pmaps, pmap_list_pmaps)
{
	pmap_t pmap;

	LIST_FOREACH(pmap, &allpmaps, pm_list) {
		dump_pmap(pmap);
	}
}

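/*
 * Condense the memory-attribute bits of a PTE2 into a small class number
 * for printing: the B and C bits form the low two bits and TEX[0] is
 * folded in as the third bit.
 */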
static int
pte2_class(pt2_entry_t pte2)
{
	int cls;

	cls = (pte2 >> 2) & 0x03;
	cls |= (pte2 >> 4) & 0x04;
	return (cls);
}

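/*
 * Dump a 1 MB section mapping. Not implemented yet; see also the QQQ
 * note in pmap_pid_dump().
 */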
static void
dump_section(pmap_t pmap, uint32_t pte1_idx)
{
}

static void
dump_link(pmap_t pmap, uint32_t pte1_idx, bool invalid_ok)
{
	uint32_t i;
	vm_offset_t va;
	pt2_entry_t *pte2p, pte2;
	vm_page_t m;

	va = pte1_idx << PTE1_SHIFT;
	pte2p = pmap_pte2_ddb(pmap, va);
	for (i = 0; i < NPTE2_IN_PT2; i++, pte2p++, va += PAGE_SIZE) {
		pte2 = pte2_load(pte2p);
		if (pte2 == 0)
			continue;
		if (!pte2_is_valid(pte2)) {
			printf(" 0x%08X: 0x%08X", va, pte2);
			if (!invalid_ok)
				printf(" - not valid !!!");
			printf("\n");
			continue;
		}
		m = PHYS_TO_VM_PAGE(pte2_pa(pte2));
		printf(" 0x%08X: 0x%08X, TEX%d, s:%d, g:%d, m:%p", va, pte2,
		    pte2_class(pte2), !!(pte2 & PTE2_S), !(pte2 & PTE2_NG), m);
		if (m != NULL) {
			printf(" v:%d w:%d f:0x%04X\n", m->valid,
			    m->ref_count, m->flags);
		} else {
			printf("\n");
		}
	}
}

static __inline bool
is_pv_chunk_space(vm_offset_t va)
{

	if ((((vm_offset_t)pv_chunkbase) <= va) &&
	    (va < ((vm_offset_t)pv_chunkbase + PAGE_SIZE * pv_maxchunks)))
		return (true);
	return (false);
}

DB_SHOW_COMMAND(pmap, pmap_pmap_print)
{
	/* XXX convert args. */
	pmap_t pmap = (pmap_t)addr;
	pt1_entry_t pte1;
	pt2_entry_t pte2;
	vm_offset_t va, eva;
	vm_page_t m;
	uint32_t i;
	bool invalid_ok, dump_link_ok, dump_pv_chunk;

	if (have_addr) {
		pmap_t pm;

		LIST_FOREACH(pm, &allpmaps, pm_list)
			if (pm == pmap)
				break;
		if (pm == NULL) {
			printf("given pmap %p is not in allpmaps list\n", pmap);
			return;
		}
	} else
		pmap = PCPU_GET(curpmap);

	eva = (modif[0] == 'u') ? VM_MAXUSER_ADDRESS : 0xFFFFFFFF;
	dump_pv_chunk = false; /* XXX evaluate from modif[] */

	printf("pmap: 0x%08X\n", (uint32_t)pmap);
	printf("PT2MAP: 0x%08X\n", (uint32_t)PT2MAP);
	printf("pt2tab: 0x%08X\n", (uint32_t)pmap->pm_pt2tab);

	for (i = 0; i < NPTE1_IN_PT1; i++) {
		pte1 = pte1_load(&pmap->pm_pt1[i]);
		if (pte1 == 0)
			continue;
		va = i << PTE1_SHIFT;
		if (va >= eva)
			break;

		if (pte1_is_section(pte1)) {
			printf("0x%08X: Section 0x%08X, s:%d g:%d\n", va, pte1,
			    !!(pte1 & PTE1_S), !(pte1 & PTE1_NG));
			dump_section(pmap, i);
		} else if (pte1_is_link(pte1)) {
			dump_link_ok = true;
			invalid_ok = false;
			pte2 = pte2_load(pmap_pt2tab_entry(pmap, va));
			m = PHYS_TO_VM_PAGE(pte1_link_pa(pte1));
			printf("0x%08X: Link 0x%08X, pt2tab: 0x%08X m: %p",
			    va, pte1, pte2, m);
			if (is_pv_chunk_space(va)) {
				printf(" - pv_chunk space");
				if (dump_pv_chunk)
					invalid_ok = true;
				else
					dump_link_ok = false;
			} else if (m != NULL)
				printf(" w:%d w2:%u", m->ref_count,
				    pt2_wirecount_get(m, pte1_index(va)));
			if (pte2 == 0)
				printf(" !!! pt2tab entry is ZERO");
			else if (pte2_pa(pte1) != pte2_pa(pte2))
				printf(" !!! pt2tab entry is DIFFERENT - m: %p",
				    PHYS_TO_VM_PAGE(pte2_pa(pte2)));
			printf("\n");
			if (dump_link_ok)
				dump_link(pmap, i, invalid_ok);
		} else
			printf("0x%08X: Invalid entry 0x%08X\n", va, pte1);
	}
}

static void
dump_pt2tab(pmap_t pmap)
{
	uint32_t i;
	pt2_entry_t pte2;
	vm_offset_t va;
	vm_paddr_t pa;
	vm_page_t m;

	printf("PT2TAB:\n");
	for (i = 0; i < PT2TAB_ENTRIES; i++) {
		pte2 = pte2_load(&pmap->pm_pt2tab[i]);
		if (!pte2_is_valid(pte2))
			continue;
		va = i << PT2TAB_SHIFT;
		pa = pte2_pa(pte2);
		m = PHYS_TO_VM_PAGE(pa);
		printf(" 0x%08X: 0x%08X, TEX%d, s:%d, m:%p", va, pte2,
		    pte2_class(pte2), !!(pte2 & PTE2_S), m);
		if (m != NULL)
			printf(", w: %d, f: 0x%04X, pidx: %lld",
			    m->ref_count, m->flags, m->pindex);
		printf("\n");
	}
}

DB_SHOW_COMMAND(pmap_pt2tab, pmap_pt2tab_print)
{
	/* XXX convert args. */
	pmap_t pmap = (pmap_t)addr;
	pt1_entry_t pte1;
	pt2_entry_t pte2;
	vm_offset_t va;
	uint32_t i, start;

	if (have_addr) {
		printf("supported only on current pmap\n");
		return;
	}

	pmap = PCPU_GET(curpmap);
	printf("curpmap: 0x%08X\n", (uint32_t)pmap);
	printf("PT2MAP: 0x%08X\n", (uint32_t)PT2MAP);
	printf("pt2tab: 0x%08X\n", (uint32_t)pmap->pm_pt2tab);

	start = pte1_index((vm_offset_t)PT2MAP);
	for (i = start; i < (start + NPT2_IN_PT2TAB); i++) {
		pte1 = pte1_load(&pmap->pm_pt1[i]);
		if (pte1 == 0)
			continue;
		va = i << PTE1_SHIFT;
		if (pte1_is_section(pte1)) {
			printf("0x%08X: Section 0x%08X, s:%d\n", va, pte1,
			    !!(pte1 & PTE1_S));
			dump_section(pmap, i);
		} else if (pte1_is_link(pte1)) {
			pte2 = pte2_load(pmap_pt2tab_entry(pmap, va));
			printf("0x%08X: Link 0x%08X, pt2tab: 0x%08X\n", va,
			    pte1, pte2);
			if (pte2 == 0)
				printf(" !!! pt2tab entry is ZERO\n");
		} else
			printf("0x%08X: Invalid entry 0x%08X\n", va, pte1);
	}
	dump_pt2tab(pmap);
}
#endif