/*-
 * SPDX-License-Identifier: BSD-3-Clause AND BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1991 Regents of the University of California.
 * Copyright (c) 1994 John S. Dyson
 * Copyright (c) 1994 David Greenman
 * Copyright (c) 2005-2010 Alan L. Cox <alc@cs.rice.edu>
 * Copyright (c) 2014-2016 Svatopluk Kraus <skra@FreeBSD.org>
 * Copyright (c) 2014-2016 Michal Meloun <mmel@FreeBSD.org>
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
 */
/*-
 * Copyright (c) 2003 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Jake Burkholder,
 * Safeport Network Services, and Network Associates Laboratories, the
 * Security Research Division of Network Associates, Inc. under
 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
 * CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Manages physical address maps.
 *
 * Since the information managed by this module is
 * also stored by the logical address mapping module,
 * this module may throw away valid virtual-to-physical
 * mappings at almost any time.  However, invalidations
 * of virtual-to-physical mappings must be done as
 * requested.
 *
 * In order to cope with hardware architectures which
 * make virtual-to-physical map invalidates expensive,
 * this module may delay invalidate or reduce protection
 * operations until such time as they are actually
 * necessary.  This module is given full information as
 * to which processors are currently using which maps,
 * and to when physical maps must be made correct.
 */

#include "opt_vm.h"
#include "opt_pmap.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/malloc.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/sf_buf.h>
#include <sys/smp.h>
#include <sys/sched.h>
#include <sys/sysctl.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <machine/physmem.h>

#include <vm/vm.h>
#include <vm/uma.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_phys.h>
#include <vm/vm_extern.h>
#include <vm/vm_reserv.h>
#include <sys/mutex.h>

#include <machine/md_var.h>
#include <machine/pmap_var.h>
#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/sf_buf.h>
#ifdef SMP
#include <machine/smp.h>
#endif
#ifndef PMAP_SHPGPERPROC
#define PMAP_SHPGPERPROC	200
#endif

#ifndef DIAGNOSTIC
#define PMAP_INLINE	__inline
#else
#define PMAP_INLINE
#endif

#ifdef PMAP_DEBUG
static void pmap_zero_page_check(vm_page_t m);
void pmap_debug(int level);
int pmap_pid_dump(int pid);

#define PDEBUG(_lev_, _stat_)				\
	if (pmap_debug_level >= (_lev_))		\
		((_stat_))
#define dprintf printf
int pmap_debug_level = 1;
#else	/* PMAP_DEBUG */
#define PDEBUG(_lev_, _stat_)	/* Nothing */
#define dprintf(x, arg...)
#endif	/* PMAP_DEBUG */

/*
 * Level 2 page tables map definition ('max' is excluded).
 */

#define PT2V_MIN_ADDRESS	((vm_offset_t)PT2MAP)
#define PT2V_MAX_ADDRESS	((vm_offset_t)PT2MAP + PT2MAP_SIZE)

#define UPT2V_MIN_ADDRESS	((vm_offset_t)PT2MAP)
#define UPT2V_MAX_ADDRESS	\
    ((vm_offset_t)(PT2MAP + (KERNBASE >> PT2MAP_SHIFT)))

/*
 * Promotion to a 1MB (PTE1) page mapping requires that the corresponding
 * 4KB (PTE2) page mappings have identical settings for the following fields:
 */
#define PTE2_PROMOTE	(PTE2_V | PTE2_A | PTE2_NM | PTE2_S | PTE2_NG |	\
			 PTE2_NX | PTE2_RO | PTE2_U | PTE2_W |		\
			 PTE2_ATTR_MASK)

#define PTE1_PROMOTE	(PTE1_V | PTE1_A | PTE1_NM | PTE1_S | PTE1_NG |	\
			 PTE1_NX | PTE1_RO | PTE1_U | PTE1_W |		\
			 PTE1_ATTR_MASK)

#define ATTR_TO_L1(l2_attr)	((((l2_attr) & L2_TEX0) ? L1_S_TEX0 : 0) | \
				 (((l2_attr) & L2_C)   ? L1_S_C    : 0) | \
				 (((l2_attr) & L2_B)   ? L1_S_B    : 0) | \
				 (((l2_attr) & PTE2_A) ? PTE1_A    : 0) | \
				 (((l2_attr) & PTE2_NM) ? PTE1_NM  : 0) | \
				 (((l2_attr) & PTE2_S) ? PTE1_S    : 0) | \
				 (((l2_attr) & PTE2_NG) ? PTE1_NG  : 0) | \
				 (((l2_attr) & PTE2_NX) ? PTE1_NX  : 0) | \
				 (((l2_attr) & PTE2_RO) ? PTE1_RO  : 0) | \
				 (((l2_attr) & PTE2_U)  ? PTE1_U   : 0) | \
				 (((l2_attr) & PTE2_W)  ? PTE1_W   : 0))

#define ATTR_TO_L2(l1_attr)	((((l1_attr) & L1_S_TEX0) ? L2_TEX0 : 0) | \
				 (((l1_attr) & L1_S_C)    ? L2_C    : 0) | \
				 (((l1_attr) & L1_S_B)    ? L2_B    : 0) | \
				 (((l1_attr) & PTE1_A)    ? PTE2_A  : 0) | \
				 (((l1_attr) & PTE1_NM)   ? PTE2_NM : 0) | \
				 (((l1_attr) & PTE1_S)    ? PTE2_S  : 0) | \
				 (((l1_attr) & PTE1_NG)   ? PTE2_NG : 0) | \
				 (((l1_attr) & PTE1_NX)   ? PTE2_NX : 0) | \
				 (((l1_attr) & PTE1_RO)   ? PTE2_RO : 0) | \
				 (((l1_attr) & PTE1_U)    ? PTE2_U  : 0) | \
				 (((l1_attr) & PTE1_W)    ? PTE2_W  : 0))

/*
 * PTE2 descriptors creation macros.
 */
#define PTE2_ATTR_DEFAULT	vm_memattr_to_pte2(VM_MEMATTR_DEFAULT)
#define PTE2_ATTR_PT		vm_memattr_to_pte2(pt_memattr)

#define PTE2_KPT(pa)	PTE2_KERN(pa, PTE2_AP_KRW, PTE2_ATTR_PT)
#define PTE2_KPT_NG(pa)	PTE2_KERN_NG(pa, PTE2_AP_KRW, PTE2_ATTR_PT)

#define PTE2_KRW(pa)	PTE2_KERN(pa, PTE2_AP_KRW, PTE2_ATTR_DEFAULT)
#define PTE2_KRO(pa)	PTE2_KERN(pa, PTE2_AP_KR, PTE2_ATTR_DEFAULT)

#define PV_STATS
#ifdef PV_STATS
#define PV_STAT(x)	do { x ; } while (0)
#else
#define PV_STAT(x)	do { } while (0)
#endif
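
/*
 * For illustration (not a definitive reference), the attribute translation
 * macros above are used herein e.g. as follows: pmap_bootstrap_prepare()
 * computes the L1 section attributes for the initial kernel mappings with
 *
 *	l1_attr = ATTR_TO_L1(PTE2_ATTR_DEFAULT);
 *
 * and pmap_dump_kextract() synthesizes a PTE2 from a section's PTE1 with
 * ATTR_TO_L2(pte1). Both macros translate attribute bits only; they do not
 * touch the descriptor type bits.
 */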

/*
 * The boot_pt1 is used temporarily in the very early boot stage as L1 page
 * table. We can init many things with no memory allocation thanks to its
 * static allocation and this brings two main advantages:
 * (1) other cores can be started very simply,
 * (2) various boot loaders can be supported as its arguments can be processed
 *     in virtual address space and can be moved to a safe location before
 *     the first allocation happens.
 * The only disadvantage is that boot_pt1 is used only in the very early boot
 * stage. However, the table is uninitialized and so lies in bss. Therefore
 * the kernel image size is not influenced.
 *
 * QQQ: In the future, maybe, boot_pt1 can be used for soft reset and
 *      CPU suspend/resume game.
 */
extern pt1_entry_t boot_pt1[];

vm_paddr_t base_pt1;
pt1_entry_t *kern_pt1;
pt2_entry_t *kern_pt2tab;
pt2_entry_t *PT2MAP;

static uint32_t ttb_flags;
static vm_memattr_t pt_memattr;
ttb_entry_t pmap_kern_ttb;

struct pmap kernel_pmap_store;
LIST_HEAD(pmaplist, pmap);
static struct pmaplist allpmaps;
static struct mtx allpmaps_lock;

vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */

static vm_offset_t kernel_vm_end_new;
vm_offset_t kernel_vm_end = KERNBASE + NKPT2PG * NPT2_IN_PG * PTE1_SIZE;
vm_offset_t vm_max_kernel_address;
vm_paddr_t kernel_l1pa;

static struct rwlock __aligned(CACHE_LINE_SIZE) pvh_global_lock;

/*
 * Data for the pv entry allocation mechanism
 */
static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
static struct md_page *pv_table; /* XXX: Is only the pv_list in md_page used? */
static int shpgperproc = PMAP_SHPGPERPROC;

struct pv_chunk *pv_chunkbase;	/* KVA block for pv_chunks */
int pv_maxchunks;		/* How many chunks we have KVA for */
vm_offset_t pv_vafree;		/* freelist stored in the PTE */

vm_paddr_t first_managed_pa;
#define	pa_to_pvh(pa)	(&pv_table[pte1_index(pa - first_managed_pa)])

/*
 * All those kernel PT submaps that BSD is so fond of
 */
caddr_t _tmppt = 0;

/*
 * Crashdump maps.
 */
static caddr_t crashdumpmap;

static pt2_entry_t *PMAP1 = NULL, *PMAP2;
static pt2_entry_t *PADDR1 = NULL, *PADDR2;
#ifdef DDB
static pt2_entry_t *PMAP3;
static pt2_entry_t *PADDR3;
static int PMAP3cpu __unused; /* for SMP only */
#endif
#ifdef SMP
static int PMAP1cpu;
static int PMAP1changedcpu;
SYSCTL_INT(_debug, OID_AUTO, PMAP1changedcpu, CTLFLAG_RD,
    &PMAP1changedcpu, 0,
    "Number of times pmap_pte2_quick changed CPU with same PMAP1");
#endif
static int PMAP1changed;
SYSCTL_INT(_debug, OID_AUTO, PMAP1changed, CTLFLAG_RD,
    &PMAP1changed, 0,
    "Number of times pmap_pte2_quick changed PMAP1");
static int PMAP1unchanged;
SYSCTL_INT(_debug, OID_AUTO, PMAP1unchanged, CTLFLAG_RD,
    &PMAP1unchanged, 0,
    "Number of times pmap_pte2_quick didn't change PMAP1");
static struct mtx PMAP2mutex;

/*
 * Internal flags for pmap_enter()'s helper functions.
 */
#define	PMAP_ENTER_NORECLAIM	0x1000000	/* Don't reclaim PV entries. */
#define	PMAP_ENTER_NOREPLACE	0x2000000	/* Don't replace mappings. */

static __inline void pt2_wirecount_init(vm_page_t m);
static boolean_t pmap_demote_pte1(pmap_t pmap, pt1_entry_t *pte1p,
    vm_offset_t va);
static int pmap_enter_pte1(pmap_t pmap, vm_offset_t va, pt1_entry_t pte1,
    u_int flags, vm_page_t m);
void cache_icache_sync_fresh(vm_offset_t va, vm_paddr_t pa, vm_size_t size);

/*
 * Function to set the debug level of the pmap code.
 */
#ifdef PMAP_DEBUG
void
pmap_debug(int level)
{

	pmap_debug_level = level;
	dprintf("pmap_debug: level=%d\n", pmap_debug_level);
}
#endif /* PMAP_DEBUG */

/*
 * This table must correspond with memory attribute configuration in vm.h.
 * First entry is used for normal system mapping.
 *
 * Device memory is always marked as shared.
 * Normal memory is shared only in the SMP case.
 * The NOS (not outer shareable) bits are not used yet.
 * Class 6 cannot be used on ARM11.
 */
#define TEXDEF_TYPE_SHIFT	0
#define TEXDEF_TYPE_MASK	0x3
#define TEXDEF_INNER_SHIFT	2
#define TEXDEF_INNER_MASK	0x3
#define TEXDEF_OUTER_SHIFT	4
#define TEXDEF_OUTER_MASK	0x3
#define TEXDEF_NOS_SHIFT	6
#define TEXDEF_NOS_MASK		0x1

#define TEX(t, i, o, s)				\
	(((t) << TEXDEF_TYPE_SHIFT) |		\
	((i) << TEXDEF_INNER_SHIFT) |		\
	((o) << TEXDEF_OUTER_SHIFT) |		\
	((s) << TEXDEF_NOS_SHIFT))

static uint32_t tex_class[8] = {
	/*	    type      inner cache  outer cache */
	TEX(PRRR_MEM, NMRR_WB_WA, NMRR_WB_WA, 0),  /* 0 - ATTR_WB_WA	*/
	TEX(PRRR_MEM, NMRR_NC,	  NMRR_NC,    0),  /* 1 - ATTR_NOCACHE	*/
	TEX(PRRR_DEV, NMRR_NC,	  NMRR_NC,    0),  /* 2 - ATTR_DEVICE	*/
	TEX(PRRR_SO,  NMRR_NC,	  NMRR_NC,    0),  /* 3 - ATTR_SO	*/
	TEX(PRRR_MEM, NMRR_WT,	  NMRR_WT,    0),  /* 4 - ATTR_WT	*/
	TEX(PRRR_MEM, NMRR_NC,	  NMRR_NC,    0),  /* 5 - NOT USED YET	*/
	TEX(PRRR_MEM, NMRR_NC,	  NMRR_NC,    0),  /* 6 - NOT USED YET	*/
	TEX(PRRR_MEM, NMRR_NC,	  NMRR_NC,    0),  /* 7 - NOT USED YET	*/
};
#undef TEX

static uint32_t pte2_attr_tab[8] = {
	PTE2_ATTR_WB_WA,	/* 0 - VM_MEMATTR_WB_WA */
	PTE2_ATTR_NOCACHE,	/* 1 - VM_MEMATTR_NOCACHE */
	PTE2_ATTR_DEVICE,	/* 2 - VM_MEMATTR_DEVICE */
	PTE2_ATTR_SO,		/* 3 - VM_MEMATTR_SO */
	PTE2_ATTR_WT,		/* 4 - VM_MEMATTR_WRITE_THROUGH */
	0,			/* 5 - NOT USED YET */
	0,			/* 6 - NOT USED YET */
	0			/* 7 - NOT USED YET */
};
CTASSERT(VM_MEMATTR_WB_WA == 0);
CTASSERT(VM_MEMATTR_NOCACHE == 1);
CTASSERT(VM_MEMATTR_DEVICE == 2);
CTASSERT(VM_MEMATTR_SO == 3);
CTASSERT(VM_MEMATTR_WRITE_THROUGH == 4);
#define	VM_MEMATTR_END	(VM_MEMATTR_WRITE_THROUGH + 1)

boolean_t
pmap_is_valid_memattr(pmap_t pmap __unused, vm_memattr_t mode)
{

	return (mode >= 0 && mode < VM_MEMATTR_END);
}

static inline uint32_t
vm_memattr_to_pte2(vm_memattr_t ma)
{

	KASSERT((u_int)ma < VM_MEMATTR_END,
	    ("%s: bad vm_memattr_t %d", __func__, ma));
	return (pte2_attr_tab[(u_int)ma]);
}

static inline uint32_t
vm_page_pte2_attr(vm_page_t m)
{

	return (vm_memattr_to_pte2(m->md.pat_mode));
}

/*
 * Convert TEX definition entry to TTB flags.
 */
static uint32_t
encode_ttb_flags(int idx)
{
	uint32_t inner, outer, nos, reg;

	inner = (tex_class[idx] >> TEXDEF_INNER_SHIFT) &
	    TEXDEF_INNER_MASK;
	outer = (tex_class[idx] >> TEXDEF_OUTER_SHIFT) &
	    TEXDEF_OUTER_MASK;
	nos = (tex_class[idx] >> TEXDEF_NOS_SHIFT) &
	    TEXDEF_NOS_MASK;

	reg = nos << 5;
	reg |= outer << 3;
	if (cpuinfo.coherent_walk)
		reg |= (inner & 0x1) << 6;
	reg |= (inner & 0x2) >> 1;
#ifdef SMP
	ARM_SMP_UP(
		reg |= 1 << 1,
	);
#endif
	return (reg);
}
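
/*
 * The bit placement above is meant to match the ARMv7 TTBR0 layout with
 * the Multiprocessing Extensions: NOS in bit 5, RGN (outer cacheability)
 * in bits 4:3, IRGN (inner cacheability) split across bits 6 and 0, and
 * S (shareable) in bit 1. Note that bit 6 is set only if the hardware
 * can walk page tables coherently from the cache
 * (cpuinfo.coherent_walk).
 */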

/*
 * Set TEX remapping registers in current CPU.
 */
void
pmap_set_tex(void)
{
	uint32_t prrr, nmrr;
	uint32_t type, inner, outer, nos;
	int i;

#ifdef PMAP_PTE_NOCACHE
	/* XXX fixme */
	if (cpuinfo.coherent_walk) {
		pt_memattr = VM_MEMATTR_WB_WA;
		ttb_flags = encode_ttb_flags(0);
	} else {
		pt_memattr = VM_MEMATTR_NOCACHE;
		ttb_flags = encode_ttb_flags(1);
	}
#else
	pt_memattr = VM_MEMATTR_WB_WA;
	ttb_flags = encode_ttb_flags(0);
#endif

	prrr = 0;
	nmrr = 0;

	/* Build remapping registers from TEX classes. */
	for (i = 0; i < 8; i++) {
		type = (tex_class[i] >> TEXDEF_TYPE_SHIFT) &
		    TEXDEF_TYPE_MASK;
		inner = (tex_class[i] >> TEXDEF_INNER_SHIFT) &
		    TEXDEF_INNER_MASK;
		outer = (tex_class[i] >> TEXDEF_OUTER_SHIFT) &
		    TEXDEF_OUTER_MASK;
		nos = (tex_class[i] >> TEXDEF_NOS_SHIFT) &
		    TEXDEF_NOS_MASK;

		prrr |= type << (i * 2);
		prrr |= nos << (i + 24);
		nmrr |= inner << (i * 2);
		nmrr |= outer << (i * 2 + 16);
	}
	/* Add shareable bits for device memory. */
	prrr |= PRRR_DS0 | PRRR_DS1;

	/* Add shareable bits for normal memory in SMP case. */
#ifdef SMP
	ARM_SMP_UP(
		prrr |= PRRR_NS1,
	);
#endif
	cp15_prrr_set(prrr);
	cp15_nmrr_set(nmrr);

	/* Caches are disabled, so full TLB flush should be enough. */
	tlb_flush_all_local();
}

/*
 * Remap one vm_memattr class to another one. This can be useful as
 * a workaround for SOC errata, e.g. if devices must be accessed using
 * SO memory class.
 *
 * !!! Please note that this function is absolutely last resort thing.
 * It should not be used under normal circumstances. !!!
 *
 * Usage rules:
 * - it shall be called after pmap_bootstrap_prepare() and before
 *   cpu_mp_start() (thus only on boot CPU). In practice, it's expected
 *   to be called from platform_attach() or platform_late_init().
 *
 * - if remapping doesn't change caching mode, or if an uncached class
 *   is remapped to any kind of cached one, then no other restriction
 *   exists.
 *
 * - if pmap_remap_vm_attr() changes caching mode, but both (original and
 *   remapped) remain cached, then the caller is responsible for calling
 *   dcache_wbinv_poc_all().
 *
 * - remapping of any kind of cached class to uncached is not permitted.
 */
void
pmap_remap_vm_attr(vm_memattr_t old_attr, vm_memattr_t new_attr)
{
	int old_idx, new_idx;

	/* Map VM memattrs to indexes into tex_class table. */
	old_idx = PTE2_ATTR2IDX(pte2_attr_tab[(int)old_attr]);
	new_idx = PTE2_ATTR2IDX(pte2_attr_tab[(int)new_attr]);

	/* Replace TEX attribute and apply it. */
	tex_class[old_idx] = tex_class[new_idx];
	pmap_set_tex();
}
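
/*
 * For example, a (hypothetical) SoC on which device registers must be
 * accessed strongly ordered could call, from its platform_late_init():
 *
 *	pmap_remap_vm_attr(VM_MEMATTR_DEVICE, VM_MEMATTR_SO);
 *
 * This obeys the rules above: it runs on the boot CPU before
 * cpu_mp_start(), and it does not remap a cached class to an uncached
 * one.
 */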

/*
 * KERNBASE must be a multiple of NPT2_IN_PG * PTE1_SIZE. In other words,
 * KERNBASE is mapped by the first L2 page table in an L2 page table page.
 * It meets the same constraint due to PT2MAP being placed just under
 * KERNBASE.
 */
CTASSERT((KERNBASE & (NPT2_IN_PG * PTE1_SIZE - 1)) == 0);
CTASSERT((KERNBASE - VM_MAXUSER_ADDRESS) >= PT2MAP_SIZE);

/*
 * In crazy dreams, PAGE_SIZE could be a multiple of PTE2_SIZE in general.
 * For now, anyhow, the following check must be fulfilled.
 */
CTASSERT(PAGE_SIZE == PTE2_SIZE);
/*
 * We don't want to mess up MI code with all MMU and PMAP definitions,
 * so some things, which depend on other ones, are defined independently.
 * Now, it is time to check that we don't screw up something.
 */
CTASSERT(PDRSHIFT == PTE1_SHIFT);
/*
 * Check L1 and L2 page table entries definitions consistency.
 */
CTASSERT(NB_IN_PT1 == (sizeof(pt1_entry_t) * NPTE1_IN_PT1));
CTASSERT(NB_IN_PT2 == (sizeof(pt2_entry_t) * NPTE2_IN_PT2));
/*
 * Check L2 page tables page consistency.
 */
CTASSERT(PAGE_SIZE == (NPT2_IN_PG * NB_IN_PT2));
CTASSERT((1 << PT2PG_SHIFT) == NPT2_IN_PG);
/*
 * Check PT2TAB consistency.
 * PT2TAB_ENTRIES is defined as a division of NPTE1_IN_PT1 by NPT2_IN_PG.
 * The division must be without remainder.
 */
CTASSERT(NPTE1_IN_PT1 == (PT2TAB_ENTRIES * NPT2_IN_PG));

/*
 * A PT2MAP magic.
 *
 * All level 2 page tables (PT2s) are mapped continuously and accordingly
 * into PT2MAP address space. As PT2 size is less than PAGE_SIZE, this can
 * be done only if PAGE_SIZE is a multiple of PT2 size. All PT2s in one page
 * must be used together, but not necessarily at once. The first PT2 in a
 * page must map things on a correctly aligned address and the others must
 * follow in the right order.
 */
#define NB_IN_PT2TAB	(PT2TAB_ENTRIES * sizeof(pt2_entry_t))
#define NPT2_IN_PT2TAB	(NB_IN_PT2TAB / NB_IN_PT2)
#define NPG_IN_PT2TAB	(NB_IN_PT2TAB / PAGE_SIZE)

/*
 * Check PT2TAB consistency.
 * NPT2_IN_PT2TAB is defined as a division of NB_IN_PT2TAB by NB_IN_PT2.
 * NPG_IN_PT2TAB is defined as a division of NB_IN_PT2TAB by PAGE_SIZE.
 * Both divisions must be without remainder.
 */
CTASSERT(NB_IN_PT2TAB == (NPT2_IN_PT2TAB * NB_IN_PT2));
CTASSERT(NB_IN_PT2TAB == (NPG_IN_PT2TAB * PAGE_SIZE));
/*
 * The implementation was made general, however, with the assumption
 * below in mind. In case of another value of NPG_IN_PT2TAB,
 * the code should be once more rechecked.
 */
CTASSERT(NPG_IN_PT2TAB == 1);
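
/*
 * For illustration, with the ARM short-descriptor format assumed here
 * (4 KB pages, 4 byte entries, 256 entry L2 tables, i.e. NB_IN_PT2 is
 * 1 KB), the arithmetic works out as: NPT2_IN_PG = 4, PT2TAB_ENTRIES =
 * NPTE1_IN_PT1 / 4 = 1024, NB_IN_PT2TAB = 4 KB, and thus
 * NPT2_IN_PT2TAB = 4 and NPG_IN_PT2TAB = 1, which satisfies all the
 * asserts above.
 */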

/*
 * Get offset of PT2 in a page
 * associated with given PT1 index.
 */
static __inline u_int
page_pt2off(u_int pt1_idx)
{

	return ((pt1_idx & PT2PG_MASK) * NB_IN_PT2);
}

/*
 * Get physical address of PT2
 * associated with given PT2s page and PT1 index.
 */
static __inline vm_paddr_t
page_pt2pa(vm_paddr_t pgpa, u_int pt1_idx)
{

	return (pgpa + page_pt2off(pt1_idx));
}

/*
 * Get first entry of PT2
 * associated with given PT2s page and PT1 index.
 */
static __inline pt2_entry_t *
page_pt2(vm_offset_t pgva, u_int pt1_idx)
{

	return ((pt2_entry_t *)(pgva + page_pt2off(pt1_idx)));
}

/*
 * Get virtual address of PT2s page (mapped in PT2MAP) which holds
 * the PT2 which holds the entry which maps the given virtual address.
 */
static __inline vm_offset_t
pt2map_pt2pg(vm_offset_t va)
{

	va &= ~(NPT2_IN_PG * PTE1_SIZE - 1);
	return ((vm_offset_t)pt2map_entry(va));
}

/*****************************************************************************
 *
 *	THREE pmap initialization milestones exist:
 *
 *  locore.S
 *	-> fundamental init (including MMU) in ASM
 *
 *  initarm()
 *	-> fundamental init continues in C
 *	-> first available physical address is known
 *
 *    pmap_bootstrap_prepare() -> FIRST PMAP MILESTONE (first epoch begins)
 *	-> basic (safe) interface for physical address allocation is made
 *	-> basic (safe) interface for virtual mapping is made
 *	-> limited, not SMP coherent, work is possible
 *
 *	-> more fundamental init continues in C
 *	-> locks and some more things are available
 *	-> all fundamental allocations and mappings are done
 *
 *    pmap_bootstrap() -> SECOND PMAP MILESTONE (second epoch begins)
 *	-> phys_avail[] and virtual_avail are set
 *	-> control is passed to vm subsystem
 *	-> physical and virtual address allocation are off limits
 *	-> low level mapping functions, some of them SMP coherent,
 *	   which could not be used before this milestone, are available
 *
 *  mi_startup()
 *	-> vm subsystem is being initialized
 *
 *    pmap_init() -> THIRD PMAP MILESTONE (third epoch begins)
 *	-> pmap is fully initialized
 *
 *****************************************************************************/

/*****************************************************************************
 *
 *	PMAP first stage initialization and utility functions
 *	for pre-bootstrap epoch.
 *
 *  After pmap_bootstrap_prepare() is called, the following functions
 *  can be used:
 *
 *  (1) functions strictly for this stage only, for physical page
 *      allocations, virtual space allocations, and mappings:
 *
 *  vm_paddr_t pmap_preboot_get_pages(u_int num);
 *  void pmap_preboot_map_pages(vm_paddr_t pa, vm_offset_t va, u_int num);
 *  vm_offset_t pmap_preboot_reserve_pages(u_int num);
 *  vm_offset_t pmap_preboot_get_vpages(u_int num);
 *  void pmap_preboot_map_attr(vm_paddr_t pa, vm_offset_t va, vm_size_t size,
 *      vm_prot_t prot, vm_memattr_t attr);
 *
 *  (2) for all stages:
 *
 *  vm_paddr_t pmap_kextract(vm_offset_t va);
 *
 *  NOTE: This stage is not SMP coherent.
 *
 *****************************************************************************/

#define KERNEL_P2V(pa) \
    ((vm_offset_t)((pa) - arm_physmem_kernaddr + KERNVIRTADDR))
#define KERNEL_V2P(va) \
    ((vm_paddr_t)((va) - KERNVIRTADDR + arm_physmem_kernaddr))

static vm_paddr_t last_paddr;

/*
 * Pre-bootstrap epoch page allocator.
 */
vm_paddr_t
pmap_preboot_get_pages(u_int num)
{
	vm_paddr_t ret;

	ret = last_paddr;
	last_paddr += num * PAGE_SIZE;

	return (ret);
}
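
/*
 * Note that this is a plain bump allocator: it only advances last_paddr
 * and nothing is ever freed. As a consequence, pmap_preboot_get_pages(0)
 * is a cheap way to read the current watermark; pmap_bootstrap_prepare()
 * uses that to pass the end of the preallocated region to
 * vm_phys_add_seg().
 */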

/*
 * The fundamental initialization of PMAP stuff.
 *
 * Some things already happened in locore.S and some things could happen
 * before pmap_bootstrap_prepare() is called, so let's recall what is done:
 * 1. Caches are disabled.
 * 2. We are running on virtual addresses already with 'boot_pt1'
 *    as L1 page table.
 * 3. So far, all virtual addresses can be converted to physical ones and
 *    vice versa by the following macros:
 *      KERNEL_P2V(pa) .... physical to virtual ones,
 *      KERNEL_V2P(va) .... virtual to physical ones.
 *
 * What is done herein:
 * 1. The 'boot_pt1' is replaced by real kernel L1 page table 'kern_pt1'.
 * 2. PT2MAP magic is brought to life.
 * 3. Basic preboot functions for page allocations and mappings can be used.
 * 4. Everything is prepared for L1 cache enabling.
 *
 * Variations:
 * 1. To use second TTB register, so that kernel and user page tables are
 *    separated. This way process forking - pmap_pinit() - could be faster,
 *    it saves physical pages and KVA per process, and it's a simple change.
 *    However, due to how the hardware works, it will lead to one of the
 *    following:
 *    (a) 2G space for kernel and 2G space for users.
 *    (b) 1G space for kernel in low addresses and 3G for users above it.
 *    A question is: Is the case (b) really an option? Note that case (b)
 *    saves neither physical memory nor KVA.
 */
void
pmap_bootstrap_prepare(vm_paddr_t last)
{
	vm_paddr_t pt2pg_pa, pt2tab_pa, pa, size;
	vm_offset_t pt2pg_va;
	pt1_entry_t *pte1p;
	pt2_entry_t *pte2p;
	u_int i;
	uint32_t l1_attr;

	/*
	 * Now, we are going to make the real kernel mapping. Note that we
	 * are already running on some mapping made in locore.S and we expect
	 * that it's large enough to ensure nofault access to physical memory
	 * allocated herein before the switch.
	 *
	 * As kernel image and everything needed before are and will be mapped
	 * by section mappings, we align last physical address to PTE1_SIZE.
	 */
	last_paddr = pte1_roundup(last);

	/*
	 * Allocate and zero page(s) for kernel L1 page table.
	 *
	 * Note that it's the first allocation in space which was PTE1_SIZE
	 * aligned, and as such base_pt1 is aligned to NB_IN_PT1 too.
	 */
	base_pt1 = pmap_preboot_get_pages(NPG_IN_PT1);
	kern_pt1 = (pt1_entry_t *)KERNEL_P2V(base_pt1);
	bzero((void*)kern_pt1, NB_IN_PT1);
	pte1_sync_range(kern_pt1, NB_IN_PT1);

	/* Allocate and zero page(s) for kernel PT2TAB. */
	pt2tab_pa = pmap_preboot_get_pages(NPG_IN_PT2TAB);
	kern_pt2tab = (pt2_entry_t *)KERNEL_P2V(pt2tab_pa);
	bzero(kern_pt2tab, NB_IN_PT2TAB);
	pte2_sync_range(kern_pt2tab, NB_IN_PT2TAB);

	/* Allocate and zero page(s) for kernel L2 page tables. */
	pt2pg_pa = pmap_preboot_get_pages(NKPT2PG);
	pt2pg_va = KERNEL_P2V(pt2pg_pa);
	size = NKPT2PG * PAGE_SIZE;
	bzero((void*)pt2pg_va, size);
	pte2_sync_range((pt2_entry_t *)pt2pg_va, size);

	/*
	 * Add a physical memory segment (vm_phys_seg) corresponding to the
	 * preallocated pages for kernel L2 page tables so that vm_page
	 * structures representing these pages will be created. The vm_page
	 * structures are required for promotion of the corresponding kernel
	 * virtual addresses to section mappings.
	 */
	vm_phys_add_seg(pt2tab_pa, pmap_preboot_get_pages(0));

	/*
	 * Insert allocated L2 page table pages to PT2TAB and make links
	 * to all PT2s in L1 page table. See how kernel_vm_end
	 * is initialized.
	 *
	 * We play simple and safe. So every KVA will have an underlying
	 * L2 page table, even kernel image mapped by sections.
	 */
	pte2p = kern_pt2tab_entry(KERNBASE);
	for (pa = pt2pg_pa; pa < pt2pg_pa + size; pa += PTE2_SIZE)
		pt2tab_store(pte2p++, PTE2_KPT(pa));

	pte1p = kern_pte1(KERNBASE);
	for (pa = pt2pg_pa; pa < pt2pg_pa + size; pa += NB_IN_PT2)
		pte1_store(pte1p++, PTE1_LINK(pa));

	/* Make section mappings for kernel. */
	l1_attr = ATTR_TO_L1(PTE2_ATTR_DEFAULT);
	pte1p = kern_pte1(KERNBASE);
	for (pa = KERNEL_V2P(KERNBASE); pa < last; pa += PTE1_SIZE)
		pte1_store(pte1p++, PTE1_KERN(pa, PTE1_AP_KRW, l1_attr));

	/*
	 * Get free and aligned space for PT2MAP and make L1 page table links
	 * to L2 page tables held in PT2TAB.
	 *
	 * Note that pages holding PT2s are stored in PT2TAB as pt2_entry_t
	 * descriptors and PT2TAB page(s) itself is(are) used as PT2s. Thus
	 * each entry in PT2TAB maps all PT2s in a page. This implies that
	 * virtual address of PT2MAP must be aligned to NPT2_IN_PG * PTE1_SIZE.
	 */
	PT2MAP = (pt2_entry_t *)(KERNBASE - PT2MAP_SIZE);
	pte1p = kern_pte1((vm_offset_t)PT2MAP);
	for (pa = pt2tab_pa, i = 0; i < NPT2_IN_PT2TAB; i++, pa += NB_IN_PT2) {
		pte1_store(pte1p++, PTE1_LINK(pa));
	}

	/*
	 * Store PT2TAB in PT2TAB itself, i.e. self reference mapping.
	 * Each pmap will hold its own PT2TAB, so the mapping should not
	 * be global.
	 */
	pte2p = kern_pt2tab_entry((vm_offset_t)PT2MAP);
	for (pa = pt2tab_pa, i = 0; i < NPG_IN_PT2TAB; i++, pa += PTE2_SIZE) {
		pt2tab_store(pte2p++, PTE2_KPT_NG(pa));
	}

	/*
	 * Choose correct L2 page table and make mappings for allocations
	 * made herein which replace the temporary locore.S mappings after a
	 * while. Note that PT2MAP cannot be used until we switch to kern_pt1.
	 *
	 * Note that these allocations started aligned on 1M section and
	 * kernel PT1 was allocated first. Making of mappings must follow
	 * the order of physical allocations as we've used KERNEL_P2V() macro
	 * for virtual address resolution.
	 */
	pte2p = kern_pt2tab_entry((vm_offset_t)kern_pt1);
	pt2pg_va = KERNEL_P2V(pte2_pa(pte2_load(pte2p)));

	pte2p = page_pt2(pt2pg_va, pte1_index((vm_offset_t)kern_pt1));

	/* Make mapping for kernel L1 page table. */
	for (pa = base_pt1, i = 0; i < NPG_IN_PT1; i++, pa += PTE2_SIZE)
		pte2_store(pte2p++, PTE2_KPT(pa));

	/* Make mapping for kernel PT2TAB. */
	for (pa = pt2tab_pa, i = 0; i < NPG_IN_PT2TAB; i++, pa += PTE2_SIZE)
		pte2_store(pte2p++, PTE2_KPT(pa));

	/* Finally, switch from 'boot_pt1' to 'kern_pt1'. */
	pmap_kern_ttb = base_pt1 | ttb_flags;
	cpuinfo_reinit_mmu(pmap_kern_ttb);
	/*
	 * Initialize the first available KVA. As kernel image is mapped by
	 * sections, we are leaving some gap behind.
	 */
	virtual_avail = (vm_offset_t)kern_pt2tab + NPG_IN_PT2TAB * PAGE_SIZE;
}
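
/*
 * To recap the result: after the switch, KVA starting at KERNBASE is
 * backed by the NKPT2PG preallocated L2 page table pages, which is
 * exactly what the kernel_vm_end initializer (KERNBASE + NKPT2PG *
 * NPT2_IN_PG * PTE1_SIZE) expresses. KVA outside that range (e.g.
 * vectors and devices above VM_MAX_KERNEL_ADDRESS) gets its L2 page
 * tables on demand from pmap_preboot_pt2pg_setup() below.
 */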

/*
 * Setup L2 page table page for given KVA.
 * Used in pre-bootstrap epoch.
 *
 * Note that we have allocated NKPT2PG pages for L2 page tables in advance
 * and used them for mapping KVA starting from KERNBASE. However, this is not
 * enough. Vectors and devices need L2 page tables too. Note that they are
 * even above VM_MAX_KERNEL_ADDRESS.
 */
static __inline vm_paddr_t
pmap_preboot_pt2pg_setup(vm_offset_t va)
{
	pt2_entry_t *pte2p, pte2;
	vm_paddr_t pt2pg_pa;

	/* Get associated entry in PT2TAB. */
	pte2p = kern_pt2tab_entry(va);

	/* Just return, if PT2s page exists already. */
	pte2 = pt2tab_load(pte2p);
	if (pte2_is_valid(pte2))
		return (pte2_pa(pte2));

	KASSERT(va >= VM_MAX_KERNEL_ADDRESS,
	    ("%s: NKPT2PG too small", __func__));

	/*
	 * Allocate page for PT2s and insert it to PT2TAB.
	 * In other words, map it into PT2MAP space.
	 */
	pt2pg_pa = pmap_preboot_get_pages(1);
	pt2tab_store(pte2p, PTE2_KPT(pt2pg_pa));

	/* Zero all PT2s in allocated page. */
	bzero((void*)pt2map_pt2pg(va), PAGE_SIZE);
	pte2_sync_range((pt2_entry_t *)pt2map_pt2pg(va), PAGE_SIZE);

	return (pt2pg_pa);
}

/*
 * Setup L2 page table for given KVA.
 * Used in pre-bootstrap epoch.
 */
static void
pmap_preboot_pt2_setup(vm_offset_t va)
{
	pt1_entry_t *pte1p;
	vm_paddr_t pt2pg_pa, pt2_pa;

	/* Setup PT2's page. */
	pt2pg_pa = pmap_preboot_pt2pg_setup(va);
	pt2_pa = page_pt2pa(pt2pg_pa, pte1_index(va));

	/* Insert PT2 to PT1. */
	pte1p = kern_pte1(va);
	pte1_store(pte1p, PTE1_LINK(pt2_pa));
}

/*
 * Get L2 page entry associated with given KVA.
 * Used in pre-bootstrap epoch.
 */
static __inline pt2_entry_t*
pmap_preboot_vtopte2(vm_offset_t va)
{
	pt1_entry_t *pte1p;

	/* Setup PT2 if needed. */
	pte1p = kern_pte1(va);
	if (!pte1_is_valid(pte1_load(pte1p))) /* XXX - sections ?! */
		pmap_preboot_pt2_setup(va);

	return (pt2map_entry(va));
}

/*
 * Pre-bootstrap epoch page(s) mapping(s).
 */
void
pmap_preboot_map_pages(vm_paddr_t pa, vm_offset_t va, u_int num)
{
	u_int i;
	pt2_entry_t *pte2p;

	/* Map all the pages. */
	for (i = 0; i < num; i++) {
		pte2p = pmap_preboot_vtopte2(va);
		pte2_store(pte2p, PTE2_KRW(pa));
		va += PAGE_SIZE;
		pa += PAGE_SIZE;
	}
}

/*
 * Pre-bootstrap epoch virtual space allocator.
 */
vm_offset_t
pmap_preboot_reserve_pages(u_int num)
{
	u_int i;
	vm_offset_t start, va;
	pt2_entry_t *pte2p;

	/* Allocate virtual space. */
	start = va = virtual_avail;
	virtual_avail += num * PAGE_SIZE;

	/* Zero the mapping. */
	for (i = 0; i < num; i++) {
		pte2p = pmap_preboot_vtopte2(va);
		pte2_store(pte2p, 0);
		va += PAGE_SIZE;
	}

	return (start);
}

/*
 * Pre-bootstrap epoch page(s) allocation and mapping(s).
 */
vm_offset_t
pmap_preboot_get_vpages(u_int num)
{
	vm_paddr_t pa;
	vm_offset_t va;

	/* Allocate physical page(s). */
	pa = pmap_preboot_get_pages(num);

	/* Allocate virtual space. */
	va = virtual_avail;
	virtual_avail += num * PAGE_SIZE;

	/* Map and zero all. */
	pmap_preboot_map_pages(pa, va, num);
	bzero((void *)va, num * PAGE_SIZE);

	return (va);
}

/*
 * Pre-bootstrap epoch page mapping(s) with attributes.
 */
void
pmap_preboot_map_attr(vm_paddr_t pa, vm_offset_t va, vm_size_t size,
    vm_prot_t prot, vm_memattr_t attr)
{
	u_int num;
	u_int l1_attr, l1_prot, l2_prot, l2_attr;
	pt1_entry_t *pte1p;
	pt2_entry_t *pte2p;

	l2_prot = prot & VM_PROT_WRITE ? PTE2_AP_KRW : PTE2_AP_KR;
	l2_prot |= (prot & VM_PROT_EXECUTE) ? PTE2_X : PTE2_NX;
	l2_attr = vm_memattr_to_pte2(attr);
	l1_prot = ATTR_TO_L1(l2_prot);
	l1_attr = ATTR_TO_L1(l2_attr);

	/* Map all the pages. */
	num = round_page(size);
	while (num > 0) {
		if ((((va | pa) & PTE1_OFFSET) == 0) && (num >= PTE1_SIZE)) {
			pte1p = kern_pte1(va);
			pte1_store(pte1p, PTE1_KERN(pa, l1_prot, l1_attr));
			va += PTE1_SIZE;
			pa += PTE1_SIZE;
			num -= PTE1_SIZE;
		} else {
			pte2p = pmap_preboot_vtopte2(va);
			pte2_store(pte2p, PTE2_KERN(pa, l2_prot, l2_attr));
			va += PAGE_SIZE;
			pa += PAGE_SIZE;
			num -= PAGE_SIZE;
		}
	}
}
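
/*
 * For illustration, assuming PTE1_SIZE is 1 MB and PAGE_SIZE is 4 KB:
 * mapping a 2.5 MB region whose 'va' and 'pa' are both 1 MB aligned
 * consumes two PTE1 section mappings for the first 2 MB and 128 PTE2
 * page mappings for the remaining 0.5 MB. If 'va' and 'pa' are not
 * congruent modulo PTE1_SIZE, the loop above can never use sections
 * and falls back to page mappings for the whole range.
 */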

/*
 * Extract from the kernel page table the physical address
 * that is mapped by the given virtual address "va".
 */
vm_paddr_t
pmap_kextract(vm_offset_t va)
{
	vm_paddr_t pa;
	pt1_entry_t pte1;
	pt2_entry_t pte2;

	pte1 = pte1_load(kern_pte1(va));
	if (pte1_is_section(pte1)) {
		pa = pte1_pa(pte1) | (va & PTE1_OFFSET);
	} else if (pte1_is_link(pte1)) {
		/*
		 * We should beware of concurrent promotion that changes
		 * pte1 at this point. However, it's not a problem as PT2
		 * page is preserved by promotion in PT2TAB. So even if
		 * it happens, using of PT2MAP is still safe.
		 *
		 * QQQ: However, concurrent removing is a problem which
		 *      ends in abort on PT2MAP space. Locking must be used
		 *      to deal with this.
		 */
		pte2 = pte2_load(pt2map_entry(va));
		pa = pte2_pa(pte2) | (va & PTE2_OFFSET);
	} else {
		panic("%s: va %#x pte1 %#x", __func__, va, pte1);
	}
	return (pa);
}

/*
 * Extract from the kernel page table the physical address
 * that is mapped by the given virtual address "va". Also
 * return L2 page table entry which maps the address.
 *
 * This is only intended to be used for panic dumps.
 */
vm_paddr_t
pmap_dump_kextract(vm_offset_t va, pt2_entry_t *pte2p)
{
	vm_paddr_t pa;
	pt1_entry_t pte1;
	pt2_entry_t pte2;

	pte1 = pte1_load(kern_pte1(va));
	if (pte1_is_section(pte1)) {
		pa = pte1_pa(pte1) | (va & PTE1_OFFSET);
		pte2 = pa | ATTR_TO_L2(pte1) | PTE2_V;
	} else if (pte1_is_link(pte1)) {
		pte2 = pte2_load(pt2map_entry(va));
		pa = pte2_pa(pte2);
	} else {
		pte2 = 0;
		pa = 0;
	}
	if (pte2p != NULL)
		*pte2p = pte2;
	return (pa);
}

/*****************************************************************************
 *
 *	PMAP second stage initialization and utility functions
 *	for bootstrap epoch.
 *
 *  After pmap_bootstrap() is called, the following functions for
 *  mappings can be used:
 *
 *  void pmap_kenter(vm_offset_t va, vm_paddr_t pa);
 *  void pmap_kremove(vm_offset_t va);
 *  vm_offset_t pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end,
 *      int prot);
 *
 *  NOTE: This stage is not SMP coherent, and physical page allocation is
 *        not allowed during this stage.
 *
 *****************************************************************************/

/*
 * Initialize kernel PMAP locks and lists, kernel_pmap itself, and
 * reserve various virtual spaces for temporary mappings.
 */
void
pmap_bootstrap(vm_offset_t firstaddr)
{
	pt2_entry_t *unused __unused;
	struct pcpu *pc;

	/*
	 * Initialize the kernel pmap (which is statically allocated).
	 */
	PMAP_LOCK_INIT(kernel_pmap);
	kernel_l1pa = (vm_paddr_t)kern_pt1;	/* for libkvm */
	kernel_pmap->pm_pt1 = kern_pt1;
	kernel_pmap->pm_pt2tab = kern_pt2tab;
	CPU_FILL(&kernel_pmap->pm_active);	/* don't allow deactivation */
	TAILQ_INIT(&kernel_pmap->pm_pvchunk);

	/*
	 * Initialize the global pv list lock.
	 */
	rw_init(&pvh_global_lock, "pmap pv global");

	LIST_INIT(&allpmaps);

	/*
	 * Request a spin mutex so that changes to allpmaps cannot be
	 * preempted by smp_rendezvous_cpus().
	 */
	mtx_init(&allpmaps_lock, "allpmaps", NULL, MTX_SPIN);
	mtx_lock_spin(&allpmaps_lock);
	LIST_INSERT_HEAD(&allpmaps, kernel_pmap, pm_list);
	mtx_unlock_spin(&allpmaps_lock);

	/*
	 * Reserve some special page table entries/VA space for temporary
	 * mapping of pages.
	 */
#define SYSMAP(c, p, v, n)  do {		\
	v = (c)pmap_preboot_reserve_pages(n);	\
	p = pt2map_entry((vm_offset_t)v);	\
	} while (0)

	/*
	 * Local CMAP1/CMAP2 are used for zeroing and copying pages.
	 * Local CMAP2 is also used for data cache cleaning.
	 */
	pc = get_pcpu();
	mtx_init(&pc->pc_cmap_lock, "SYSMAPS", NULL, MTX_DEF);
	SYSMAP(caddr_t, pc->pc_cmap1_pte2p, pc->pc_cmap1_addr, 1);
	SYSMAP(caddr_t, pc->pc_cmap2_pte2p, pc->pc_cmap2_addr, 1);
	SYSMAP(vm_offset_t, pc->pc_qmap_pte2p, pc->pc_qmap_addr, 1);

	/*
	 * Crashdump maps.
	 */
	SYSMAP(caddr_t, unused, crashdumpmap, MAXDUMPPGS);

	/*
	 * _tmppt is used for reading arbitrary physical pages via /dev/mem.
	 */
	SYSMAP(caddr_t, unused, _tmppt, 1);

	/*
	 * PADDR1 and PADDR2 are used by pmap_pte2_quick() and pmap_pte2(),
	 * respectively. PADDR3 is used by pmap_pte2_ddb().
	 */
	SYSMAP(pt2_entry_t *, PMAP1, PADDR1, 1);
	SYSMAP(pt2_entry_t *, PMAP2, PADDR2, 1);
#ifdef DDB
	SYSMAP(pt2_entry_t *, PMAP3, PADDR3, 1);
#endif
	mtx_init(&PMAP2mutex, "PMAP2", NULL, MTX_DEF);

	/*
	 * Note that in a very short time in initarm(), we are going to
	 * initialize the phys_avail[] array and no further page allocation
	 * can happen after that until the vm subsystem is initialized.
	 */
	kernel_vm_end_new = kernel_vm_end;
	virtual_end = vm_max_kernel_address;
}

static void
pmap_init_reserved_pages(void)
{
	struct pcpu *pc;
	vm_offset_t pages;
	int i;

	CPU_FOREACH(i) {
		pc = pcpu_find(i);
		/*
		 * Skip if the mapping has already been initialized,
		 * i.e. this is the BSP.
		 */
		if (pc->pc_cmap1_addr != 0)
			continue;
		mtx_init(&pc->pc_cmap_lock, "SYSMAPS", NULL, MTX_DEF);
		pages = kva_alloc(PAGE_SIZE * 3);
		if (pages == 0)
			panic("%s: unable to allocate KVA", __func__);
		pc->pc_cmap1_pte2p = pt2map_entry(pages);
		pc->pc_cmap2_pte2p = pt2map_entry(pages + PAGE_SIZE);
		pc->pc_qmap_pte2p = pt2map_entry(pages + (PAGE_SIZE * 2));
		pc->pc_cmap1_addr = (caddr_t)pages;
		pc->pc_cmap2_addr = (caddr_t)(pages + PAGE_SIZE);
		pc->pc_qmap_addr = pages + (PAGE_SIZE * 2);
	}
}
SYSINIT(rpages_init, SI_SUB_CPU, SI_ORDER_ANY, pmap_init_reserved_pages, NULL);

/*
 * This function can already be used in the second initialization stage.
 * As such, the function DOES NOT call pmap_growkernel() where PT2
 * allocation can happen. So if used, be sure that PT2 for given
 * virtual address is allocated already!
 *
 * Add a wired page to the kva.
 * Note: not SMP coherent.
 */
static __inline void
pmap_kenter_prot_attr(vm_offset_t va, vm_paddr_t pa, uint32_t prot,
    uint32_t attr)
{
	pt1_entry_t *pte1p;
	pt2_entry_t *pte2p;

	pte1p = kern_pte1(va);
	if (!pte1_is_valid(pte1_load(pte1p))) { /* XXX - sections ?! */
		/*
		 * This is a very low level function, so PT2 and particularly
		 * PT2PG associated with given virtual address must be already
		 * allocated. It's a pain mainly during pmap initialization
		 * stage. However, calling it after pmap initialization with
		 * a virtual address not under kernel_vm_end leads to
		 * the same misery.
		 */
		if (!pte2_is_valid(pte2_load(kern_pt2tab_entry(va))))
			panic("%s: kernel PT2 not allocated!", __func__);
	}

	pte2p = pt2map_entry(va);
	pte2_store(pte2p, PTE2_KERN(pa, prot, attr));
}

PMAP_INLINE void
pmap_kenter(vm_offset_t va, vm_paddr_t pa)
{

	pmap_kenter_prot_attr(va, pa, PTE2_AP_KRW, PTE2_ATTR_DEFAULT);
}

/*
 * Remove a page from the kernel pagetables.
 * Note: not SMP coherent.
 */
PMAP_INLINE void
pmap_kremove(vm_offset_t va)
{
	pt1_entry_t *pte1p;
	pt2_entry_t *pte2p;

	pte1p = kern_pte1(va);
	if (pte1_is_section(pte1_load(pte1p))) {
		pte1_clear(pte1p);
	} else {
		pte2p = pt2map_entry(va);
		pte2_clear(pte2p);
	}
}

/*
 * Share new kernel PT2PG with all pmaps.
 * The caller is responsible for maintaining TLB consistency.
 */
static void
pmap_kenter_pt2tab(vm_offset_t va, pt2_entry_t npte2)
{
	pmap_t pmap;
	pt2_entry_t *pte2p;

	mtx_lock_spin(&allpmaps_lock);
	LIST_FOREACH(pmap, &allpmaps, pm_list) {
		pte2p = pmap_pt2tab_entry(pmap, va);
		pt2tab_store(pte2p, npte2);
	}
	mtx_unlock_spin(&allpmaps_lock);
}

/*
 * Share new kernel PTE1 with all pmaps.
 * The caller is responsible for maintaining TLB consistency.
 */
static void
pmap_kenter_pte1(vm_offset_t va, pt1_entry_t npte1)
{
	pmap_t pmap;
	pt1_entry_t *pte1p;

	mtx_lock_spin(&allpmaps_lock);
	LIST_FOREACH(pmap, &allpmaps, pm_list) {
		pte1p = pmap_pte1(pmap, va);
		pte1_store(pte1p, npte1);
	}
	mtx_unlock_spin(&allpmaps_lock);
}
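
/*
 * The two functions above exist because every pmap carries its own
 * copy of the kernel part of PT1 and of PT2TAB. When a new kernel
 * PTE1 or PT2PG comes into existence (see pmap_growkernel()), the new
 * entry must be replicated into each registered pmap, hence the walk
 * over allpmaps.
 */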

/*
 * Used to map a range of physical addresses into kernel
 * virtual address space.
 *
 * The value passed in '*virt' is a suggested virtual address for
 * the mapping. Architectures which can support a direct-mapped
 * physical to virtual region can return the appropriate address
 * within that region, leaving '*virt' unchanged. Other
 * architectures should map the pages starting at '*virt' and
 * update '*virt' with the first usable address after the mapped
 * region.
 *
 * NOTE: Read the comments above pmap_kenter_prot_attr() as
 * the function is used herein!
 */
vm_offset_t
pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
{
	vm_offset_t va, sva;
	vm_paddr_t pte1_offset;
	pt1_entry_t npte1;
	uint32_t l1prot, l2prot;
	uint32_t l1attr, l2attr;

	PDEBUG(1, printf("%s: virt = %#x, start = %#x, end = %#x (size = %#x),"
	    " prot = %d\n", __func__, *virt, start, end, end - start, prot));

	l2prot = (prot & VM_PROT_WRITE) ? PTE2_AP_KRW : PTE2_AP_KR;
	l2prot |= (prot & VM_PROT_EXECUTE) ? PTE2_X : PTE2_NX;
	l1prot = ATTR_TO_L1(l2prot);

	l2attr = PTE2_ATTR_DEFAULT;
	l1attr = ATTR_TO_L1(l2attr);

	va = *virt;
	/*
	 * Does the physical address range's size and alignment permit at
	 * least one section mapping to be created?
	 */
	pte1_offset = start & PTE1_OFFSET;
	if ((end - start) - ((PTE1_SIZE - pte1_offset) & PTE1_OFFSET) >=
	    PTE1_SIZE) {
		/*
		 * Increase the starting virtual address so that its alignment
		 * does not preclude the use of section mappings.
		 */
		if ((va & PTE1_OFFSET) < pte1_offset)
			va = pte1_trunc(va) + pte1_offset;
		else if ((va & PTE1_OFFSET) > pte1_offset)
			va = pte1_roundup(va) + pte1_offset;
	}
	sva = va;
	while (start < end) {
		if ((start & PTE1_OFFSET) == 0 && end - start >= PTE1_SIZE) {
			KASSERT((va & PTE1_OFFSET) == 0,
			    ("%s: misaligned va %#x", __func__, va));
			npte1 = PTE1_KERN(start, l1prot, l1attr);
			pmap_kenter_pte1(va, npte1);
			va += PTE1_SIZE;
			start += PTE1_SIZE;
		} else {
			pmap_kenter_prot_attr(va, start, l2prot, l2attr);
			va += PAGE_SIZE;
			start += PAGE_SIZE;
		}
	}
	tlb_flush_range(sva, va - sva);
	*virt = va;
	return (sva);
}

/*
 * Make a temporary mapping for a physical address.
 * This is only intended to be used for panic dumps.
 */
void *
pmap_kenter_temporary(vm_paddr_t pa, int i)
{
	vm_offset_t va;

	/* QQQ: 'i' should be less than or equal to MAXDUMPPGS. */

	va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE);
	pmap_kenter(va, pa);
	tlb_flush_local(va);
	return ((void *)crashdumpmap);
}

/*************************************
 *
 * TLB & cache maintenance routines.
 *
 *************************************/

/*
 * We inline these within pmap.c for speed.
 */
PMAP_INLINE void
pmap_tlb_flush(pmap_t pmap, vm_offset_t va)
{

	if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
		tlb_flush(va);
}

PMAP_INLINE void
pmap_tlb_flush_range(pmap_t pmap, vm_offset_t sva, vm_size_t size)
{

	if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
		tlb_flush_range(sva, size);
}

/*
 * Abuse the pte2 nodes for unmapped kva to thread a kva freelist through.
 * Requirements:
 *  - Must deal with pages in order to ensure that none of the PTE2_* bits
 *    are ever set, PTE2_V in particular.
 *  - Assumes we can write to pte2s without pte2_store() atomic ops.
 *  - Assumes nothing will ever test these addresses for 0 to indicate
 *    no mapping instead of correctly checking PTE2_V.
 *  - Assumes a vm_offset_t will fit in a pte2 (true for arm).
 * Because PTE2_V is never set, there can be no mappings to invalidate.
 */
static vm_offset_t
pmap_pte2list_alloc(vm_offset_t *head)
{
	pt2_entry_t *pte2p;
	vm_offset_t va;

	va = *head;
	if (va == 0)
		panic("%s: exhausted pte2list KVA", __func__);
	pte2p = pt2map_entry(va);
	*head = *pte2p;
	if (*head & PTE2_V)
		panic("%s: va with PTE2_V set!", __func__);
	*pte2p = 0;
	return (va);
}

static void
pmap_pte2list_free(vm_offset_t *head, vm_offset_t va)
{
	pt2_entry_t *pte2p;

	if (va & PTE2_V)
		panic("%s: freeing va with PTE2_V set!", __func__);
	pte2p = pt2map_entry(va);
	*pte2p = *head;		/* virtual! PTE2_V is 0 though */
	*head = va;
}

static void
pmap_pte2list_init(vm_offset_t *head, void *base, int npages)
{
	int i;
	vm_offset_t va;

	*head = 0;
	for (i = npages - 1; i >= 0; i--) {
		va = (vm_offset_t)base + i * PAGE_SIZE;
		pmap_pte2list_free(head, va);
	}
}

/*****************************************************************************
 *
 *	PMAP third and final stage initialization.
 *
 *  After pmap_init() is called, the PMAP subsystem is fully initialized.
 *
 *****************************************************************************/

SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");

SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_max, CTLFLAG_RD, &pv_entry_max, 0,
    "Max number of PV entries");
SYSCTL_INT(_vm_pmap, OID_AUTO, shpgperproc, CTLFLAG_RD, &shpgperproc, 0,
    "Page share factor per proc");

static u_long nkpt2pg = NKPT2PG;
SYSCTL_ULONG(_vm_pmap, OID_AUTO, nkpt2pg, CTLFLAG_RD,
    &nkpt2pg, 0, "Pre-allocated pages for kernel PT2s");

static int sp_enabled = 1;
SYSCTL_INT(_vm_pmap, OID_AUTO, sp_enabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &sp_enabled, 0, "Are large page mappings enabled?");

bool
pmap_ps_enabled(pmap_t pmap __unused)
{

	return (sp_enabled != 0);
}

static SYSCTL_NODE(_vm_pmap, OID_AUTO, pte1, CTLFLAG_RD, 0,
    "1MB page mapping counters");

static u_long pmap_pte1_demotions;
SYSCTL_ULONG(_vm_pmap_pte1, OID_AUTO, demotions, CTLFLAG_RD,
    &pmap_pte1_demotions, 0, "1MB page demotions");

static u_long pmap_pte1_mappings;
SYSCTL_ULONG(_vm_pmap_pte1, OID_AUTO, mappings, CTLFLAG_RD,
    &pmap_pte1_mappings, 0, "1MB page mappings");

static u_long pmap_pte1_p_failures;
SYSCTL_ULONG(_vm_pmap_pte1, OID_AUTO, p_failures, CTLFLAG_RD,
    &pmap_pte1_p_failures, 0, "1MB page promotion failures");

static u_long pmap_pte1_promotions;
SYSCTL_ULONG(_vm_pmap_pte1, OID_AUTO, promotions, CTLFLAG_RD,
    &pmap_pte1_promotions, 0, "1MB page promotions");

static u_long pmap_pte1_kern_demotions;
SYSCTL_ULONG(_vm_pmap_pte1, OID_AUTO, kern_demotions, CTLFLAG_RD,
    &pmap_pte1_kern_demotions, 0, "1MB page kernel demotions");

static u_long pmap_pte1_kern_promotions;
SYSCTL_ULONG(_vm_pmap_pte1, OID_AUTO, kern_promotions, CTLFLAG_RD,
    &pmap_pte1_kern_promotions, 0, "1MB page kernel promotions");

static __inline ttb_entry_t
pmap_ttb_get(pmap_t pmap)
{

	return (vtophys(pmap->pm_pt1) | ttb_flags);
}
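
/*
 * The TTB entry for a pmap is simply the physical base of its L1 page
 * table merged with the global ttb_flags computed in pmap_set_tex();
 * compare with pmap_kern_ttb, which is built the same way in
 * pmap_bootstrap_prepare(). It is presumably what gets loaded into
 * TTBR0 when the pmap is activated.
 */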

/*
 * Initialize a vm_page's machine-dependent fields.
 *
 * Variations:
 * 1. Pages for L2 page tables are never managed, so pv_list and
 *    pt2_wirecount can share the same physical space. However, proper
 *    initialization on a page alloc for page tables and reinitialization
 *    on the page free must be ensured.
 */
void
pmap_page_init(vm_page_t m)
{

	TAILQ_INIT(&m->md.pv_list);
	pt2_wirecount_init(m);
	m->md.pat_mode = VM_MEMATTR_DEFAULT;
}

/*
 * Virtualization hook for a faster way to zero a whole page.
 */
static __inline void
pagezero(void *page)
{

	bzero(page, PAGE_SIZE);
}

/*
 * Zero L2 page table page.
 * Use same KVA as in pmap_zero_page().
 */
static __inline vm_paddr_t
pmap_pt2pg_zero(vm_page_t m)
{
	pt2_entry_t *cmap2_pte2p;
	vm_paddr_t pa;
	struct pcpu *pc;

	pa = VM_PAGE_TO_PHYS(m);

	/*
	 * XXX: For now, we map whole page even if it's already zero,
	 *      to sync it even if the sync is only DSB.
	 */
	sched_pin();
	pc = get_pcpu();
	cmap2_pte2p = pc->pc_cmap2_pte2p;
	mtx_lock(&pc->pc_cmap_lock);
	if (pte2_load(cmap2_pte2p) != 0)
		panic("%s: CMAP2 busy", __func__);
	pte2_store(cmap2_pte2p, PTE2_KERN_NG(pa, PTE2_AP_KRW,
	    vm_page_pte2_attr(m)));
	/* Even VM_ALLOC_ZERO request is only advisory. */
	if ((m->flags & PG_ZERO) == 0)
		pagezero(pc->pc_cmap2_addr);
	pte2_sync_range((pt2_entry_t *)pc->pc_cmap2_addr, PAGE_SIZE);
	pte2_clear(cmap2_pte2p);
	tlb_flush((vm_offset_t)pc->pc_cmap2_addr);

	/*
	 * Unpin the thread before releasing the lock. Otherwise the thread
	 * could be rescheduled while still bound to the current CPU, only
	 * to unpin itself immediately upon resuming execution.
	 */
	sched_unpin();
	mtx_unlock(&pc->pc_cmap_lock);

	return (pa);
}

/*
 * Init just allocated page as L2 page table(s) holder
 * and return its physical address.
 */
static __inline vm_paddr_t
pmap_pt2pg_init(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
	vm_paddr_t pa;
	pt2_entry_t *pte2p;

	/* Check page attributes. */
	if (m->md.pat_mode != pt_memattr)
		pmap_page_set_memattr(m, pt_memattr);

	/* Zero page and init wire counts. */
	pa = pmap_pt2pg_zero(m);
	pt2_wirecount_init(m);

	/*
	 * Map page to PT2MAP address space for given pmap.
	 * Note that PT2MAP space is shared with all pmaps.
	 */
	if (pmap == kernel_pmap)
		pmap_kenter_pt2tab(va, PTE2_KPT(pa));
	else {
		pte2p = pmap_pt2tab_entry(pmap, va);
		pt2tab_store(pte2p, PTE2_KPT_NG(pa));
	}

	return (pa);
}
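
/*
 * A note on the CMAP2 dance in pmap_pt2pg_zero() above: the thread is
 * pinned first, so that the per-CPU cmap2 slot and its lock refer to
 * the same CPU for the whole critical section. The temporary mapping
 * is made non-global (PTE2_KERN_NG), so stale entries cannot leak into
 * other CPUs' TLBs via global mappings.
 */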

/*
 * Initialize the pmap module.
 * Called by vm_init, to initialize any structures that the pmap
 * system needs to map virtual memory.
 */
void
pmap_init(void)
{
	vm_size_t s;
	pt2_entry_t *pte2p, pte2;
	u_int i, pte1_idx, pv_npg;

	PDEBUG(1, printf("%s: phys_start = %#x\n", __func__, PHYSADDR));

	/*
	 * Initialize the vm page array entries for kernel pmap's
	 * L2 page table pages allocated in advance.
	 */
	pte1_idx = pte1_index(KERNBASE - PT2MAP_SIZE);
	pte2p = kern_pt2tab_entry(KERNBASE - PT2MAP_SIZE);
	for (i = 0; i < nkpt2pg + NPG_IN_PT2TAB; i++, pte2p++) {
		vm_paddr_t pa;
		vm_page_t m;

		pte2 = pte2_load(pte2p);
		KASSERT(pte2_is_valid(pte2), ("%s: no valid entry", __func__));

		pa = pte2_pa(pte2);
		m = PHYS_TO_VM_PAGE(pa);
		KASSERT(m >= vm_page_array &&
		    m < &vm_page_array[vm_page_array_size],
		    ("%s: L2 page table page is out of range", __func__));

		m->pindex = pte1_idx;
		m->phys_addr = pa;
		pte1_idx += NPT2_IN_PG;
	}

	/*
	 * Initialize the address space (zone) for the pv entries. Set a
	 * high water mark so that the system can recover from excessive
	 * numbers of pv entries.
	 */
	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
	pv_entry_max = shpgperproc * maxproc + vm_cnt.v_page_count;
	TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
	pv_entry_max = roundup(pv_entry_max, _NPCPV);
	pv_entry_high_water = 9 * (pv_entry_max / 10);

	/*
	 * Are large page mappings enabled?
	 */
	TUNABLE_INT_FETCH("vm.pmap.sp_enabled", &sp_enabled);
	if (sp_enabled) {
		KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0,
		    ("%s: can't assign to pagesizes[1]", __func__));
		pagesizes[1] = PTE1_SIZE;
	}

	/*
	 * Calculate the size of the pv head table for sections.
	 * Handle the possibility that "vm_phys_segs[...].end" is zero.
	 * Note that the table is only for sections which could be promoted.
	 */
	first_managed_pa = pte1_trunc(vm_phys_segs[0].start);
	pv_npg = (pte1_trunc(vm_phys_segs[vm_phys_nsegs - 1].end - PAGE_SIZE)
	    - first_managed_pa) / PTE1_SIZE + 1;

	/*
	 * Allocate memory for the pv head table for sections.
	 */
	s = (vm_size_t)(pv_npg * sizeof(struct md_page));
	s = round_page(s);
	pv_table = (struct md_page *)kmem_malloc(s, M_WAITOK | M_ZERO);
	for (i = 0; i < pv_npg; i++)
		TAILQ_INIT(&pv_table[i].pv_list);

	pv_maxchunks = MAX(pv_entry_max / _NPCPV, maxproc);
	pv_chunkbase = (struct pv_chunk *)kva_alloc(PAGE_SIZE * pv_maxchunks);
	if (pv_chunkbase == NULL)
		panic("%s: not enough kvm for pv chunks", __func__);
	pmap_pte2list_init(&pv_vafree, pv_chunkbase, pv_maxchunks);
}

/*
 * Add a list of wired pages to the kva. This routine is only used for
 * temporary kernel mappings that do not need to have page modification
 * or references recorded. Note that old mappings are simply written
 * over. The page *must* be wired.
 * Note: SMP coherent. Uses a ranged shootdown IPI.
 */
void
pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
{
	u_int anychanged;
	pt2_entry_t *epte2p, *pte2p, pte2;
	vm_page_t m;
	vm_paddr_t pa;

	anychanged = 0;
	pte2p = pt2map_entry(sva);
	epte2p = pte2p + count;
	while (pte2p < epte2p) {
		m = *ma++;
		pa = VM_PAGE_TO_PHYS(m);
		pte2 = pte2_load(pte2p);
		if ((pte2_pa(pte2) != pa) ||
		    (pte2_attr(pte2) != vm_page_pte2_attr(m))) {
			anychanged++;
			pte2_store(pte2p, PTE2_KERN(pa, PTE2_AP_KRW,
			    vm_page_pte2_attr(m)));
		}
		pte2p++;
	}
	if (__predict_false(anychanged))
		tlb_flush_range(sva, count * PAGE_SIZE);
}
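
/*
 * Note the small optimization in pmap_qenter() above: entries already
 * holding the desired physical address and attributes are left alone,
 * and the (costly, possibly IPI-backed) TLB range flush is issued only
 * if at least one entry was actually rewritten.
 */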
Uses a ranged shootdown IPI. 1836 */ 1837 void 1838 pmap_qremove(vm_offset_t sva, int count) 1839 { 1840 vm_offset_t va; 1841 1842 va = sva; 1843 while (count-- > 0) { 1844 pmap_kremove(va); 1845 va += PAGE_SIZE; 1846 } 1847 tlb_flush_range(sva, va - sva); 1848 } 1849 1850 /* 1851 * Are we current address space or kernel? 1852 */ 1853 static __inline int 1854 pmap_is_current(pmap_t pmap) 1855 { 1856 1857 return (pmap == kernel_pmap || 1858 (pmap == vmspace_pmap(curthread->td_proc->p_vmspace))); 1859 } 1860 1861 /* 1862 * If the given pmap is not the current or kernel pmap, the returned 1863 * pte2 must be released by passing it to pmap_pte2_release(). 1864 */ 1865 static pt2_entry_t * 1866 pmap_pte2(pmap_t pmap, vm_offset_t va) 1867 { 1868 pt1_entry_t pte1; 1869 vm_paddr_t pt2pg_pa; 1870 1871 pte1 = pte1_load(pmap_pte1(pmap, va)); 1872 if (pte1_is_section(pte1)) 1873 panic("%s: attempt to map PTE1", __func__); 1874 if (pte1_is_link(pte1)) { 1875 /* Are we current address space or kernel? */ 1876 if (pmap_is_current(pmap)) 1877 return (pt2map_entry(va)); 1878 /* Note that L2 page table size is not equal to PAGE_SIZE. */ 1879 pt2pg_pa = trunc_page(pte1_link_pa(pte1)); 1880 mtx_lock(&PMAP2mutex); 1881 if (pte2_pa(pte2_load(PMAP2)) != pt2pg_pa) { 1882 pte2_store(PMAP2, PTE2_KPT(pt2pg_pa)); 1883 tlb_flush((vm_offset_t)PADDR2); 1884 } 1885 return (PADDR2 + (arm32_btop(va) & (NPTE2_IN_PG - 1))); 1886 } 1887 return (NULL); 1888 } 1889 1890 /* 1891 * Releases a pte2 that was obtained from pmap_pte2(). 1892 * Be prepared for the pte2p being NULL. 1893 */ 1894 static __inline void 1895 pmap_pte2_release(pt2_entry_t *pte2p) 1896 { 1897 1898 if ((pt2_entry_t *)(trunc_page((vm_offset_t)pte2p)) == PADDR2) { 1899 mtx_unlock(&PMAP2mutex); 1900 } 1901 } 1902 1903 /* 1904 * Super fast pmap_pte2 routine best used when scanning 1905 * the pv lists. This eliminates many coarse-grained 1906 * invltlb calls. Note that many of the pv list 1907 * scans are across different pmaps. It is very wasteful 1908 * to do an entire tlb flush for checking a single mapping. 1909 * 1910 * If the given pmap is not the current pmap, pvh_global_lock 1911 * must be held and curthread pinned to a CPU. 1912 */ 1913 static pt2_entry_t * 1914 pmap_pte2_quick(pmap_t pmap, vm_offset_t va) 1915 { 1916 pt1_entry_t pte1; 1917 vm_paddr_t pt2pg_pa; 1918 1919 pte1 = pte1_load(pmap_pte1(pmap, va)); 1920 if (pte1_is_section(pte1)) 1921 panic("%s: attempt to map PTE1", __func__); 1922 if (pte1_is_link(pte1)) { 1923 /* Are we current address space or kernel? */ 1924 if (pmap_is_current(pmap)) 1925 return (pt2map_entry(va)); 1926 rw_assert(&pvh_global_lock, RA_WLOCKED); 1927 KASSERT(curthread->td_pinned > 0, 1928 ("%s: curthread not pinned", __func__)); 1929 /* Note that L2 page table size is not equal to PAGE_SIZE. */ 1930 pt2pg_pa = trunc_page(pte1_link_pa(pte1)); 1931 if (pte2_pa(pte2_load(PMAP1)) != pt2pg_pa) { 1932 pte2_store(PMAP1, PTE2_KPT(pt2pg_pa)); 1933 #ifdef SMP 1934 PMAP1cpu = PCPU_GET(cpuid); 1935 #endif 1936 tlb_flush_local((vm_offset_t)PADDR1); 1937 PMAP1changed++; 1938 } else 1939 #ifdef SMP 1940 if (PMAP1cpu != PCPU_GET(cpuid)) { 1941 PMAP1cpu = PCPU_GET(cpuid); 1942 tlb_flush_local((vm_offset_t)PADDR1); 1943 PMAP1changedcpu++; 1944 } else 1945 #endif 1946 PMAP1unchanged++; 1947 return (PADDR1 + (arm32_btop(va) & (NPTE2_IN_PG - 1))); 1948 } 1949 return (NULL); 1950 } 1951 1952 /* 1953 * Routine: pmap_extract 1954 * Function: 1955 * Extract the physical page address associated 1956 * with the given map/virtual_address pair. 
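 *
 * A minimal usage sketch (illustrative only, not from the original
 * source; error handling is omitted):
 *
 *	vm_paddr_t pa;
 *
 *	pa = pmap_extract(kernel_pmap, va);
 *	if (pa == 0)
 *		printf("va %#x is not mapped\n", va);
 *
 * Note that 0 is returned for an unmapped address, so a mapping of
 * physical address 0 cannot be distinguished from a missing one.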
1957 */ 1958 vm_paddr_t 1959 pmap_extract(pmap_t pmap, vm_offset_t va) 1960 { 1961 vm_paddr_t pa; 1962 pt1_entry_t pte1; 1963 pt2_entry_t *pte2p; 1964 1965 PMAP_LOCK(pmap); 1966 pte1 = pte1_load(pmap_pte1(pmap, va)); 1967 if (pte1_is_section(pte1)) 1968 pa = pte1_pa(pte1) | (va & PTE1_OFFSET); 1969 else if (pte1_is_link(pte1)) { 1970 pte2p = pmap_pte2(pmap, va); 1971 pa = pte2_pa(pte2_load(pte2p)) | (va & PTE2_OFFSET); 1972 pmap_pte2_release(pte2p); 1973 } else 1974 pa = 0; 1975 PMAP_UNLOCK(pmap); 1976 return (pa); 1977 } 1978 1979 /* 1980 * Routine: pmap_extract_and_hold 1981 * Function: 1982 * Atomically extract and hold the physical page 1983 * with the given pmap and virtual address pair 1984 * if that mapping permits the given protection. 1985 */ 1986 vm_page_t 1987 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot) 1988 { 1989 vm_paddr_t pa, lockpa; 1990 pt1_entry_t pte1; 1991 pt2_entry_t pte2, *pte2p; 1992 vm_page_t m; 1993 1994 lockpa = 0; 1995 m = NULL; 1996 PMAP_LOCK(pmap); 1997 retry: 1998 pte1 = pte1_load(pmap_pte1(pmap, va)); 1999 if (pte1_is_section(pte1)) { 2000 if (!(pte1 & PTE1_RO) || !(prot & VM_PROT_WRITE)) { 2001 pa = pte1_pa(pte1) | (va & PTE1_OFFSET); 2002 if (vm_page_pa_tryrelock(pmap, pa, &lockpa)) 2003 goto retry; 2004 m = PHYS_TO_VM_PAGE(pa); 2005 vm_page_hold(m); 2006 } 2007 } else if (pte1_is_link(pte1)) { 2008 pte2p = pmap_pte2(pmap, va); 2009 pte2 = pte2_load(pte2p); 2010 pmap_pte2_release(pte2p); 2011 if (pte2_is_valid(pte2) && 2012 (!(pte2 & PTE2_RO) || !(prot & VM_PROT_WRITE))) { 2013 pa = pte2_pa(pte2); 2014 if (vm_page_pa_tryrelock(pmap, pa, &lockpa)) 2015 goto retry; 2016 m = PHYS_TO_VM_PAGE(pa); 2017 vm_page_hold(m); 2018 } 2019 } 2020 PA_UNLOCK_COND(lockpa); 2021 PMAP_UNLOCK(pmap); 2022 return (m); 2023 } 2024 2025 /* 2026 * Grow the number of kernel L2 page table entries, if needed. 2027 */ 2028 void 2029 pmap_growkernel(vm_offset_t addr) 2030 { 2031 vm_page_t m; 2032 vm_paddr_t pt2pg_pa, pt2_pa; 2033 pt1_entry_t pte1; 2034 pt2_entry_t pte2; 2035 2036 PDEBUG(1, printf("%s: addr = %#x\n", __func__, addr)); 2037 /* 2038 * All the time kernel_vm_end is first KVA for which underlying 2039 * L2 page table is either not allocated or linked from L1 page table 2040 * (not considering sections). Except for two possible cases: 2041 * 2042 * (1) in the very beginning as long as pmap_growkernel() was 2043 * not called, it could be first unused KVA (which is not 2044 * rounded up to PTE1_SIZE), 2045 * 2046 * (2) when all KVA space is mapped and vm_map_max(kernel_map) 2047 * address is not rounded up to PTE1_SIZE. (For example, 2048 * it could be 0xFFFFFFFF.) 2049 */ 2050 kernel_vm_end = pte1_roundup(kernel_vm_end); 2051 mtx_assert(&kernel_map->system_mtx, MA_OWNED); 2052 addr = roundup2(addr, PTE1_SIZE); 2053 if (addr - 1 >= vm_map_max(kernel_map)) 2054 addr = vm_map_max(kernel_map); 2055 while (kernel_vm_end < addr) { 2056 pte1 = pte1_load(kern_pte1(kernel_vm_end)); 2057 if (pte1_is_valid(pte1)) { 2058 kernel_vm_end += PTE1_SIZE; 2059 if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) { 2060 kernel_vm_end = vm_map_max(kernel_map); 2061 break; 2062 } 2063 continue; 2064 } 2065 2066 /* 2067 * kernel_vm_end_new is used in pmap_pinit() when kernel 2068 * mappings are entered to new pmap all at once to avoid race 2069 * between pmap_kenter_pte1() and kernel_vm_end increase. 2070 * The same aplies to pmap_kenter_pt2tab(). 
2071 */ 2072 kernel_vm_end_new = kernel_vm_end + PTE1_SIZE; 2073 2074 pte2 = pt2tab_load(kern_pt2tab_entry(kernel_vm_end)); 2075 if (!pte2_is_valid(pte2)) { 2076 /* 2077 * Install new PT2s page into kernel PT2TAB. 2078 */ 2079 m = vm_page_alloc(NULL, 2080 pte1_index(kernel_vm_end) & ~PT2PG_MASK, 2081 VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | 2082 VM_ALLOC_WIRED | VM_ALLOC_ZERO); 2083 if (m == NULL) 2084 panic("%s: no memory to grow kernel", __func__); 2085 /* 2086 * QQQ: To link all new L2 page tables from L1 page 2087 * table now and so pmap_kenter_pte1() them 2088 * at once together with pmap_kenter_pt2tab() 2089 * could be nice speed up. However, 2090 * pmap_growkernel() does not happen so often... 2091 * QQQ: The other TTBR is another option. 2092 */ 2093 pt2pg_pa = pmap_pt2pg_init(kernel_pmap, kernel_vm_end, 2094 m); 2095 } else 2096 pt2pg_pa = pte2_pa(pte2); 2097 2098 pt2_pa = page_pt2pa(pt2pg_pa, pte1_index(kernel_vm_end)); 2099 pmap_kenter_pte1(kernel_vm_end, PTE1_LINK(pt2_pa)); 2100 2101 kernel_vm_end = kernel_vm_end_new; 2102 if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) { 2103 kernel_vm_end = vm_map_max(kernel_map); 2104 break; 2105 } 2106 } 2107 } 2108 2109 static int 2110 kvm_size(SYSCTL_HANDLER_ARGS) 2111 { 2112 unsigned long ksize = vm_max_kernel_address - KERNBASE; 2113 2114 return (sysctl_handle_long(oidp, &ksize, 0, req)); 2115 } 2116 SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG|CTLFLAG_RD, 2117 0, 0, kvm_size, "IU", "Size of KVM"); 2118 2119 static int 2120 kvm_free(SYSCTL_HANDLER_ARGS) 2121 { 2122 unsigned long kfree = vm_max_kernel_address - kernel_vm_end; 2123 2124 return (sysctl_handle_long(oidp, &kfree, 0, req)); 2125 } 2126 SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD, 2127 0, 0, kvm_free, "IU", "Amount of KVM free"); 2128 2129 /*********************************************** 2130 * 2131 * Pmap allocation/deallocation routines. 2132 * 2133 ***********************************************/ 2134 2135 /* 2136 * Initialize the pmap for the swapper process. 2137 */ 2138 void 2139 pmap_pinit0(pmap_t pmap) 2140 { 2141 PDEBUG(1, printf("%s: pmap = %p\n", __func__, pmap)); 2142 2143 PMAP_LOCK_INIT(pmap); 2144 2145 /* 2146 * Kernel page table directory and pmap stuff around is already 2147 * initialized, we are using it right now and here. So, finish 2148 * only PMAP structures initialization for process0 ... 2149 * 2150 * Since the L1 page table and PT2TAB is shared with the kernel pmap, 2151 * which is already included in the list "allpmaps", this pmap does 2152 * not need to be inserted into that list. 
2153 */ 2154 pmap->pm_pt1 = kern_pt1; 2155 pmap->pm_pt2tab = kern_pt2tab; 2156 CPU_ZERO(&pmap->pm_active); 2157 PCPU_SET(curpmap, pmap); 2158 TAILQ_INIT(&pmap->pm_pvchunk); 2159 bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 2160 CPU_SET(0, &pmap->pm_active); 2161 } 2162 2163 static __inline void 2164 pte1_copy_nosync(pt1_entry_t *spte1p, pt1_entry_t *dpte1p, vm_offset_t sva, 2165 vm_offset_t eva) 2166 { 2167 u_int idx, count; 2168 2169 idx = pte1_index(sva); 2170 count = (pte1_index(eva) - idx + 1) * sizeof(pt1_entry_t); 2171 bcopy(spte1p + idx, dpte1p + idx, count); 2172 } 2173 2174 static __inline void 2175 pt2tab_copy_nosync(pt2_entry_t *spte2p, pt2_entry_t *dpte2p, vm_offset_t sva, 2176 vm_offset_t eva) 2177 { 2178 u_int idx, count; 2179 2180 idx = pt2tab_index(sva); 2181 count = (pt2tab_index(eva) - idx + 1) * sizeof(pt2_entry_t); 2182 bcopy(spte2p + idx, dpte2p + idx, count); 2183 } 2184 2185 /* 2186 * Initialize a preallocated and zeroed pmap structure, 2187 * such as one in a vmspace structure. 2188 */ 2189 int 2190 pmap_pinit(pmap_t pmap) 2191 { 2192 pt1_entry_t *pte1p; 2193 pt2_entry_t *pte2p; 2194 vm_paddr_t pa, pt2tab_pa; 2195 u_int i; 2196 2197 PDEBUG(6, printf("%s: pmap = %p, pm_pt1 = %p\n", __func__, pmap, 2198 pmap->pm_pt1)); 2199 2200 /* 2201 * No need to allocate L2 page table space yet but we do need 2202 * a valid L1 page table and PT2TAB table. 2203 * 2204 * Install shared kernel mappings to these tables. It's a little 2205 * tricky as some parts of KVA are reserved for vectors, devices, 2206 * and whatever else. These parts are supposed to be above 2207 * vm_max_kernel_address. Thus two regions should be installed: 2208 * 2209 * (1) <KERNBASE, kernel_vm_end), 2210 * (2) <vm_max_kernel_address, 0xFFFFFFFF>. 2211 * 2212 * QQQ: The second region should be stable enough to be installed 2213 * only once in time when the tables are allocated. 2214 * QQQ: Maybe copy of both regions at once could be faster ... 2215 * QQQ: Maybe the other TTBR is an option. 2216 * 2217 * Finally, install own PT2TAB table to these tables. 2218 */ 2219 2220 if (pmap->pm_pt1 == NULL) { 2221 pmap->pm_pt1 = (pt1_entry_t *)kmem_alloc_contig(NB_IN_PT1, 2222 M_NOWAIT | M_ZERO, 0, -1UL, NB_IN_PT1, 0, pt_memattr); 2223 if (pmap->pm_pt1 == NULL) 2224 return (0); 2225 } 2226 if (pmap->pm_pt2tab == NULL) { 2227 /* 2228 * QQQ: (1) PT2TAB must be contiguous. If PT2TAB is one page 2229 * only, what should be the only size for 32 bit systems, 2230 * then we could allocate it with vm_page_alloc() and all 2231 * the stuff needed as other L2 page table pages. 2232 * (2) Note that a process PT2TAB is special L2 page table 2233 * page. Its mapping in kernel_arena is permanent and can 2234 * be used no matter which process is current. Its mapping 2235 * in PT2MAP can be used only for current process. 2236 */ 2237 pmap->pm_pt2tab = (pt2_entry_t *)kmem_alloc_attr(NB_IN_PT2TAB, 2238 M_NOWAIT | M_ZERO, 0, -1UL, pt_memattr); 2239 if (pmap->pm_pt2tab == NULL) { 2240 /* 2241 * QQQ: As struct pmap is allocated from UMA with 2242 * UMA_ZONE_NOFREE flag, it's important to leave 2243 * no allocation in pmap if initialization failed. 2244 */ 2245 kmem_free((vm_offset_t)pmap->pm_pt1, NB_IN_PT1); 2246 pmap->pm_pt1 = NULL; 2247 return (0); 2248 } 2249 /* 2250 * QQQ: Each L2 page table page vm_page_t has pindex set to 2251 * pte1 index of virtual address mapped by this page. 2252 * It's not valid for non kernel PT2TABs themselves. 
The pindex of these pages cannot be altered because 2254 * of the way they are allocated now. However, it 2255 * should not be a problem. 2256 */ 2257 } 2258 2259 mtx_lock_spin(&allpmaps_lock); 2260 /* 2261 * To avoid a race with pmap_kenter_pte1() and pmap_kenter_pt2tab(), 2262 * kernel_vm_end_new is used here instead of kernel_vm_end. 2263 */ 2264 pte1_copy_nosync(kern_pt1, pmap->pm_pt1, KERNBASE, 2265 kernel_vm_end_new - 1); 2266 pte1_copy_nosync(kern_pt1, pmap->pm_pt1, vm_max_kernel_address, 2267 0xFFFFFFFF); 2268 pt2tab_copy_nosync(kern_pt2tab, pmap->pm_pt2tab, KERNBASE, 2269 kernel_vm_end_new - 1); 2270 pt2tab_copy_nosync(kern_pt2tab, pmap->pm_pt2tab, vm_max_kernel_address, 2271 0xFFFFFFFF); 2272 LIST_INSERT_HEAD(&allpmaps, pmap, pm_list); 2273 mtx_unlock_spin(&allpmaps_lock); 2274 2275 /* 2276 * Store PT2MAP PT2 pages (a.k.a. PT2TAB) in PT2TAB itself, 2277 * i.e. a self-reference mapping. The PT2TAB is private; however, 2278 * it is mapped into the shared PT2MAP space, so the mapping must not be global. 2279 */ 2280 pt2tab_pa = vtophys(pmap->pm_pt2tab); 2281 pte2p = pmap_pt2tab_entry(pmap, (vm_offset_t)PT2MAP); 2282 for (pa = pt2tab_pa, i = 0; i < NPG_IN_PT2TAB; i++, pa += PTE2_SIZE) { 2283 pt2tab_store(pte2p++, PTE2_KPT_NG(pa)); 2284 } 2285 2286 /* Insert PT2MAP PT2s into pmap PT1. */ 2287 pte1p = pmap_pte1(pmap, (vm_offset_t)PT2MAP); 2288 for (pa = pt2tab_pa, i = 0; i < NPT2_IN_PT2TAB; i++, pa += NB_IN_PT2) { 2289 pte1_store(pte1p++, PTE1_LINK(pa)); 2290 } 2291 2292 /* 2293 * Now synchronize the new mappings which were made above. 2294 */ 2295 pte1_sync_range(pmap->pm_pt1, NB_IN_PT1); 2296 pte2_sync_range(pmap->pm_pt2tab, NB_IN_PT2TAB); 2297 2298 CPU_ZERO(&pmap->pm_active); 2299 TAILQ_INIT(&pmap->pm_pvchunk); 2300 bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 2301 2302 return (1); 2303 } 2304 2305 #ifdef INVARIANTS 2306 static boolean_t 2307 pt2tab_user_is_empty(pt2_entry_t *tab) 2308 { 2309 u_int i, end; 2310 2311 end = pt2tab_index(VM_MAXUSER_ADDRESS); 2312 for (i = 0; i < end; i++) 2313 if (tab[i] != 0) return (FALSE); 2314 return (TRUE); 2315 } 2316 #endif 2317 /* 2318 * Release any resources held by the given physical map. 2319 * Called when a pmap initialized by pmap_pinit is being released. 2320 * Should only be called if the map contains no valid mappings. 2321 */ 2322 void 2323 pmap_release(pmap_t pmap) 2324 { 2325 #ifdef INVARIANTS 2326 vm_offset_t start, end; 2327 #endif 2328 KASSERT(pmap->pm_stats.resident_count == 0, 2329 ("%s: pmap resident count %ld != 0", __func__, 2330 pmap->pm_stats.resident_count)); 2331 KASSERT(pt2tab_user_is_empty(pmap->pm_pt2tab), 2332 ("%s: has allocated user PT2(s)", __func__)); 2333 KASSERT(CPU_EMPTY(&pmap->pm_active), 2334 ("%s: pmap %p is active on some CPU(s)", __func__, pmap)); 2335 2336 mtx_lock_spin(&allpmaps_lock); 2337 LIST_REMOVE(pmap, pm_list); 2338 mtx_unlock_spin(&allpmaps_lock); 2339 2340 #ifdef INVARIANTS 2341 start = pte1_index(KERNBASE) * sizeof(pt1_entry_t); 2342 end = (pte1_index(0xFFFFFFFF) + 1) * sizeof(pt1_entry_t); 2343 bzero((char *)pmap->pm_pt1 + start, end - start); 2344 2345 start = pt2tab_index(KERNBASE) * sizeof(pt2_entry_t); 2346 end = (pt2tab_index(0xFFFFFFFF) + 1) * sizeof(pt2_entry_t); 2347 bzero((char *)pmap->pm_pt2tab + start, end - start); 2348 #endif 2349 /* 2350 * We are leaving PT1 and PT2TAB allocated in the released pmap, 2351 * so hopefully the UMA vmspace_zone will always be created with the 2352 * UMA_ZONE_NOFREE flag.
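 *
 * Illustrative sketch of what is relied upon here (an assumption,
 * paraphrased from the MI VM code rather than taken from this file):
 *
 *	vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace),
 *	    NULL, NULL, vmspace_zinit, NULL, UMA_ALIGN_PTR,
 *	    UMA_ZONE_NOFREE);
 *
 * With UMA_ZONE_NOFREE, a released vmspace (and so this pmap with its
 * PT1 and PT2TAB) is recycled through pmap_pinit() rather than being
 * returned to the system, so the allocations kept above are not leaked.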
2353 */ 2354 } 2355 2356 /********************************************************* 2357 * 2358 * L2 table pages and their pages management routines. 2359 * 2360 *********************************************************/ 2361 2362 /* 2363 * Virtual interface for L2 page table wire counting. 2364 * 2365 * Each L2 page table in a page has own counter which counts a number of 2366 * valid mappings in a table. Global page counter counts mappings in all 2367 * tables in a page plus a single itself mapping in PT2TAB. 2368 * 2369 * During a promotion we leave the associated L2 page table counter 2370 * untouched, so the table (strictly speaking a page which holds it) 2371 * is never freed if promoted. 2372 * 2373 * If a page m->wire_count == 1 then no valid mappings exist in any L2 page 2374 * table in the page and the page itself is only mapped in PT2TAB. 2375 */ 2376 2377 static __inline void 2378 pt2_wirecount_init(vm_page_t m) 2379 { 2380 u_int i; 2381 2382 /* 2383 * Note: A page m is allocated with VM_ALLOC_WIRED flag and 2384 * m->wire_count should be already set correctly. 2385 * So, there is no need to set it again herein. 2386 */ 2387 for (i = 0; i < NPT2_IN_PG; i++) 2388 m->md.pt2_wirecount[i] = 0; 2389 } 2390 2391 static __inline void 2392 pt2_wirecount_inc(vm_page_t m, uint32_t pte1_idx) 2393 { 2394 2395 /* 2396 * Note: A just modificated pte2 (i.e. already allocated) 2397 * is acquiring one extra reference which must be 2398 * explicitly cleared. It influences the KASSERTs herein. 2399 * All L2 page tables in a page always belong to the same 2400 * pmap, so we allow only one extra reference for the page. 2401 */ 2402 KASSERT(m->md.pt2_wirecount[pte1_idx & PT2PG_MASK] < (NPTE2_IN_PT2 + 1), 2403 ("%s: PT2 is overflowing ...", __func__)); 2404 KASSERT(m->wire_count <= (NPTE2_IN_PG + 1), 2405 ("%s: PT2PG is overflowing ...", __func__)); 2406 2407 m->wire_count++; 2408 m->md.pt2_wirecount[pte1_idx & PT2PG_MASK]++; 2409 } 2410 2411 static __inline void 2412 pt2_wirecount_dec(vm_page_t m, uint32_t pte1_idx) 2413 { 2414 2415 KASSERT(m->md.pt2_wirecount[pte1_idx & PT2PG_MASK] != 0, 2416 ("%s: PT2 is underflowing ...", __func__)); 2417 KASSERT(m->wire_count > 1, 2418 ("%s: PT2PG is underflowing ...", __func__)); 2419 2420 m->wire_count--; 2421 m->md.pt2_wirecount[pte1_idx & PT2PG_MASK]--; 2422 } 2423 2424 static __inline void 2425 pt2_wirecount_set(vm_page_t m, uint32_t pte1_idx, uint16_t count) 2426 { 2427 2428 KASSERT(count <= NPTE2_IN_PT2, 2429 ("%s: invalid count %u", __func__, count)); 2430 KASSERT(m->wire_count > m->md.pt2_wirecount[pte1_idx & PT2PG_MASK], 2431 ("%s: PT2PG corrupting (%u, %u) ...", __func__, m->wire_count, 2432 m->md.pt2_wirecount[pte1_idx & PT2PG_MASK])); 2433 2434 m->wire_count -= m->md.pt2_wirecount[pte1_idx & PT2PG_MASK]; 2435 m->wire_count += count; 2436 m->md.pt2_wirecount[pte1_idx & PT2PG_MASK] = count; 2437 2438 KASSERT(m->wire_count <= (NPTE2_IN_PG + 1), 2439 ("%s: PT2PG is overflowed (%u) ...", __func__, m->wire_count)); 2440 } 2441 2442 static __inline uint32_t 2443 pt2_wirecount_get(vm_page_t m, uint32_t pte1_idx) 2444 { 2445 2446 return (m->md.pt2_wirecount[pte1_idx & PT2PG_MASK]); 2447 } 2448 2449 static __inline boolean_t 2450 pt2_is_empty(vm_page_t m, vm_offset_t va) 2451 { 2452 2453 return (m->md.pt2_wirecount[pte1_index(va) & PT2PG_MASK] == 0); 2454 } 2455 2456 static __inline boolean_t 2457 pt2_is_full(vm_page_t m, vm_offset_t va) 2458 { 2459 2460 return (m->md.pt2_wirecount[pte1_index(va) & PT2PG_MASK] == 2461 NPTE2_IN_PT2); 2462 } 2463 2464 static 
__inline boolean_t 2465 pt2pg_is_empty(vm_page_t m) 2466 { 2467 2468 return (m->wire_count == 1); 2469 } 2470 2471 /* 2472 * This routine is called if the L2 page table 2473 * is not mapped correctly. 2474 */ 2475 static vm_page_t 2476 _pmap_allocpte2(pmap_t pmap, vm_offset_t va, u_int flags) 2477 { 2478 uint32_t pte1_idx; 2479 pt1_entry_t *pte1p; 2480 pt2_entry_t pte2; 2481 vm_page_t m; 2482 vm_paddr_t pt2pg_pa, pt2_pa; 2483 2484 pte1_idx = pte1_index(va); 2485 pte1p = pmap->pm_pt1 + pte1_idx; 2486 2487 KASSERT(pte1_load(pte1p) == 0, 2488 ("%s: pm_pt1[%#x] is not zero: %#x", __func__, pte1_idx, 2489 pte1_load(pte1p))); 2490 2491 pte2 = pt2tab_load(pmap_pt2tab_entry(pmap, va)); 2492 if (!pte2_is_valid(pte2)) { 2493 /* 2494 * Install new PT2s page into pmap PT2TAB. 2495 */ 2496 m = vm_page_alloc(NULL, pte1_idx & ~PT2PG_MASK, 2497 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO); 2498 if (m == NULL) { 2499 if ((flags & PMAP_ENTER_NOSLEEP) == 0) { 2500 PMAP_UNLOCK(pmap); 2501 rw_wunlock(&pvh_global_lock); 2502 vm_wait(NULL); 2503 rw_wlock(&pvh_global_lock); 2504 PMAP_LOCK(pmap); 2505 } 2506 2507 /* 2508 * Indicate the need to retry. While waiting, 2509 * the L2 page table page may have been allocated. 2510 */ 2511 return (NULL); 2512 } 2513 pmap->pm_stats.resident_count++; 2514 pt2pg_pa = pmap_pt2pg_init(pmap, va, m); 2515 } else { 2516 pt2pg_pa = pte2_pa(pte2); 2517 m = PHYS_TO_VM_PAGE(pt2pg_pa); 2518 } 2519 2520 pt2_wirecount_inc(m, pte1_idx); 2521 pt2_pa = page_pt2pa(pt2pg_pa, pte1_idx); 2522 pte1_store(pte1p, PTE1_LINK(pt2_pa)); 2523 2524 return (m); 2525 } 2526 2527 static vm_page_t 2528 pmap_allocpte2(pmap_t pmap, vm_offset_t va, u_int flags) 2529 { 2530 u_int pte1_idx; 2531 pt1_entry_t *pte1p, pte1; 2532 vm_page_t m; 2533 2534 pte1_idx = pte1_index(va); 2535 retry: 2536 pte1p = pmap->pm_pt1 + pte1_idx; 2537 pte1 = pte1_load(pte1p); 2538 2539 /* 2540 * This supports switching from a 1MB page to a 2541 * normal 4K page. 2542 */ 2543 if (pte1_is_section(pte1)) { 2544 (void)pmap_demote_pte1(pmap, pte1p, va); 2545 /* 2546 * Reload pte1 after demotion. 2547 * 2548 * Note: Demotion can even fail, as either no PT2 is found for 2549 * the virtual address or a PT2PG cannot be allocated. 2550 */ 2551 pte1 = pte1_load(pte1p); 2552 } 2553 2554 /* 2555 * If the L2 page table page is mapped, we just increment the 2556 * hold count, and activate it. 2557 */ 2558 if (pte1_is_link(pte1)) { 2559 m = PHYS_TO_VM_PAGE(pte1_link_pa(pte1)); 2560 pt2_wirecount_inc(m, pte1_idx); 2561 } else { 2562 /* 2563 * We get here if the PT2 isn't mapped, or if it has 2564 * been deallocated. 2565 */ 2566 m = _pmap_allocpte2(pmap, va, flags); 2567 if (m == NULL && (flags & PMAP_ENTER_NOSLEEP) == 0) 2568 goto retry; 2569 } 2570 2571 return (m); 2572 } 2573 2574 /* 2575 * Schedule the specified unused L2 page table page to be freed. Specifically, 2576 * add the page to the specified list of pages that will be released to the 2577 * physical memory manager after the TLB has been updated. 2578 */ 2579 static __inline void 2580 pmap_add_delayed_free_list(vm_page_t m, struct spglist *free) 2581 { 2582 2583 /* 2584 * Put the page on a list so that it is released after 2585 * *ALL* TLB shootdown is done. 2586 */ 2587 #ifdef PMAP_DEBUG 2588 pmap_zero_page_check(m); 2589 #endif 2590 m->flags |= PG_ZERO; 2591 SLIST_INSERT_HEAD(free, m, plinks.s.ss); 2592 } 2593 2594 /* 2595 * Unwire L2 page tables page.
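 *
 * The invariant maintained by the counters above, as a sketch (this
 * check is illustrative and not present in the original source; in the
 * steady state, a just modified pte2 may transiently hold one extra
 * reference, see pt2_wirecount_inc()):
 *
 *	u_int i, sum;
 *
 *	for (sum = 0, i = 0; i < NPT2_IN_PG; i++)
 *		sum += m->md.pt2_wirecount[i];
 *	KASSERT(m->wire_count == sum + 1,
 *	    ("PT2PG wire count does not match its PT2 counters"));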
2596 */ 2597 static void 2598 pmap_unwire_pt2pg(pmap_t pmap, vm_offset_t va, vm_page_t m) 2599 { 2600 pt1_entry_t *pte1p, opte1 __unused; 2601 pt2_entry_t *pte2p; 2602 uint32_t i; 2603 2604 KASSERT(pt2pg_is_empty(m), 2605 ("%s: pmap %p PT2PG %p wired", __func__, pmap, m)); 2606 2607 /* 2608 * Unmap all L2 page tables in the page from L1 page table. 2609 * 2610 * QQQ: Individual L2 page tables (except the last one) can be unmapped 2611 * earlier. However, we are doing that this way. 2612 */ 2613 KASSERT(m->pindex == (pte1_index(va) & ~PT2PG_MASK), 2614 ("%s: pmap %p va %#x PT2PG %p bad index", __func__, pmap, va, m)); 2615 pte1p = pmap->pm_pt1 + m->pindex; 2616 for (i = 0; i < NPT2_IN_PG; i++, pte1p++) { 2617 KASSERT(m->md.pt2_wirecount[i] == 0, 2618 ("%s: pmap %p PT2 %u (PG %p) wired", __func__, pmap, i, m)); 2619 opte1 = pte1_load(pte1p); 2620 if (pte1_is_link(opte1)) { 2621 pte1_clear(pte1p); 2622 /* 2623 * Flush intermediate TLB cache. 2624 */ 2625 pmap_tlb_flush(pmap, (m->pindex + i) << PTE1_SHIFT); 2626 } 2627 #ifdef INVARIANTS 2628 else 2629 KASSERT((opte1 == 0) || pte1_is_section(opte1), 2630 ("%s: pmap %p va %#x bad pte1 %x at %u", __func__, 2631 pmap, va, opte1, i)); 2632 #endif 2633 } 2634 2635 /* 2636 * Unmap the page from PT2TAB. 2637 */ 2638 pte2p = pmap_pt2tab_entry(pmap, va); 2639 (void)pt2tab_load_clear(pte2p); 2640 pmap_tlb_flush(pmap, pt2map_pt2pg(va)); 2641 2642 m->wire_count = 0; 2643 pmap->pm_stats.resident_count--; 2644 2645 /* 2646 * This barrier is so that the ordinary store unmapping 2647 * the L2 page table page is globally performed before TLB shoot- 2648 * down is begun. 2649 */ 2650 wmb(); 2651 vm_wire_sub(1); 2652 } 2653 2654 /* 2655 * Decrements a L2 page table page's wire count, which is used to record the 2656 * number of valid page table entries within the page. If the wire count 2657 * drops to zero, then the page table page is unmapped. Returns TRUE if the 2658 * page table page was unmapped and FALSE otherwise. 2659 */ 2660 static __inline boolean_t 2661 pmap_unwire_pt2(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free) 2662 { 2663 pt2_wirecount_dec(m, pte1_index(va)); 2664 if (pt2pg_is_empty(m)) { 2665 /* 2666 * QQQ: Wire count is zero, so whole page should be zero and 2667 * we can set PG_ZERO flag to it. 2668 * Note that when promotion is enabled, it takes some 2669 * more efforts. See pmap_unwire_pt2_all() below. 2670 */ 2671 pmap_unwire_pt2pg(pmap, va, m); 2672 pmap_add_delayed_free_list(m, free); 2673 return (TRUE); 2674 } else 2675 return (FALSE); 2676 } 2677 2678 /* 2679 * Drop a L2 page table page's wire count at once, which is used to record 2680 * the number of valid L2 page table entries within the page. If the wire 2681 * count drops to zero, then the L2 page table page is unmapped. 2682 */ 2683 static __inline void 2684 pmap_unwire_pt2_all(pmap_t pmap, vm_offset_t va, vm_page_t m, 2685 struct spglist *free) 2686 { 2687 u_int pte1_idx = pte1_index(va); 2688 2689 KASSERT(m->pindex == (pte1_idx & ~PT2PG_MASK), 2690 ("%s: PT2 page's pindex is wrong", __func__)); 2691 KASSERT(m->wire_count > pt2_wirecount_get(m, pte1_idx), 2692 ("%s: bad pt2 wire count %u > %u", __func__, m->wire_count, 2693 pt2_wirecount_get(m, pte1_idx))); 2694 2695 /* 2696 * It's possible that the L2 page table was never used. 2697 * It happened in case that a section was created without promotion. 
2698 */ 2699 if (pt2_is_full(m, va)) { 2700 pt2_wirecount_set(m, pte1_idx, 0); 2701 2702 /* 2703 * QQQ: We clear L2 page table now, so when L2 page table page 2704 * is going to be freed, we can set it PG_ZERO flag ... 2705 * This function is called only on section mappings, so 2706 * hopefully it's not to big overload. 2707 * 2708 * XXX: If pmap is current, existing PT2MAP mapping could be 2709 * used for zeroing. 2710 */ 2711 pmap_zero_page_area(m, page_pt2off(pte1_idx), NB_IN_PT2); 2712 } 2713 #ifdef INVARIANTS 2714 else 2715 KASSERT(pt2_is_empty(m, va), ("%s: PT2 is not empty (%u)", 2716 __func__, pt2_wirecount_get(m, pte1_idx))); 2717 #endif 2718 if (pt2pg_is_empty(m)) { 2719 pmap_unwire_pt2pg(pmap, va, m); 2720 pmap_add_delayed_free_list(m, free); 2721 } 2722 } 2723 2724 /* 2725 * After removing a L2 page table entry, this routine is used to 2726 * conditionally free the page, and manage the hold/wire counts. 2727 */ 2728 static boolean_t 2729 pmap_unuse_pt2(pmap_t pmap, vm_offset_t va, struct spglist *free) 2730 { 2731 pt1_entry_t pte1; 2732 vm_page_t mpte; 2733 2734 if (va >= VM_MAXUSER_ADDRESS) 2735 return (FALSE); 2736 pte1 = pte1_load(pmap_pte1(pmap, va)); 2737 mpte = PHYS_TO_VM_PAGE(pte1_link_pa(pte1)); 2738 return (pmap_unwire_pt2(pmap, va, mpte, free)); 2739 } 2740 2741 /************************************* 2742 * 2743 * Page management routines. 2744 * 2745 *************************************/ 2746 2747 CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE); 2748 CTASSERT(_NPCM == 11); 2749 CTASSERT(_NPCPV == 336); 2750 2751 static __inline struct pv_chunk * 2752 pv_to_chunk(pv_entry_t pv) 2753 { 2754 2755 return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK)); 2756 } 2757 2758 #define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap) 2759 2760 #define PC_FREE0_9 0xfffffffful /* Free values for index 0 through 9 */ 2761 #define PC_FREE10 0x0000fffful /* Free values for index 10 */ 2762 2763 static const uint32_t pc_freemask[_NPCM] = { 2764 PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, 2765 PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, 2766 PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, 2767 PC_FREE0_9, PC_FREE10 2768 }; 2769 2770 SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0, 2771 "Current number of pv entries"); 2772 2773 #ifdef PV_STATS 2774 static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail; 2775 2776 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0, 2777 "Current number of pv entry chunks"); 2778 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0, 2779 "Current number of pv entry chunks allocated"); 2780 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0, 2781 "Current number of pv entry chunks frees"); 2782 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 2783 0, "Number of times tried to get a chunk page but failed."); 2784 2785 static long pv_entry_frees, pv_entry_allocs; 2786 static int pv_entry_spare; 2787 2788 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0, 2789 "Current number of pv entry frees"); 2790 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 2791 0, "Current number of pv entry allocs"); 2792 SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0, 2793 "Current number of spare pv entries"); 2794 #endif 2795 2796 /* 2797 * Is given page managed? 
2798 */ 2799 static __inline bool 2800 is_managed(vm_paddr_t pa) 2801 { 2802 vm_page_t m; 2803 2804 m = PHYS_TO_VM_PAGE(pa); 2805 if (m == NULL) 2806 return (false); 2807 return ((m->oflags & VPO_UNMANAGED) == 0); 2808 } 2809 2810 static __inline bool 2811 pte1_is_managed(pt1_entry_t pte1) 2812 { 2813 2814 return (is_managed(pte1_pa(pte1))); 2815 } 2816 2817 static __inline bool 2818 pte2_is_managed(pt2_entry_t pte2) 2819 { 2820 2821 return (is_managed(pte2_pa(pte2))); 2822 } 2823 2824 /* 2825 * We are in a serious low memory condition. Resort to 2826 * drastic measures to free some pages so we can allocate 2827 * another pv entry chunk. 2828 */ 2829 static vm_page_t 2830 pmap_pv_reclaim(pmap_t locked_pmap) 2831 { 2832 struct pch newtail; 2833 struct pv_chunk *pc; 2834 struct md_page *pvh; 2835 pt1_entry_t *pte1p; 2836 pmap_t pmap; 2837 pt2_entry_t *pte2p, tpte2; 2838 pv_entry_t pv; 2839 vm_offset_t va; 2840 vm_page_t m, m_pc; 2841 struct spglist free; 2842 uint32_t inuse; 2843 int bit, field, freed; 2844 2845 PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED); 2846 pmap = NULL; 2847 m_pc = NULL; 2848 SLIST_INIT(&free); 2849 TAILQ_INIT(&newtail); 2850 while ((pc = TAILQ_FIRST(&pv_chunks)) != NULL && (pv_vafree == 0 || 2851 SLIST_EMPTY(&free))) { 2852 TAILQ_REMOVE(&pv_chunks, pc, pc_lru); 2853 if (pmap != pc->pc_pmap) { 2854 if (pmap != NULL) { 2855 if (pmap != locked_pmap) 2856 PMAP_UNLOCK(pmap); 2857 } 2858 pmap = pc->pc_pmap; 2859 /* Avoid deadlock and lock recursion. */ 2860 if (pmap > locked_pmap) 2861 PMAP_LOCK(pmap); 2862 else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap)) { 2863 pmap = NULL; 2864 TAILQ_INSERT_TAIL(&newtail, pc, pc_lru); 2865 continue; 2866 } 2867 } 2868 2869 /* 2870 * Destroy every non-wired, 4 KB page mapping in the chunk. 2871 */ 2872 freed = 0; 2873 for (field = 0; field < _NPCM; field++) { 2874 for (inuse = ~pc->pc_map[field] & pc_freemask[field]; 2875 inuse != 0; inuse &= ~(1UL << bit)) { 2876 bit = ffs(inuse) - 1; 2877 pv = &pc->pc_pventry[field * 32 + bit]; 2878 va = pv->pv_va; 2879 pte1p = pmap_pte1(pmap, va); 2880 if (pte1_is_section(pte1_load(pte1p))) 2881 continue; 2882 pte2p = pmap_pte2(pmap, va); 2883 tpte2 = pte2_load(pte2p); 2884 if ((tpte2 & PTE2_W) == 0) 2885 tpte2 = pte2_load_clear(pte2p); 2886 pmap_pte2_release(pte2p); 2887 if ((tpte2 & PTE2_W) != 0) 2888 continue; 2889 KASSERT(tpte2 != 0, 2890 ("pmap_pv_reclaim: pmap %p va %#x zero pte", 2891 pmap, va)); 2892 pmap_tlb_flush(pmap, va); 2893 m = PHYS_TO_VM_PAGE(pte2_pa(tpte2)); 2894 if (pte2_is_dirty(tpte2)) 2895 vm_page_dirty(m); 2896 if ((tpte2 & PTE2_A) != 0) 2897 vm_page_aflag_set(m, PGA_REFERENCED); 2898 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); 2899 if (TAILQ_EMPTY(&m->md.pv_list) && 2900 (m->flags & PG_FICTITIOUS) == 0) { 2901 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 2902 if (TAILQ_EMPTY(&pvh->pv_list)) { 2903 vm_page_aflag_clear(m, 2904 PGA_WRITEABLE); 2905 } 2906 } 2907 pc->pc_map[field] |= 1UL << bit; 2908 pmap_unuse_pt2(pmap, va, &free); 2909 freed++; 2910 } 2911 } 2912 if (freed == 0) { 2913 TAILQ_INSERT_TAIL(&newtail, pc, pc_lru); 2914 continue; 2915 } 2916 /* Every freed mapping is for a 4 KB page. 
*/ 2917 pmap->pm_stats.resident_count -= freed; 2918 PV_STAT(pv_entry_frees += freed); 2919 PV_STAT(pv_entry_spare += freed); 2920 pv_entry_count -= freed; 2921 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2922 for (field = 0; field < _NPCM; field++) 2923 if (pc->pc_map[field] != pc_freemask[field]) { 2924 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, 2925 pc_list); 2926 TAILQ_INSERT_TAIL(&newtail, pc, pc_lru); 2927 2928 /* 2929 * One freed pv entry in locked_pmap is 2930 * sufficient. 2931 */ 2932 if (pmap == locked_pmap) 2933 goto out; 2934 break; 2935 } 2936 if (field == _NPCM) { 2937 PV_STAT(pv_entry_spare -= _NPCPV); 2938 PV_STAT(pc_chunk_count--); 2939 PV_STAT(pc_chunk_frees++); 2940 /* Entire chunk is free; return it. */ 2941 m_pc = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc)); 2942 pmap_qremove((vm_offset_t)pc, 1); 2943 pmap_pte2list_free(&pv_vafree, (vm_offset_t)pc); 2944 break; 2945 } 2946 } 2947 out: 2948 TAILQ_CONCAT(&pv_chunks, &newtail, pc_lru); 2949 if (pmap != NULL) { 2950 if (pmap != locked_pmap) 2951 PMAP_UNLOCK(pmap); 2952 } 2953 if (m_pc == NULL && pv_vafree != 0 && SLIST_EMPTY(&free)) { 2954 m_pc = SLIST_FIRST(&free); 2955 SLIST_REMOVE_HEAD(&free, plinks.s.ss); 2956 /* Recycle a freed page table page. */ 2957 m_pc->wire_count = 1; 2958 vm_wire_add(1); 2959 } 2960 vm_page_free_pages_toq(&free, false); 2961 return (m_pc); 2962 } 2963 2964 static void 2965 free_pv_chunk(struct pv_chunk *pc) 2966 { 2967 vm_page_t m; 2968 2969 TAILQ_REMOVE(&pv_chunks, pc, pc_lru); 2970 PV_STAT(pv_entry_spare -= _NPCPV); 2971 PV_STAT(pc_chunk_count--); 2972 PV_STAT(pc_chunk_frees++); 2973 /* entire chunk is free, return it */ 2974 m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc)); 2975 pmap_qremove((vm_offset_t)pc, 1); 2976 vm_page_unwire(m, PQ_NONE); 2977 vm_page_free(m); 2978 pmap_pte2list_free(&pv_vafree, (vm_offset_t)pc); 2979 } 2980 2981 /* 2982 * Free the pv_entry back to the free list. 2983 */ 2984 static void 2985 free_pv_entry(pmap_t pmap, pv_entry_t pv) 2986 { 2987 struct pv_chunk *pc; 2988 int idx, field, bit; 2989 2990 rw_assert(&pvh_global_lock, RA_WLOCKED); 2991 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2992 PV_STAT(pv_entry_frees++); 2993 PV_STAT(pv_entry_spare++); 2994 pv_entry_count--; 2995 pc = pv_to_chunk(pv); 2996 idx = pv - &pc->pc_pventry[0]; 2997 field = idx / 32; 2998 bit = idx % 32; 2999 pc->pc_map[field] |= 1ul << bit; 3000 for (idx = 0; idx < _NPCM; idx++) 3001 if (pc->pc_map[idx] != pc_freemask[idx]) { 3002 /* 3003 * 98% of the time, pc is already at the head of the 3004 * list. If it isn't already, move it to the head. 3005 */ 3006 if (__predict_false(TAILQ_FIRST(&pmap->pm_pvchunk) != 3007 pc)) { 3008 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 3009 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, 3010 pc_list); 3011 } 3012 return; 3013 } 3014 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 3015 free_pv_chunk(pc); 3016 } 3017 3018 /* 3019 * Get a new pv_entry, allocating a block from the system 3020 * when needed. 
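 *
 * Chunk bookkeeping, in outline (an illustrative recap of the code
 * below): each struct pv_chunk occupies exactly one page and carries
 * a bitmap, pc_map[], in which a set bit marks a free pv_entry.
 * An allocation therefore reduces to finding the first set bit and
 * clearing it:
 *
 *	bit = ffs(pc->pc_map[field]) - 1;
 *	pv = &pc->pc_pventry[field * 32 + bit];
 *	pc->pc_map[field] &= ~(1ul << bit);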
3021 */ 3022 static pv_entry_t 3023 get_pv_entry(pmap_t pmap, boolean_t try) 3024 { 3025 static const struct timeval printinterval = { 60, 0 }; 3026 static struct timeval lastprint; 3027 int bit, field; 3028 pv_entry_t pv; 3029 struct pv_chunk *pc; 3030 vm_page_t m; 3031 3032 rw_assert(&pvh_global_lock, RA_WLOCKED); 3033 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3034 PV_STAT(pv_entry_allocs++); 3035 pv_entry_count++; 3036 if (pv_entry_count > pv_entry_high_water) 3037 if (ratecheck(&lastprint, &printinterval)) 3038 printf("Approaching the limit on PV entries, consider " 3039 "increasing either the vm.pmap.shpgperproc or the " 3040 "vm.pmap.pv_entries tunable.\n"); 3041 retry: 3042 pc = TAILQ_FIRST(&pmap->pm_pvchunk); 3043 if (pc != NULL) { 3044 for (field = 0; field < _NPCM; field++) { 3045 if (pc->pc_map[field]) { 3046 bit = ffs(pc->pc_map[field]) - 1; 3047 break; 3048 } 3049 } 3050 if (field < _NPCM) { 3051 pv = &pc->pc_pventry[field * 32 + bit]; 3052 pc->pc_map[field] &= ~(1ul << bit); 3053 /* If this was the last item, move it to tail */ 3054 for (field = 0; field < _NPCM; field++) 3055 if (pc->pc_map[field] != 0) { 3056 PV_STAT(pv_entry_spare--); 3057 return (pv); /* not full, return */ 3058 } 3059 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 3060 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list); 3061 PV_STAT(pv_entry_spare--); 3062 return (pv); 3063 } 3064 } 3065 /* 3066 * Access to the pte2list "pv_vafree" is synchronized by the pvh 3067 * global lock. If "pv_vafree" is currently non-empty, it will 3068 * remain non-empty until pmap_pte2list_alloc() completes. 3069 */ 3070 if (pv_vafree == 0 || (m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | 3071 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) { 3072 if (try) { 3073 pv_entry_count--; 3074 PV_STAT(pc_chunk_tryfail++); 3075 return (NULL); 3076 } 3077 m = pmap_pv_reclaim(pmap); 3078 if (m == NULL) 3079 goto retry; 3080 } 3081 PV_STAT(pc_chunk_count++); 3082 PV_STAT(pc_chunk_allocs++); 3083 pc = (struct pv_chunk *)pmap_pte2list_alloc(&pv_vafree); 3084 pmap_qenter((vm_offset_t)pc, &m, 1); 3085 pc->pc_pmap = pmap; 3086 pc->pc_map[0] = pc_freemask[0] & ~1ul; /* preallocated bit 0 */ 3087 for (field = 1; field < _NPCM; field++) 3088 pc->pc_map[field] = pc_freemask[field]; 3089 TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru); 3090 pv = &pc->pc_pventry[0]; 3091 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); 3092 PV_STAT(pv_entry_spare += _NPCPV - 1); 3093 return (pv); 3094 } 3095 3096 /* 3097 * Create a pv entry for page at pa for 3098 * (pmap, va). 
3099 */ 3100 static void 3101 pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m) 3102 { 3103 pv_entry_t pv; 3104 3105 rw_assert(&pvh_global_lock, RA_WLOCKED); 3106 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3107 pv = get_pv_entry(pmap, FALSE); 3108 pv->pv_va = va; 3109 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); 3110 } 3111 3112 static __inline pv_entry_t 3113 pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va) 3114 { 3115 pv_entry_t pv; 3116 3117 rw_assert(&pvh_global_lock, RA_WLOCKED); 3118 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { 3119 if (pmap == PV_PMAP(pv) && va == pv->pv_va) { 3120 TAILQ_REMOVE(&pvh->pv_list, pv, pv_next); 3121 break; 3122 } 3123 } 3124 return (pv); 3125 } 3126 3127 static void 3128 pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va) 3129 { 3130 pv_entry_t pv; 3131 3132 pv = pmap_pvh_remove(pvh, pmap, va); 3133 KASSERT(pv != NULL, ("pmap_pvh_free: pv not found")); 3134 free_pv_entry(pmap, pv); 3135 } 3136 3137 static void 3138 pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va) 3139 { 3140 struct md_page *pvh; 3141 3142 rw_assert(&pvh_global_lock, RA_WLOCKED); 3143 pmap_pvh_free(&m->md, pmap, va); 3144 if (TAILQ_EMPTY(&m->md.pv_list) && (m->flags & PG_FICTITIOUS) == 0) { 3145 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 3146 if (TAILQ_EMPTY(&pvh->pv_list)) 3147 vm_page_aflag_clear(m, PGA_WRITEABLE); 3148 } 3149 } 3150 3151 static void 3152 pmap_pv_demote_pte1(pmap_t pmap, vm_offset_t va, vm_paddr_t pa) 3153 { 3154 struct md_page *pvh; 3155 pv_entry_t pv; 3156 vm_offset_t va_last; 3157 vm_page_t m; 3158 3159 rw_assert(&pvh_global_lock, RA_WLOCKED); 3160 KASSERT((pa & PTE1_OFFSET) == 0, 3161 ("pmap_pv_demote_pte1: pa is not 1mpage aligned")); 3162 3163 /* 3164 * Transfer the 1mpage's pv entry for this mapping to the first 3165 * page's pv list. 3166 */ 3167 pvh = pa_to_pvh(pa); 3168 va = pte1_trunc(va); 3169 pv = pmap_pvh_remove(pvh, pmap, va); 3170 KASSERT(pv != NULL, ("pmap_pv_demote_pte1: pv not found")); 3171 m = PHYS_TO_VM_PAGE(pa); 3172 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); 3173 /* Instantiate the remaining NPTE2_IN_PT2 - 1 pv entries. */ 3174 va_last = va + PTE1_SIZE - PAGE_SIZE; 3175 do { 3176 m++; 3177 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3178 ("pmap_pv_demote_pte1: page %p is not managed", m)); 3179 va += PAGE_SIZE; 3180 pmap_insert_entry(pmap, va, m); 3181 } while (va < va_last); 3182 } 3183 3184 #if VM_NRESERVLEVEL > 0 3185 static void 3186 pmap_pv_promote_pte1(pmap_t pmap, vm_offset_t va, vm_paddr_t pa) 3187 { 3188 struct md_page *pvh; 3189 pv_entry_t pv; 3190 vm_offset_t va_last; 3191 vm_page_t m; 3192 3193 rw_assert(&pvh_global_lock, RA_WLOCKED); 3194 KASSERT((pa & PTE1_OFFSET) == 0, 3195 ("pmap_pv_promote_pte1: pa is not 1mpage aligned")); 3196 3197 /* 3198 * Transfer the first page's pv entry for this mapping to the 3199 * 1mpage's pv list. Aside from avoiding the cost of a call 3200 * to get_pv_entry(), a transfer avoids the possibility that 3201 * get_pv_entry() calls pmap_pv_reclaim() and that pmap_pv_reclaim() 3202 * removes one of the mappings that is being promoted. 3203 */ 3204 m = PHYS_TO_VM_PAGE(pa); 3205 va = pte1_trunc(va); 3206 pv = pmap_pvh_remove(&m->md, pmap, va); 3207 KASSERT(pv != NULL, ("pmap_pv_promote_pte1: pv not found")); 3208 pvh = pa_to_pvh(pa); 3209 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next); 3210 /* Free the remaining NPTE2_IN_PT2 - 1 pv entries. 
*/ 3211 va_last = va + PTE1_SIZE - PAGE_SIZE; 3212 do { 3213 m++; 3214 va += PAGE_SIZE; 3215 pmap_pvh_free(&m->md, pmap, va); 3216 } while (va < va_last); 3217 } 3218 #endif 3219 3220 /* 3221 * Conditionally create a pv entry. 3222 */ 3223 static boolean_t 3224 pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m) 3225 { 3226 pv_entry_t pv; 3227 3228 rw_assert(&pvh_global_lock, RA_WLOCKED); 3229 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3230 if (pv_entry_count < pv_entry_high_water && 3231 (pv = get_pv_entry(pmap, TRUE)) != NULL) { 3232 pv->pv_va = va; 3233 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); 3234 return (TRUE); 3235 } else 3236 return (FALSE); 3237 } 3238 3239 /* 3240 * Create the pv entries for each of the pages within a section. 3241 */ 3242 static bool 3243 pmap_pv_insert_pte1(pmap_t pmap, vm_offset_t va, pt1_entry_t pte1, u_int flags) 3244 { 3245 struct md_page *pvh; 3246 pv_entry_t pv; 3247 bool noreclaim; 3248 3249 rw_assert(&pvh_global_lock, RA_WLOCKED); 3250 noreclaim = (flags & PMAP_ENTER_NORECLAIM) != 0; 3251 if ((noreclaim && pv_entry_count >= pv_entry_high_water) || 3252 (pv = get_pv_entry(pmap, noreclaim)) == NULL) 3253 return (false); 3254 pv->pv_va = va; 3255 pvh = pa_to_pvh(pte1_pa(pte1)); 3256 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next); 3257 return (true); 3258 } 3259 3260 static inline void 3261 pmap_tlb_flush_pte1(pmap_t pmap, vm_offset_t va, pt1_entry_t npte1) 3262 { 3263 3264 /* Kill all the small mappings or the big one only. */ 3265 if (pte1_is_section(npte1)) 3266 pmap_tlb_flush_range(pmap, pte1_trunc(va), PTE1_SIZE); 3267 else 3268 pmap_tlb_flush(pmap, pte1_trunc(va)); 3269 } 3270 3271 /* 3272 * Update kernel pte1 on all pmaps. 3273 * 3274 * The following function is called on only one cpu with interrupts disabled. 3275 * In SMP case, smp_rendezvous_cpus() is used to stop other cpus. This way 3276 * nobody can invoke explicit hardware table walk during the update of pte1. 3277 * Unsolicited hardware table walk can still happen, invoked by speculative 3278 * data or instruction prefetch or even by speculative hardware table walk. 3279 * 3280 * The break-before-make approach should be implemented here. However, it's 3281 * not so easy to do for kernel mappings, as the kernel would have to briefly 3282 * unmap itself, which it is not prepared to survive. 3283 */ 3284 static void 3285 pmap_update_pte1_kernel(vm_offset_t va, pt1_entry_t npte1) 3286 { 3287 pmap_t pmap; 3288 pt1_entry_t *pte1p; 3289 3290 /* 3291 * Get current pmap. Interrupts should be disabled here 3292 * so PCPU_GET() is done atomically. 3293 */ 3294 pmap = PCPU_GET(curpmap); 3295 if (pmap == NULL) 3296 pmap = kernel_pmap; 3297 3298 /* 3299 * (1) Change pte1 on current pmap. 3300 * (2) Flush all obsolete TLB entries on current CPU. 3301 * (3) Change pte1 on all pmaps. 3302 * (4) Flush all obsolete TLB entries on all CPUs in SMP case. 3303 */ 3304 3305 pte1p = pmap_pte1(pmap, va); 3306 pte1_store(pte1p, npte1); 3307 3308 /* Kill all the small mappings or the big one only. */ 3309 if (pte1_is_section(npte1)) { 3310 pmap_pte1_kern_promotions++; 3311 tlb_flush_range_local(pte1_trunc(va), PTE1_SIZE); 3312 } else { 3313 pmap_pte1_kern_demotions++; 3314 tlb_flush_local(pte1_trunc(va)); 3315 } 3316 3317 /* 3318 * In SMP case, this function is called when all cpus are at smp 3319 * rendezvous, so there is no need to use 'allpmaps_lock' lock here. 3320 * In UP case, the function is called with this lock locked.
3321 */ 3322 LIST_FOREACH(pmap, &allpmaps, pm_list) { 3323 pte1p = pmap_pte1(pmap, va); 3324 pte1_store(pte1p, npte1); 3325 } 3326 3327 #ifdef SMP 3328 /* Kill all the small mappings or the big one only. */ 3329 if (pte1_is_section(npte1)) 3330 tlb_flush_range(pte1_trunc(va), PTE1_SIZE); 3331 else 3332 tlb_flush(pte1_trunc(va)); 3333 #endif 3334 } 3335 3336 #ifdef SMP 3337 struct pte1_action { 3338 vm_offset_t va; 3339 pt1_entry_t npte1; 3340 u_int update; /* CPU that updates the PTE1 */ 3341 }; 3342 3343 static void 3344 pmap_update_pte1_action(void *arg) 3345 { 3346 struct pte1_action *act = arg; 3347 3348 if (act->update == PCPU_GET(cpuid)) 3349 pmap_update_pte1_kernel(act->va, act->npte1); 3350 } 3351 3352 /* 3353 * Change pte1 on current pmap. 3354 * Note that kernel pte1 must be changed on all pmaps. 3355 * 3356 * According to the architecture reference manual published by ARM, 3357 * the behaviour is UNPREDICTABLE when two or more TLB entries map the same VA. 3358 * According to this manual, UNPREDICTABLE behaviours must never happen in 3359 * a viable system. In contrast, on x86 processors, it is not specified which 3360 * TLB entry mapping the virtual address will be used, but the MMU doesn't 3361 * generate a bogus translation the way it does on Cortex-A8 rev 2 (Beaglebone 3362 * Black). 3363 * 3364 * It's a problem when either promotion or demotion is being done. The pte1 3365 * update and appropriate TLB flush must be done atomically in general. 3366 */ 3367 static void 3368 pmap_change_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t va, 3369 pt1_entry_t npte1) 3370 { 3371 3372 if (pmap == kernel_pmap) { 3373 struct pte1_action act; 3374 3375 sched_pin(); 3376 act.va = va; 3377 act.npte1 = npte1; 3378 act.update = PCPU_GET(cpuid); 3379 smp_rendezvous_cpus(all_cpus, smp_no_rendezvous_barrier, 3380 pmap_update_pte1_action, NULL, &act); 3381 sched_unpin(); 3382 } else { 3383 register_t cspr; 3384 3385 /* 3386 * Use break-before-make approach for changing userland 3387 * mappings. It can cause L1 translation aborts on other 3388 * cores in SMP case. So, special treatment is implemented 3389 * in pmap_fault(). To reduce the likelihood that another core 3390 * will be affected by the broken mapping, disable interrupts 3391 * until the mapping change is completed. 3392 */ 3393 cspr = disable_interrupts(PSR_I | PSR_F); 3394 pte1_clear(pte1p); 3395 pmap_tlb_flush_pte1(pmap, va, npte1); 3396 pte1_store(pte1p, npte1); 3397 restore_interrupts(cspr); 3398 } 3399 } 3400 #else 3401 static void 3402 pmap_change_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t va, 3403 pt1_entry_t npte1) 3404 { 3405 3406 if (pmap == kernel_pmap) { 3407 mtx_lock_spin(&allpmaps_lock); 3408 pmap_update_pte1_kernel(va, npte1); 3409 mtx_unlock_spin(&allpmaps_lock); 3410 } else { 3411 register_t cspr; 3412 3413 /* 3414 * Use break-before-make approach for changing userland 3415 * mappings. It's absolutely safe in UP case when interrupts 3416 * are disabled. 3417 */ 3418 cspr = disable_interrupts(PSR_I | PSR_F); 3419 pte1_clear(pte1p); 3420 pmap_tlb_flush_pte1(pmap, va, npte1); 3421 pte1_store(pte1p, npte1); 3422 restore_interrupts(cspr); 3423 } 3424 } 3425 #endif 3426 3427 #if VM_NRESERVLEVEL > 0 3428 /* 3429 * Tries to promote the NPTE2_IN_PT2, contiguous 4KB page mappings that are 3430 * within a single page table page (PT2) to a single 1MB page mapping. 
3431 * For promotion to occur, two conditions must be met: (1) the 4KB page 3432 * mappings must map aligned, contiguous physical memory and (2) the 4KB page 3433 * mappings must have identical characteristics. 3434 * 3435 * Managed (PG_MANAGED) mappings within the kernel address space are not 3436 * promoted. The reason is that kernel PTE1s are replicated in each pmap but 3437 * pmap_remove_write(), pmap_clear_modify(), and pmap_clear_reference() only 3438 * read the PTE1 from the kernel pmap. 3439 */ 3440 static void 3441 pmap_promote_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t va) 3442 { 3443 pt1_entry_t npte1; 3444 pt2_entry_t *fpte2p, fpte2, fpte2_fav; 3445 pt2_entry_t *pte2p, pte2; 3446 vm_offset_t pteva __unused; 3447 vm_page_t m __unused; 3448 3449 PDEBUG(6, printf("%s(%p): try for va %#x pte1 %#x at %p\n", __func__, 3450 pmap, va, pte1_load(pte1p), pte1p)); 3451 3452 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3453 3454 /* 3455 * Examine the first PTE2 in the specified PT2. Abort if this PTE2 is 3456 * either invalid, unused, or does not map the first 4KB physical page 3457 * within a 1MB page. 3458 */ 3459 fpte2p = pmap_pte2_quick(pmap, pte1_trunc(va)); 3460 fpte2 = pte2_load(fpte2p); 3461 if ((fpte2 & ((PTE2_FRAME & PTE1_OFFSET) | PTE2_A | PTE2_V)) != 3462 (PTE2_A | PTE2_V)) { 3463 pmap_pte1_p_failures++; 3464 CTR3(KTR_PMAP, "%s: failure(1) for va %#x in pmap %p", 3465 __func__, va, pmap); 3466 return; 3467 } 3468 if (pte2_is_managed(fpte2) && pmap == kernel_pmap) { 3469 pmap_pte1_p_failures++; 3470 CTR3(KTR_PMAP, "%s: failure(2) for va %#x in pmap %p", 3471 __func__, va, pmap); 3472 return; 3473 } 3474 if ((fpte2 & (PTE2_NM | PTE2_RO)) == PTE2_NM) { 3475 /* 3476 * When page is not modified, PTE2_RO can be set without 3477 * a TLB invalidation. 3478 */ 3479 fpte2 |= PTE2_RO; 3480 pte2_store(fpte2p, fpte2); 3481 } 3482 3483 /* 3484 * Examine each of the other PTE2s in the specified PT2. Abort if this 3485 * PTE2 maps an unexpected 4KB physical page or does not have identical 3486 * characteristics to the first PTE2. 3487 */ 3488 fpte2_fav = (fpte2 & (PTE2_FRAME | PTE2_A | PTE2_V)); 3489 fpte2_fav += PTE1_SIZE - PTE2_SIZE; /* examine from the end */ 3490 for (pte2p = fpte2p + NPTE2_IN_PT2 - 1; pte2p > fpte2p; pte2p--) { 3491 pte2 = pte2_load(pte2p); 3492 if ((pte2 & (PTE2_FRAME | PTE2_A | PTE2_V)) != fpte2_fav) { 3493 pmap_pte1_p_failures++; 3494 CTR3(KTR_PMAP, "%s: failure(3) for va %#x in pmap %p", 3495 __func__, va, pmap); 3496 return; 3497 } 3498 if ((pte2 & (PTE2_NM | PTE2_RO)) == PTE2_NM) { 3499 /* 3500 * When page is not modified, PTE2_RO can be set 3501 * without a TLB invalidation. See note above. 3502 */ 3503 pte2 |= PTE2_RO; 3504 pte2_store(pte2p, pte2); 3505 pteva = pte1_trunc(va) | (pte2 & PTE1_OFFSET & 3506 PTE2_FRAME); 3507 CTR3(KTR_PMAP, "%s: protect for va %#x in pmap %p", 3508 __func__, pteva, pmap); 3509 } 3510 if ((pte2 & PTE2_PROMOTE) != (fpte2 & PTE2_PROMOTE)) { 3511 pmap_pte1_p_failures++; 3512 CTR3(KTR_PMAP, "%s: failure(4) for va %#x in pmap %p", 3513 __func__, va, pmap); 3514 return; 3515 } 3516 3517 fpte2_fav -= PTE2_SIZE; 3518 } 3519 /* 3520 * The page table page in its current state will stay in PT2TAB 3521 * until the PTE1 mapping the section is demoted by pmap_demote_pte1() 3522 * or destroyed by pmap_remove_pte1(). 3523 * 3524 * Note that L2 page table size is not equal to PAGE_SIZE. 
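 *
 * Net effect of the promotion, in outline (an illustrative recap of
 * the code below, not from the original source):
 *	before:	pte1 is a link:     PTE1_LINK(pt2_pa)
 *	after:	pte1 is a section:  (fpte2 & PTE1_FRAME) |
 *		                    ATTR_TO_L1(fpte2) | PTE1_V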
3525 */ 3526 m = PHYS_TO_VM_PAGE(trunc_page(pte1_link_pa(pte1_load(pte1p)))); 3527 KASSERT(m >= vm_page_array && m < &vm_page_array[vm_page_array_size], 3528 ("%s: PT2 page is out of range", __func__)); 3529 KASSERT(m->pindex == (pte1_index(va) & ~PT2PG_MASK), 3530 ("%s: PT2 page's pindex is wrong", __func__)); 3531 3532 /* 3533 * Get pte1 from pte2 format. 3534 */ 3535 npte1 = (fpte2 & PTE1_FRAME) | ATTR_TO_L1(fpte2) | PTE1_V; 3536 3537 /* 3538 * Promote the pv entries. 3539 */ 3540 if (pte2_is_managed(fpte2)) 3541 pmap_pv_promote_pte1(pmap, va, pte1_pa(npte1)); 3542 3543 /* 3544 * Promote the mappings. 3545 */ 3546 pmap_change_pte1(pmap, pte1p, va, npte1); 3547 3548 pmap_pte1_promotions++; 3549 CTR3(KTR_PMAP, "%s: success for va %#x in pmap %p", 3550 __func__, va, pmap); 3551 3552 PDEBUG(6, printf("%s(%p): success for va %#x pte1 %#x(%#x) at %p\n", 3553 __func__, pmap, va, npte1, pte1_load(pte1p), pte1p)); 3554 } 3555 #endif /* VM_NRESERVLEVEL > 0 */ 3556 3557 /* 3558 * Zero L2 page table page. 3559 */ 3560 static __inline void 3561 pmap_clear_pt2(pt2_entry_t *fpte2p) 3562 { 3563 pt2_entry_t *pte2p; 3564 3565 for (pte2p = fpte2p; pte2p < fpte2p + NPTE2_IN_PT2; pte2p++) 3566 pte2_clear(pte2p); 3567 3568 } 3569 3570 /* 3571 * Removes a 1MB page mapping from the kernel pmap. 3572 */ 3573 static void 3574 pmap_remove_kernel_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t va) 3575 { 3576 vm_page_t m; 3577 uint32_t pte1_idx; 3578 pt2_entry_t *fpte2p; 3579 vm_paddr_t pt2_pa; 3580 3581 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3582 m = pmap_pt2_page(pmap, va); 3583 if (m == NULL) 3584 /* 3585 * QQQ: Is this function called only on promoted pte1? 3586 * We certainly do section mappings directly 3587 * (without promotion) in kernel !!! 3588 */ 3589 panic("%s: missing pt2 page", __func__); 3590 3591 pte1_idx = pte1_index(va); 3592 3593 /* 3594 * Initialize the L2 page table. 3595 */ 3596 fpte2p = page_pt2(pt2map_pt2pg(va), pte1_idx); 3597 pmap_clear_pt2(fpte2p); 3598 3599 /* 3600 * Remove the mapping. 3601 */ 3602 pt2_pa = page_pt2pa(VM_PAGE_TO_PHYS(m), pte1_idx); 3603 pmap_kenter_pte1(va, PTE1_LINK(pt2_pa)); 3604 3605 /* 3606 * QQQ: We do not need to invalidate PT2MAP mapping 3607 * as we did not change it. I.e. the L2 page table page 3608 * was and still is mapped the same way. 3609 */ 3610 } 3611 3612 /* 3613 * Do the things to unmap a section in a process 3614 */ 3615 static void 3616 pmap_remove_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t sva, 3617 struct spglist *free) 3618 { 3619 pt1_entry_t opte1; 3620 struct md_page *pvh; 3621 vm_offset_t eva, va; 3622 vm_page_t m; 3623 3624 PDEBUG(6, printf("%s(%p): va %#x pte1 %#x at %p\n", __func__, pmap, sva, 3625 pte1_load(pte1p), pte1p)); 3626 3627 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3628 KASSERT((sva & PTE1_OFFSET) == 0, 3629 ("%s: sva is not 1mpage aligned", __func__)); 3630 3631 /* 3632 * Clear and invalidate the mapping. It should occupy one and only TLB 3633 * entry. So, pmap_tlb_flush() called with aligned address should be 3634 * sufficient. 
3635 */ 3636 opte1 = pte1_load_clear(pte1p); 3637 pmap_tlb_flush(pmap, sva); 3638 3639 if (pte1_is_wired(opte1)) 3640 pmap->pm_stats.wired_count -= PTE1_SIZE / PAGE_SIZE; 3641 pmap->pm_stats.resident_count -= PTE1_SIZE / PAGE_SIZE; 3642 if (pte1_is_managed(opte1)) { 3643 pvh = pa_to_pvh(pte1_pa(opte1)); 3644 pmap_pvh_free(pvh, pmap, sva); 3645 eva = sva + PTE1_SIZE; 3646 for (va = sva, m = PHYS_TO_VM_PAGE(pte1_pa(opte1)); 3647 va < eva; va += PAGE_SIZE, m++) { 3648 if (pte1_is_dirty(opte1)) 3649 vm_page_dirty(m); 3650 if (opte1 & PTE1_A) 3651 vm_page_aflag_set(m, PGA_REFERENCED); 3652 if (TAILQ_EMPTY(&m->md.pv_list) && 3653 TAILQ_EMPTY(&pvh->pv_list)) 3654 vm_page_aflag_clear(m, PGA_WRITEABLE); 3655 } 3656 } 3657 if (pmap == kernel_pmap) { 3658 /* 3659 * L2 page table(s) can't be removed from kernel map as 3660 * kernel counts on it (stuff around pmap_growkernel()). 3661 */ 3662 pmap_remove_kernel_pte1(pmap, pte1p, sva); 3663 } else { 3664 /* 3665 * Get associated L2 page table page. 3666 * It's possible that the page was never allocated. 3667 */ 3668 m = pmap_pt2_page(pmap, sva); 3669 if (m != NULL) 3670 pmap_unwire_pt2_all(pmap, sva, m, free); 3671 } 3672 } 3673 3674 /* 3675 * Fills L2 page table page with mappings to consecutive physical pages. 3676 */ 3677 static __inline void 3678 pmap_fill_pt2(pt2_entry_t *fpte2p, pt2_entry_t npte2) 3679 { 3680 pt2_entry_t *pte2p; 3681 3682 for (pte2p = fpte2p; pte2p < fpte2p + NPTE2_IN_PT2; pte2p++) { 3683 pte2_store(pte2p, npte2); 3684 npte2 += PTE2_SIZE; 3685 } 3686 } 3687 3688 /* 3689 * Tries to demote a 1MB page mapping. If demotion fails, the 3690 * 1MB page mapping is invalidated. 3691 */ 3692 static boolean_t 3693 pmap_demote_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t va) 3694 { 3695 pt1_entry_t opte1, npte1; 3696 pt2_entry_t *fpte2p, npte2; 3697 vm_paddr_t pt2pg_pa, pt2_pa; 3698 vm_page_t m; 3699 struct spglist free; 3700 uint32_t pte1_idx, isnew = 0; 3701 3702 PDEBUG(6, printf("%s(%p): try for va %#x pte1 %#x at %p\n", __func__, 3703 pmap, va, pte1_load(pte1p), pte1p)); 3704 3705 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3706 3707 opte1 = pte1_load(pte1p); 3708 KASSERT(pte1_is_section(opte1), ("%s: opte1 not a section", __func__)); 3709 3710 if ((opte1 & PTE1_A) == 0 || (m = pmap_pt2_page(pmap, va)) == NULL) { 3711 KASSERT(!pte1_is_wired(opte1), 3712 ("%s: PT2 page for a wired mapping is missing", __func__)); 3713 3714 /* 3715 * Invalidate the 1MB page mapping and return 3716 * "failure" if the mapping was never accessed or the 3717 * allocation of the new page table page fails. 3718 */ 3719 if ((opte1 & PTE1_A) == 0 || (m = vm_page_alloc(NULL, 3720 pte1_index(va) & ~PT2PG_MASK, VM_ALLOC_NOOBJ | 3721 VM_ALLOC_NORMAL | VM_ALLOC_WIRED)) == NULL) { 3722 SLIST_INIT(&free); 3723 pmap_remove_pte1(pmap, pte1p, pte1_trunc(va), &free); 3724 vm_page_free_pages_toq(&free, false); 3725 CTR3(KTR_PMAP, "%s: failure for va %#x in pmap %p", 3726 __func__, va, pmap); 3727 return (FALSE); 3728 } 3729 if (va < VM_MAXUSER_ADDRESS) 3730 pmap->pm_stats.resident_count++; 3731 3732 isnew = 1; 3733 3734 /* 3735 * We init all L2 page tables in the page even if 3736 * we are going to change everything for one L2 page 3737 * table in a while. 3738 */ 3739 pt2pg_pa = pmap_pt2pg_init(pmap, va, m); 3740 } else { 3741 if (va < VM_MAXUSER_ADDRESS) { 3742 if (pt2_is_empty(m, va)) 3743 isnew = 1; /* Demoting section w/o promotion. 
*/
3744 #ifdef INVARIANTS
3745 else
3746 KASSERT(pt2_is_full(m, va), ("%s: bad PT2 wire"
3747 " count %u", __func__,
3748 pt2_wirecount_get(m, pte1_index(va))));
3749 #endif
3750 }
3751 }
3752
3753 pt2pg_pa = VM_PAGE_TO_PHYS(m);
3754 pte1_idx = pte1_index(va);
3755 /*
3756 * If the pmap is current, then the PT2MAP can provide access to
3757 * the page table page (promoted L2 page tables are not unmapped).
3758 * Otherwise, temporarily map the L2 page table page (m) into
3759 * the kernel's address space at either PADDR1 or PADDR2.
3760 *
3761 * Note that the L2 page table size is not equal to PAGE_SIZE.
3762 */
3763 if (pmap_is_current(pmap))
3764 fpte2p = page_pt2(pt2map_pt2pg(va), pte1_idx);
3765 else if (curthread->td_pinned > 0 && rw_wowned(&pvh_global_lock)) {
3766 if (pte2_pa(pte2_load(PMAP1)) != pt2pg_pa) {
3767 pte2_store(PMAP1, PTE2_KPT(pt2pg_pa));
3768 #ifdef SMP
3769 PMAP1cpu = PCPU_GET(cpuid);
3770 #endif
3771 tlb_flush_local((vm_offset_t)PADDR1);
3772 PMAP1changed++;
3773 } else
3774 #ifdef SMP
3775 if (PMAP1cpu != PCPU_GET(cpuid)) {
3776 PMAP1cpu = PCPU_GET(cpuid);
3777 tlb_flush_local((vm_offset_t)PADDR1);
3778 PMAP1changedcpu++;
3779 } else
3780 #endif
3781 PMAP1unchanged++;
3782 fpte2p = page_pt2((vm_offset_t)PADDR1, pte1_idx);
3783 } else {
3784 mtx_lock(&PMAP2mutex);
3785 if (pte2_pa(pte2_load(PMAP2)) != pt2pg_pa) {
3786 pte2_store(PMAP2, PTE2_KPT(pt2pg_pa));
3787 tlb_flush((vm_offset_t)PADDR2);
3788 }
3789 fpte2p = page_pt2((vm_offset_t)PADDR2, pte1_idx);
3790 }
3791 pt2_pa = page_pt2pa(pt2pg_pa, pte1_idx);
3792 npte1 = PTE1_LINK(pt2_pa);
3793
3794 KASSERT((opte1 & PTE1_A) != 0,
3795 ("%s: opte1 is missing PTE1_A", __func__));
3796 KASSERT((opte1 & (PTE1_NM | PTE1_RO)) != PTE1_NM,
3797 ("%s: opte1 has PTE1_NM", __func__));
3798
3799 /*
3800 * Get pte2 from pte1 format.
3801 */
3802 npte2 = pte1_pa(opte1) | ATTR_TO_L2(opte1) | PTE2_V;
3803
3804 /*
3805 * If the L2 page table page is new, initialize it. If the mapping
3806 * has changed attributes, update the page table entries.
3807 */
3808 if (isnew != 0) {
3809 pt2_wirecount_set(m, pte1_idx, NPTE2_IN_PT2);
3810 pmap_fill_pt2(fpte2p, npte2);
3811 } else if ((pte2_load(fpte2p) & PTE2_PROMOTE) !=
3812 (npte2 & PTE2_PROMOTE))
3813 pmap_fill_pt2(fpte2p, npte2);
3814
3815 KASSERT(pte2_pa(pte2_load(fpte2p)) == pte2_pa(npte2),
3816 ("%s: fpte2p and npte2 map different physical addresses",
3817 __func__));
3818
3819 if (fpte2p == PADDR2)
3820 mtx_unlock(&PMAP2mutex);
3821
3822 /*
3823 * Demote the mapping. This pmap is locked. The old PTE1 has
3824 * PTE1_A set. If the old PTE1 does not have PTE1_RO set, it also
3825 * does not have PTE1_NM set. Thus, there is no danger of a race with
3826 * another processor changing the setting of PTE1_A and/or PTE1_NM
3827 * between the read above and the store below.
3828 */
3829 pmap_change_pte1(pmap, pte1p, va, npte1);
3830
3831 /*
3832 * Demote the pv entry. This depends on the earlier demotion
3833 * of the mapping. Specifically, the (re)creation of a per-
3834 * page pv entry might trigger the execution of pmap_pv_reclaim(),
3835 * which might reclaim a newly (re)created per-page pv entry
3836 * and destroy the associated mapping. In order to destroy
3837 * the mapping, the PTE1 must have already changed from mapping
3838 * the 1mpage to referencing the page table page.
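*
* In short, the safe order is the one used here (a summary of the
* two calls above and below, not additional work):
*
*	pmap_change_pte1(pmap, pte1p, va, npte1);       <- mapping first
*	pmap_pv_demote_pte1(pmap, va, pte1_pa(opte1));  <- pv entry second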
3839 */ 3840 if (pte1_is_managed(opte1)) 3841 pmap_pv_demote_pte1(pmap, va, pte1_pa(opte1)); 3842 3843 pmap_pte1_demotions++; 3844 CTR3(KTR_PMAP, "%s: success for va %#x in pmap %p", 3845 __func__, va, pmap); 3846 3847 PDEBUG(6, printf("%s(%p): success for va %#x pte1 %#x(%#x) at %p\n", 3848 __func__, pmap, va, npte1, pte1_load(pte1p), pte1p)); 3849 return (TRUE); 3850 } 3851 3852 /* 3853 * Insert the given physical page (p) at 3854 * the specified virtual address (v) in the 3855 * target physical map with the protection requested. 3856 * 3857 * If specified, the page will be wired down, meaning 3858 * that the related pte can not be reclaimed. 3859 * 3860 * NB: This is the only routine which MAY NOT lazy-evaluate 3861 * or lose information. That is, this routine must actually 3862 * insert this page into the given map NOW. 3863 */ 3864 int 3865 pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, 3866 u_int flags, int8_t psind) 3867 { 3868 pt1_entry_t *pte1p; 3869 pt2_entry_t *pte2p; 3870 pt2_entry_t npte2, opte2; 3871 pv_entry_t pv; 3872 vm_paddr_t opa, pa; 3873 vm_page_t mpte2, om; 3874 int rv; 3875 3876 va = trunc_page(va); 3877 KASSERT(va <= vm_max_kernel_address, ("%s: toobig", __func__)); 3878 KASSERT(va < UPT2V_MIN_ADDRESS || va >= UPT2V_MAX_ADDRESS, 3879 ("%s: invalid to pmap_enter page table pages (va: 0x%x)", __func__, 3880 va)); 3881 KASSERT((m->oflags & VPO_UNMANAGED) != 0 || va < kmi.clean_sva || 3882 va >= kmi.clean_eva, 3883 ("%s: managed mapping within the clean submap", __func__)); 3884 if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m)) 3885 VM_OBJECT_ASSERT_LOCKED(m->object); 3886 KASSERT((flags & PMAP_ENTER_RESERVED) == 0, 3887 ("%s: flags %u has reserved bits set", __func__, flags)); 3888 pa = VM_PAGE_TO_PHYS(m); 3889 npte2 = PTE2(pa, PTE2_A, vm_page_pte2_attr(m)); 3890 if ((flags & VM_PROT_WRITE) == 0) 3891 npte2 |= PTE2_NM; 3892 if ((prot & VM_PROT_WRITE) == 0) 3893 npte2 |= PTE2_RO; 3894 KASSERT((npte2 & (PTE2_NM | PTE2_RO)) != PTE2_RO, 3895 ("%s: flags includes VM_PROT_WRITE but prot doesn't", __func__)); 3896 if ((prot & VM_PROT_EXECUTE) == 0) 3897 npte2 |= PTE2_NX; 3898 if ((flags & PMAP_ENTER_WIRED) != 0) 3899 npte2 |= PTE2_W; 3900 if (va < VM_MAXUSER_ADDRESS) 3901 npte2 |= PTE2_U; 3902 if (pmap != kernel_pmap) 3903 npte2 |= PTE2_NG; 3904 3905 rw_wlock(&pvh_global_lock); 3906 PMAP_LOCK(pmap); 3907 sched_pin(); 3908 if (psind == 1) { 3909 /* Assert the required virtual and physical alignment. */ 3910 KASSERT((va & PTE1_OFFSET) == 0, 3911 ("%s: va unaligned", __func__)); 3912 KASSERT(m->psind > 0, ("%s: m->psind < psind", __func__)); 3913 rv = pmap_enter_pte1(pmap, va, PTE1_PA(pa) | ATTR_TO_L1(npte2) | 3914 PTE1_V, flags, m); 3915 goto out; 3916 } 3917 3918 /* 3919 * In the case that a page table page is not 3920 * resident, we are creating it here. 
3921 */
3922 if (va < VM_MAXUSER_ADDRESS) {
3923 mpte2 = pmap_allocpte2(pmap, va, flags);
3924 if (mpte2 == NULL) {
3925 KASSERT((flags & PMAP_ENTER_NOSLEEP) != 0,
3926 ("pmap_allocpte2 failed with sleep allowed"));
3927 rv = KERN_RESOURCE_SHORTAGE;
3928 goto out;
3929 }
3930 } else
3931 mpte2 = NULL;
3932 pte1p = pmap_pte1(pmap, va);
3933 if (pte1_is_section(pte1_load(pte1p)))
3934 panic("%s: attempted on 1MB page", __func__);
3935 pte2p = pmap_pte2_quick(pmap, va);
3936 if (pte2p == NULL)
3937 panic("%s: invalid L1 page table entry va=%#x", __func__, va);
3938
3939 om = NULL;
3940 opte2 = pte2_load(pte2p);
3941 opa = pte2_pa(opte2);
3942 /*
3943 * Mapping has not changed, must be protection or wiring change.
3944 */
3945 if (pte2_is_valid(opte2) && (opa == pa)) {
3946 /*
3947 * Wiring change, just update stats. We don't worry about
3948 * wiring PT2 pages as they remain resident as long as there
3949 * are valid mappings in them. Hence, if a user page is wired,
3950 * the PT2 page will be also.
3951 */
3952 if (pte2_is_wired(npte2) && !pte2_is_wired(opte2))
3953 pmap->pm_stats.wired_count++;
3954 else if (!pte2_is_wired(npte2) && pte2_is_wired(opte2))
3955 pmap->pm_stats.wired_count--;
3956
3957 /*
3958 * Remove the extra pte2 reference.
3959 */
3960 if (mpte2)
3961 pt2_wirecount_dec(mpte2, pte1_index(va));
3962 if ((m->oflags & VPO_UNMANAGED) == 0)
3963 om = m;
3964 goto validate;
3965 }
3966
3967 /*
3968 * QQQ: We think that changing the physical address of a writeable
3969 * mapping is not safe. Well, maybe on kernel address space with
3970 * correct locking, it could make sense. However, we have no idea
3971 * why anyone would do that on user address space. Are we wrong?
3972 */
3973 KASSERT((opa == 0) || (opa == pa) ||
3974 !pte2_is_valid(opte2) || ((opte2 & PTE2_RO) != 0),
3975 ("%s: pmap %p va %#x(%#x) opa %#x pa %#x - gotcha %#x %#x!",
3976 __func__, pmap, va, opte2, opa, pa, flags, prot));
3977
3978 pv = NULL;
3979
3980 /*
3981 * Mapping has changed, invalidate old range and fall through to
3982 * handle validating new mapping.
3983 */
3984 if (opa) {
3985 if (pte2_is_wired(opte2))
3986 pmap->pm_stats.wired_count--;
3987 om = PHYS_TO_VM_PAGE(opa);
3988 if (om != NULL && (om->oflags & VPO_UNMANAGED) != 0)
3989 om = NULL;
3990 if (om != NULL)
3991 pv = pmap_pvh_remove(&om->md, pmap, va);
3992
3993 /*
3994 * Remove the extra pte2 reference.
3995 */
3996 if (mpte2 != NULL)
3997 pt2_wirecount_dec(mpte2, va >> PTE1_SHIFT);
3998 } else
3999 pmap->pm_stats.resident_count++;
4000
4001 /*
4002 * Enter on the PV list if part of our managed memory.
4003 */
4004 if ((m->oflags & VPO_UNMANAGED) == 0) {
4005 if (pv == NULL) {
4006 pv = get_pv_entry(pmap, FALSE);
4007 pv->pv_va = va;
4008 }
4009 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
4010 } else if (pv != NULL)
4011 free_pv_entry(pmap, pv);
4012
4013 /*
4014 * Increment counters.
4015 */
4016 if (pte2_is_wired(npte2))
4017 pmap->pm_stats.wired_count++;
4018
4019 validate:
4020 /*
4021 * Now validate mapping with desired protection/wiring.
4022 */
4023 if (prot & VM_PROT_WRITE) {
4024 if ((m->oflags & VPO_UNMANAGED) == 0)
4025 vm_page_aflag_set(m, PGA_WRITEABLE);
4026 }
4027
4028 /*
4029 * If the mapping or permission bits are different, we need
4030 * to update the pte2.
4031 *
4032 * QQQ: Think again and again about what to do
4033 * if the mapping is going to be changed!
4034 */
4035 if ((opte2 & ~(PTE2_NM | PTE2_A)) != (npte2 & ~(PTE2_NM | PTE2_A))) {
4036 /*
4037 * Sync icache if exec permission and attribute VM_MEMATTR_WB_WA
4038 * is set.
Do it now, before the mapping is stored and made
4039 * valid for hardware table walk. If done later, there is a race
4040 * with other threads of the current process in the lazy loading case.
4041 * Don't do it for kernel memory which is mapped with exec
4042 * permission even if the memory isn't going to hold executable
4043 * code. The only time when icache sync is needed is after a
4044 * kernel module is loaded and the relocation info is processed.
4045 * And it's done in elf_cpu_load_file().
4046 *
4047 * QQQ: (1) Is there any better way
4048 * to sync the icache?
4049 * (2) Now, we do it on a page basis.
4050 */
4051 if ((prot & VM_PROT_EXECUTE) && pmap != kernel_pmap &&
4052 m->md.pat_mode == VM_MEMATTR_WB_WA &&
4053 (opa != pa || (opte2 & PTE2_NX)))
4054 cache_icache_sync_fresh(va, pa, PAGE_SIZE);
4055
4056 if (opte2 & PTE2_V) {
4057 /* Change mapping with break-before-make approach. */
4058 opte2 = pte2_load_clear(pte2p);
4059 pmap_tlb_flush(pmap, va);
4060 pte2_store(pte2p, npte2);
4061 if (om != NULL) {
4062 KASSERT((om->oflags & VPO_UNMANAGED) == 0,
4063 ("%s: om %p unmanaged", __func__, om));
4064 if ((opte2 & PTE2_A) != 0)
4065 vm_page_aflag_set(om, PGA_REFERENCED);
4066 if (pte2_is_dirty(opte2))
4067 vm_page_dirty(om);
4068 if (TAILQ_EMPTY(&om->md.pv_list) &&
4069 ((om->flags & PG_FICTITIOUS) != 0 ||
4070 TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
4071 vm_page_aflag_clear(om, PGA_WRITEABLE);
4072 }
4073 } else
4074 pte2_store(pte2p, npte2);
4075 }
4076 #if 0
4077 else {
4078 /*
4079 * QQQ: At a time when both the access and the not-modified
4080 * bits are emulated by software, this should not happen.
4081 * Some analysis is needed, if this really happens. A
4082 * missing TLB flush somewhere could be the reason.
4083 */
4084 panic("%s: pmap %p va %#x opte2 %x npte2 %x !!", __func__, pmap,
4085 va, opte2, npte2);
4086 }
4087 #endif
4088
4089 #if VM_NRESERVLEVEL > 0
4090 /*
4091 * If both the L2 page table page and the reservation are fully
4092 * populated, then attempt promotion.
4093 */
4094 if ((mpte2 == NULL || pt2_is_full(mpte2, va)) &&
4095 sp_enabled && (m->flags & PG_FICTITIOUS) == 0 &&
4096 vm_reserv_level_iffullpop(m) == 0)
4097 pmap_promote_pte1(pmap, pte1p, va);
4098 #endif
4099
4100 rv = KERN_SUCCESS;
4101 out:
4102 sched_unpin();
4103 rw_wunlock(&pvh_global_lock);
4104 PMAP_UNLOCK(pmap);
4105 return (rv);
4106 }
4107
4108 /*
4109 * Do the things to unmap a page in a process.
4110 */
4111 static int
4112 pmap_remove_pte2(pmap_t pmap, pt2_entry_t *pte2p, vm_offset_t va,
4113 struct spglist *free)
4114 {
4115 pt2_entry_t opte2;
4116 vm_page_t m;
4117
4118 rw_assert(&pvh_global_lock, RA_WLOCKED);
4119 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4120
4121 /* Clear and invalidate the mapping. */
4122 opte2 = pte2_load_clear(pte2p);
4123 pmap_tlb_flush(pmap, va);
4124
4125 KASSERT(pte2_is_valid(opte2), ("%s: pmap %p va %#x not link pte2 %#x",
4126 __func__, pmap, va, opte2));
4127
4128 if (opte2 & PTE2_W)
4129 pmap->pm_stats.wired_count -= 1;
4130 pmap->pm_stats.resident_count -= 1;
4131 if (pte2_is_managed(opte2)) {
4132 m = PHYS_TO_VM_PAGE(pte2_pa(opte2));
4133 if (pte2_is_dirty(opte2))
4134 vm_page_dirty(m);
4135 if (opte2 & PTE2_A)
4136 vm_page_aflag_set(m, PGA_REFERENCED);
4137 pmap_remove_entry(pmap, m, va);
4138 }
4139 return (pmap_unuse_pt2(pmap, va, free));
4140 }
4141
4142 /*
4143 * Remove a single page from a process address space.
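*
* A sketch of the expected calling pattern (this is how pmap_remove()
* below uses it; the function asserts most of these conditions):
*
*	rw_wlock(&pvh_global_lock);
*	sched_pin();
*	PMAP_LOCK(pmap);
*	pmap_remove_page(pmap, va, &free);
*	...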
4144 */ 4145 static void 4146 pmap_remove_page(pmap_t pmap, vm_offset_t va, struct spglist *free) 4147 { 4148 pt2_entry_t *pte2p; 4149 4150 rw_assert(&pvh_global_lock, RA_WLOCKED); 4151 KASSERT(curthread->td_pinned > 0, 4152 ("%s: curthread not pinned", __func__)); 4153 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 4154 if ((pte2p = pmap_pte2_quick(pmap, va)) == NULL || 4155 !pte2_is_valid(pte2_load(pte2p))) 4156 return; 4157 pmap_remove_pte2(pmap, pte2p, va, free); 4158 } 4159 4160 /* 4161 * Remove the given range of addresses from the specified map. 4162 * 4163 * It is assumed that the start and end are properly 4164 * rounded to the page size. 4165 */ 4166 void 4167 pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 4168 { 4169 vm_offset_t nextva; 4170 pt1_entry_t *pte1p, pte1; 4171 pt2_entry_t *pte2p, pte2; 4172 struct spglist free; 4173 4174 /* 4175 * Perform an unsynchronized read. This is, however, safe. 4176 */ 4177 if (pmap->pm_stats.resident_count == 0) 4178 return; 4179 4180 SLIST_INIT(&free); 4181 4182 rw_wlock(&pvh_global_lock); 4183 sched_pin(); 4184 PMAP_LOCK(pmap); 4185 4186 /* 4187 * Special handling of removing one page. A very common 4188 * operation and easy to short circuit some code. 4189 */ 4190 if (sva + PAGE_SIZE == eva) { 4191 pte1 = pte1_load(pmap_pte1(pmap, sva)); 4192 if (pte1_is_link(pte1)) { 4193 pmap_remove_page(pmap, sva, &free); 4194 goto out; 4195 } 4196 } 4197 4198 for (; sva < eva; sva = nextva) { 4199 /* 4200 * Calculate address for next L2 page table. 4201 */ 4202 nextva = pte1_trunc(sva + PTE1_SIZE); 4203 if (nextva < sva) 4204 nextva = eva; 4205 if (pmap->pm_stats.resident_count == 0) 4206 break; 4207 4208 pte1p = pmap_pte1(pmap, sva); 4209 pte1 = pte1_load(pte1p); 4210 4211 /* 4212 * Weed out invalid mappings. Note: we assume that the L1 page 4213 * table is always allocated, and in kernel virtual. 4214 */ 4215 if (pte1 == 0) 4216 continue; 4217 4218 if (pte1_is_section(pte1)) { 4219 /* 4220 * Are we removing the entire large page? If not, 4221 * demote the mapping and fall through. 4222 */ 4223 if (sva + PTE1_SIZE == nextva && eva >= nextva) { 4224 pmap_remove_pte1(pmap, pte1p, sva, &free); 4225 continue; 4226 } else if (!pmap_demote_pte1(pmap, pte1p, sva)) { 4227 /* The large page mapping was destroyed. */ 4228 continue; 4229 } 4230 #ifdef INVARIANTS 4231 else { 4232 /* Update pte1 after demotion. */ 4233 pte1 = pte1_load(pte1p); 4234 } 4235 #endif 4236 } 4237 4238 KASSERT(pte1_is_link(pte1), ("%s: pmap %p va %#x pte1 %#x at %p" 4239 " is not link", __func__, pmap, sva, pte1, pte1p)); 4240 4241 /* 4242 * Limit our scan to either the end of the va represented 4243 * by the current L2 page table page, or to the end of the 4244 * range being removed. 4245 */ 4246 if (nextva > eva) 4247 nextva = eva; 4248 4249 for (pte2p = pmap_pte2_quick(pmap, sva); sva != nextva; 4250 pte2p++, sva += PAGE_SIZE) { 4251 pte2 = pte2_load(pte2p); 4252 if (!pte2_is_valid(pte2)) 4253 continue; 4254 if (pmap_remove_pte2(pmap, pte2p, sva, &free)) 4255 break; 4256 } 4257 } 4258 out: 4259 sched_unpin(); 4260 rw_wunlock(&pvh_global_lock); 4261 PMAP_UNLOCK(pmap); 4262 vm_page_free_pages_toq(&free, false); 4263 } 4264 4265 /* 4266 * Routine: pmap_remove_all 4267 * Function: 4268 * Removes this physical page from 4269 * all physical maps in which it resides. 4270 * Reflects back modify bits to the pager. 4271 * 4272 * Notes: 4273 * Original versions of this routine were very 4274 * inefficient because they iteratively called 4275 * pmap_remove (slow...) 
4276 */
4277
4278 void
4279 pmap_remove_all(vm_page_t m)
4280 {
4281 struct md_page *pvh;
4282 pv_entry_t pv;
4283 pmap_t pmap;
4284 pt2_entry_t *pte2p, opte2;
4285 pt1_entry_t *pte1p;
4286 vm_offset_t va;
4287 struct spglist free;
4288
4289 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4290 ("%s: page %p is not managed", __func__, m));
4291 SLIST_INIT(&free);
4292 rw_wlock(&pvh_global_lock);
4293 sched_pin();
4294 if ((m->flags & PG_FICTITIOUS) != 0)
4295 goto small_mappings;
4296 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
4297 while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
4298 va = pv->pv_va;
4299 pmap = PV_PMAP(pv);
4300 PMAP_LOCK(pmap);
4301 pte1p = pmap_pte1(pmap, va);
4302 (void)pmap_demote_pte1(pmap, pte1p, va);
4303 PMAP_UNLOCK(pmap);
4304 }
4305 small_mappings:
4306 while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
4307 pmap = PV_PMAP(pv);
4308 PMAP_LOCK(pmap);
4309 pmap->pm_stats.resident_count--;
4310 pte1p = pmap_pte1(pmap, pv->pv_va);
4311 KASSERT(!pte1_is_section(pte1_load(pte1p)), ("%s: found "
4312 "a 1mpage in page %p's pv list", __func__, m));
4313 pte2p = pmap_pte2_quick(pmap, pv->pv_va);
4314 opte2 = pte2_load_clear(pte2p);
4315 pmap_tlb_flush(pmap, pv->pv_va);
4316 KASSERT(pte2_is_valid(opte2), ("%s: pmap %p va %x zero pte2",
4317 __func__, pmap, pv->pv_va));
4318 if (pte2_is_wired(opte2))
4319 pmap->pm_stats.wired_count--;
4320 if (opte2 & PTE2_A)
4321 vm_page_aflag_set(m, PGA_REFERENCED);
4322
4323 /*
4324 * Update the vm_page_t clean and reference bits.
4325 */
4326 if (pte2_is_dirty(opte2))
4327 vm_page_dirty(m);
4328 pmap_unuse_pt2(pmap, pv->pv_va, &free);
4329 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
4330 free_pv_entry(pmap, pv);
4331 PMAP_UNLOCK(pmap);
4332 }
4333 vm_page_aflag_clear(m, PGA_WRITEABLE);
4334 sched_unpin();
4335 rw_wunlock(&pvh_global_lock);
4336 vm_page_free_pages_toq(&free, false);
4337 }
4338
4339 /*
4340 * Just a subroutine for pmap_remove_pages() to reasonably satisfy
4341 * good coding style, a.k.a. 80 character line width limit hell.
4342 */
4343 static __inline void
4344 pmap_remove_pte1_quick(pmap_t pmap, pt1_entry_t pte1, pv_entry_t pv,
4345 struct spglist *free)
4346 {
4347 vm_paddr_t pa;
4348 vm_page_t m, mt, mpt2pg;
4349 struct md_page *pvh;
4350
4351 pa = pte1_pa(pte1);
4352 m = PHYS_TO_VM_PAGE(pa);
4353
4354 KASSERT(m->phys_addr == pa, ("%s: vm_page_t %p addr mismatch %#x %#x",
4355 __func__, m, m->phys_addr, pa));
4356 KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
4357 m < &vm_page_array[vm_page_array_size],
4358 ("%s: bad pte1 %#x", __func__, pte1));
4359
4360 if (pte1_is_dirty(pte1)) {
4361 for (mt = m; mt < &m[PTE1_SIZE / PAGE_SIZE]; mt++)
4362 vm_page_dirty(mt);
4363 }
4364
4365 pmap->pm_stats.resident_count -= PTE1_SIZE / PAGE_SIZE;
4366 pvh = pa_to_pvh(pa);
4367 TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
4368 if (TAILQ_EMPTY(&pvh->pv_list)) {
4369 for (mt = m; mt < &m[PTE1_SIZE / PAGE_SIZE]; mt++)
4370 if (TAILQ_EMPTY(&mt->md.pv_list))
4371 vm_page_aflag_clear(mt, PGA_WRITEABLE);
4372 }
4373 mpt2pg = pmap_pt2_page(pmap, pv->pv_va);
4374 if (mpt2pg != NULL)
4375 pmap_unwire_pt2_all(pmap, pv->pv_va, mpt2pg, free);
4376 }
4377
4378 /*
4379 * Just a subroutine for pmap_remove_pages() to reasonably satisfy
4380 * good coding style, a.k.a. 80 character line width limit hell.
4381 */
4382 static __inline void
4383 pmap_remove_pte2_quick(pmap_t pmap, pt2_entry_t pte2, pv_entry_t pv,
4384 struct spglist *free)
4385 {
4386 vm_paddr_t pa;
4387 vm_page_t m;
4388 struct md_page *pvh;
4389
4390 pa = pte2_pa(pte2);
4391 m = PHYS_TO_VM_PAGE(pa);
4392
4393 KASSERT(m->phys_addr == pa, ("%s: vm_page_t %p addr mismatch %#x %#x",
4394 __func__, m, m->phys_addr, pa));
4395 KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
4396 m < &vm_page_array[vm_page_array_size],
4397 ("%s: bad pte2 %#x", __func__, pte2));
4398
4399 if (pte2_is_dirty(pte2))
4400 vm_page_dirty(m);
4401
4402 pmap->pm_stats.resident_count--;
4403 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
4404 if (TAILQ_EMPTY(&m->md.pv_list) && (m->flags & PG_FICTITIOUS) == 0) {
4405 pvh = pa_to_pvh(pa);
4406 if (TAILQ_EMPTY(&pvh->pv_list))
4407 vm_page_aflag_clear(m, PGA_WRITEABLE);
4408 }
4409 pmap_unuse_pt2(pmap, pv->pv_va, free);
4410 }
4411
4412 /*
4413 * Remove all pages from the specified address space; this aids process
4414 * exit speeds. Also, this code is special cased for the current process
4415 * only, but can have the more generic (and slightly slower) mode enabled.
4416 * This is much faster than pmap_remove in the case of running down
4417 * an entire address space.
4418 */
4419 void
4420 pmap_remove_pages(pmap_t pmap)
4421 {
4422 pt1_entry_t *pte1p, pte1;
4423 pt2_entry_t *pte2p, pte2;
4424 pv_entry_t pv;
4425 struct pv_chunk *pc, *npc;
4426 struct spglist free;
4427 int field, idx;
4428 int32_t bit;
4429 uint32_t inuse, bitmask;
4430 boolean_t allfree;
4431
4432 /*
4433 * Assert that the given pmap is only active on the current
4434 * CPU. Unfortunately, we cannot block another CPU from
4435 * activating the pmap while this function is executing.
4436 */
4437 KASSERT(pmap == vmspace_pmap(curthread->td_proc->p_vmspace),
4438 ("%s: non-current pmap %p", __func__, pmap));
4439 #if defined(SMP) && defined(INVARIANTS)
4440 {
4441 cpuset_t other_cpus;
4442
4443 sched_pin();
4444 other_cpus = pmap->pm_active;
4445 CPU_CLR(PCPU_GET(cpuid), &other_cpus);
4446 sched_unpin();
4447 KASSERT(CPU_EMPTY(&other_cpus),
4448 ("%s: pmap %p active on other cpus", __func__, pmap));
4449 }
4450 #endif
4451 SLIST_INIT(&free);
4452 rw_wlock(&pvh_global_lock);
4453 PMAP_LOCK(pmap);
4454 sched_pin();
4455 TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
4456 KASSERT(pc->pc_pmap == pmap, ("%s: wrong pmap %p %p",
4457 __func__, pmap, pc->pc_pmap));
4458 allfree = TRUE;
4459 for (field = 0; field < _NPCM; field++) {
4460 inuse = (~(pc->pc_map[field])) & pc_freemask[field];
4461 while (inuse != 0) {
4462 bit = ffs(inuse) - 1;
4463 bitmask = 1UL << bit;
4464 idx = field * 32 + bit;
4465 pv = &pc->pc_pventry[idx];
4466 inuse &= ~bitmask;
4467
4468 /*
4469 * Note that we cannot remove wired pages
4470 * from a process' mapping at this time.
4471 */
4472 pte1p = pmap_pte1(pmap, pv->pv_va);
4473 pte1 = pte1_load(pte1p);
4474 if (pte1_is_section(pte1)) {
4475 if (pte1_is_wired(pte1)) {
4476 allfree = FALSE;
4477 continue;
4478 }
4479 pte1_clear(pte1p);
4480 pmap_remove_pte1_quick(pmap, pte1, pv,
4481 &free);
4482 }
4483 else if (pte1_is_link(pte1)) {
4484 pte2p = pt2map_entry(pv->pv_va);
4485 pte2 = pte2_load(pte2p);
4486
4487 if (!pte2_is_valid(pte2)) {
4488 printf("%s: pmap %p va %#x "
4489 "pte2 %#x\n", __func__,
4490 pmap, pv->pv_va, pte2);
4491 panic("bad pte2");
4492 }
4493
4494 if (pte2_is_wired(pte2)) {
4495 allfree = FALSE;
4496 continue;
4497 }
4498 pte2_clear(pte2p);
4499 pmap_remove_pte2_quick(pmap, pte2, pv,
4500 &free);
4501 } else {
4502 printf("%s: pmap %p va %#x pte1 %#x\n",
4503 __func__, pmap, pv->pv_va, pte1);
4504 panic("bad pte1");
4505 }
4506
4507 /* Mark free */
4508 PV_STAT(pv_entry_frees++);
4509 PV_STAT(pv_entry_spare++);
4510 pv_entry_count--;
4511 pc->pc_map[field] |= bitmask;
4512 }
4513 }
4514 if (allfree) {
4515 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
4516 free_pv_chunk(pc);
4517 }
4518 }
4519 tlb_flush_all_ng_local();
4520 sched_unpin();
4521 rw_wunlock(&pvh_global_lock);
4522 PMAP_UNLOCK(pmap);
4523 vm_page_free_pages_toq(&free, false);
4524 }
4525
4526 /*
4527 * This code makes some *MAJOR* assumptions:
4528 * 1. Current pmap & pmap exists.
4529 * 2. Not wired.
4530 * 3. Read access.
4531 * 4. No L2 page table pages.
4532 * but is *MUCH* faster than pmap_enter...
4533 */
4534 static vm_page_t
4535 pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
4536 vm_prot_t prot, vm_page_t mpt2pg)
4537 {
4538 pt2_entry_t *pte2p, pte2;
4539 vm_paddr_t pa;
4540 struct spglist free;
4541 uint32_t l2prot;
4542
4543 KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
4544 (m->oflags & VPO_UNMANAGED) != 0,
4545 ("%s: managed mapping within the clean submap", __func__));
4546 rw_assert(&pvh_global_lock, RA_WLOCKED);
4547 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4548
4549 /*
4550 * In the case that an L2 page table page is not
4551 * resident, we are creating it here.
4552 */
4553 if (va < VM_MAXUSER_ADDRESS) {
4554 u_int pte1_idx;
4555 pt1_entry_t pte1, *pte1p;
4556 vm_paddr_t pt2_pa;
4557
4558 /*
4559 * Get L1 page table things.
4560 */
4561 pte1_idx = pte1_index(va);
4562 pte1p = pmap_pte1(pmap, va);
4563 pte1 = pte1_load(pte1p);
4564
4565 if (mpt2pg && (mpt2pg->pindex == (pte1_idx & ~PT2PG_MASK))) {
4566 /*
4567 * Each of the NPT2_IN_PG L2 page tables on the page
4568 * can come here. Make sure that the associated L1
4569 * page table link is established.
4570 *
4571 * QQQ: It turns out that we don't establish all links
4572 * to the L2 page tables for a newly allocated L2
4573 * page table page.
4574 */
4575 KASSERT(!pte1_is_section(pte1),
4576 ("%s: pte1 %#x is section", __func__, pte1));
4577 if (!pte1_is_link(pte1)) {
4578 pt2_pa = page_pt2pa(VM_PAGE_TO_PHYS(mpt2pg),
4579 pte1_idx);
4580 pte1_store(pte1p, PTE1_LINK(pt2_pa));
4581 }
4582 pt2_wirecount_inc(mpt2pg, pte1_idx);
4583 } else {
4584 /*
4585 * If the L2 page table page is mapped, we just
4586 * increment the hold count, and activate it.
4587 */
4588 if (pte1_is_section(pte1)) {
4589 return (NULL);
4590 } else if (pte1_is_link(pte1)) {
4591 mpt2pg = PHYS_TO_VM_PAGE(pte1_link_pa(pte1));
4592 pt2_wirecount_inc(mpt2pg, pte1_idx);
4593 } else {
4594 mpt2pg = _pmap_allocpte2(pmap, va,
4595 PMAP_ENTER_NOSLEEP);
4596 if (mpt2pg == NULL)
4597 return (NULL);
4598 }
4599 }
4600 } else {
4601 mpt2pg = NULL;
4602 }
4603
4604 /*
4605 * This call to pt2map_entry() makes the assumption that we are
4606 * entering the page into the current pmap. In order to support
4607 * quick entry into any pmap, one would likely use pmap_pte2_quick().
4608 * But that isn't as quick as pt2map_entry().
4609 */
4610 pte2p = pt2map_entry(va);
4611 pte2 = pte2_load(pte2p);
4612 if (pte2_is_valid(pte2)) {
4613 if (mpt2pg != NULL) {
4614 /*
4615 * Remove the extra pte2 reference.
4616 */
4617 pt2_wirecount_dec(mpt2pg, pte1_index(va));
4618 mpt2pg = NULL;
4619 }
4620 return (NULL);
4621 }
4622
4623 /*
4624 * Enter on the PV list if part of our managed memory.
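*
* If the PV entry cannot be allocated without reclaiming another
* one, the new mapping is simply abandoned rather than sleeping or
* reclaiming here: the PT2 wiring is undone and NULL is returned,
* leaving the page unmapped, e.g.
*
*	if (!pmap_try_insert_pv_entry(pmap, va, m))
*		... undo the PT2 wiring, return NULL ...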
4625 */ 4626 if ((m->oflags & VPO_UNMANAGED) == 0 && 4627 !pmap_try_insert_pv_entry(pmap, va, m)) { 4628 if (mpt2pg != NULL) { 4629 SLIST_INIT(&free); 4630 if (pmap_unwire_pt2(pmap, va, mpt2pg, &free)) { 4631 pmap_tlb_flush(pmap, va); 4632 vm_page_free_pages_toq(&free, false); 4633 } 4634 4635 mpt2pg = NULL; 4636 } 4637 return (NULL); 4638 } 4639 4640 /* 4641 * Increment counters 4642 */ 4643 pmap->pm_stats.resident_count++; 4644 4645 /* 4646 * Now validate mapping with RO protection 4647 */ 4648 pa = VM_PAGE_TO_PHYS(m); 4649 l2prot = PTE2_RO | PTE2_NM; 4650 if (va < VM_MAXUSER_ADDRESS) 4651 l2prot |= PTE2_U | PTE2_NG; 4652 if ((prot & VM_PROT_EXECUTE) == 0) 4653 l2prot |= PTE2_NX; 4654 else if (m->md.pat_mode == VM_MEMATTR_WB_WA && pmap != kernel_pmap) { 4655 /* 4656 * Sync icache if exec permission and attribute VM_MEMATTR_WB_WA 4657 * is set. QQQ: For more info, see comments in pmap_enter(). 4658 */ 4659 cache_icache_sync_fresh(va, pa, PAGE_SIZE); 4660 } 4661 pte2_store(pte2p, PTE2(pa, l2prot, vm_page_pte2_attr(m))); 4662 4663 return (mpt2pg); 4664 } 4665 4666 void 4667 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot) 4668 { 4669 4670 rw_wlock(&pvh_global_lock); 4671 PMAP_LOCK(pmap); 4672 (void)pmap_enter_quick_locked(pmap, va, m, prot, NULL); 4673 rw_wunlock(&pvh_global_lock); 4674 PMAP_UNLOCK(pmap); 4675 } 4676 4677 /* 4678 * Tries to create a read- and/or execute-only 1 MB page mapping. Returns 4679 * true if successful. Returns false if (1) a mapping already exists at the 4680 * specified virtual address or (2) a PV entry cannot be allocated without 4681 * reclaiming another PV entry. 4682 */ 4683 static bool 4684 pmap_enter_1mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot) 4685 { 4686 pt1_entry_t pte1; 4687 vm_paddr_t pa; 4688 4689 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 4690 pa = VM_PAGE_TO_PHYS(m); 4691 pte1 = PTE1(pa, PTE1_NM | PTE1_RO, ATTR_TO_L1(vm_page_pte2_attr(m))); 4692 if ((prot & VM_PROT_EXECUTE) == 0) 4693 pte1 |= PTE1_NX; 4694 if (va < VM_MAXUSER_ADDRESS) 4695 pte1 |= PTE1_U; 4696 if (pmap != kernel_pmap) 4697 pte1 |= PTE1_NG; 4698 return (pmap_enter_pte1(pmap, va, pte1, PMAP_ENTER_NOSLEEP | 4699 PMAP_ENTER_NOREPLACE | PMAP_ENTER_NORECLAIM, m) == KERN_SUCCESS); 4700 } 4701 4702 /* 4703 * Tries to create the specified 1 MB page mapping. Returns KERN_SUCCESS if 4704 * the mapping was created, and either KERN_FAILURE or KERN_RESOURCE_SHORTAGE 4705 * otherwise. Returns KERN_FAILURE if PMAP_ENTER_NOREPLACE was specified and 4706 * a mapping already exists at the specified virtual address. Returns 4707 * KERN_RESOURCE_SHORTAGE if PMAP_ENTER_NORECLAIM was specified and PV entry 4708 * allocation failed. 4709 */ 4710 static int 4711 pmap_enter_pte1(pmap_t pmap, vm_offset_t va, pt1_entry_t pte1, u_int flags, 4712 vm_page_t m) 4713 { 4714 struct spglist free; 4715 pt1_entry_t opte1, *pte1p; 4716 pt2_entry_t pte2, *pte2p; 4717 vm_offset_t cur, end; 4718 vm_page_t mt; 4719 4720 rw_assert(&pvh_global_lock, RA_WLOCKED); 4721 KASSERT((pte1 & (PTE1_NM | PTE1_RO)) == 0 || 4722 (pte1 & (PTE1_NM | PTE1_RO)) == (PTE1_NM | PTE1_RO), 4723 ("%s: pte1 has inconsistent NM and RO attributes", __func__)); 4724 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 4725 pte1p = pmap_pte1(pmap, va); 4726 opte1 = pte1_load(pte1p); 4727 if (pte1_is_valid(opte1)) { 4728 if ((flags & PMAP_ENTER_NOREPLACE) != 0) { 4729 CTR3(KTR_PMAP, "%s: failure for va %#lx in pmap %p", 4730 __func__, va, pmap); 4731 return (KERN_FAILURE); 4732 } 4733 /* Break the existing mapping(s). 
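*
* Two cases follow: a 1MB section is torn down in one step, while
* a link to small mappings is walked pte2 by pte2, i.e.
*
*	pmap_remove_pte1(pmap, pte1p, va, &free);      <- whole section
*	pmap_remove_pte2(pmap, pte2p, cur, &free);     <- each small page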
*/
4734 SLIST_INIT(&free);
4735 if (pte1_is_section(opte1)) {
4736 /*
4737 * If the section resulted from a promotion, then a
4738 * reserved PT page could be freed.
4739 */
4740 pmap_remove_pte1(pmap, pte1p, va, &free);
4741 } else {
4742 sched_pin();
4743 end = va + PTE1_SIZE;
4744 for (cur = va, pte2p = pmap_pte2_quick(pmap, va);
4745 cur != end; cur += PAGE_SIZE, pte2p++) {
4746 pte2 = pte2_load(pte2p);
4747 if (!pte2_is_valid(pte2))
4748 continue;
4749 if (pmap_remove_pte2(pmap, pte2p, cur, &free))
4750 break;
4751 }
4752 sched_unpin();
4753 }
4754 vm_page_free_pages_toq(&free, false);
4755 }
4756 if ((m->oflags & VPO_UNMANAGED) == 0) {
4757 /*
4758 * Abort this mapping if its PV entry could not be created.
4759 */
4760 if (!pmap_pv_insert_pte1(pmap, va, pte1, flags)) {
4761 CTR3(KTR_PMAP, "%s: failure for va %#lx in pmap %p",
4762 __func__, va, pmap);
4763 return (KERN_RESOURCE_SHORTAGE);
4764 }
4765 if ((pte1 & PTE1_RO) == 0) {
4766 for (mt = m; mt < &m[PTE1_SIZE / PAGE_SIZE]; mt++)
4767 vm_page_aflag_set(mt, PGA_WRITEABLE);
4768 }
4769 }
4770
4771 /*
4772 * Increment counters.
4773 */
4774 if (pte1_is_wired(pte1))
4775 pmap->pm_stats.wired_count += PTE1_SIZE / PAGE_SIZE;
4776 pmap->pm_stats.resident_count += PTE1_SIZE / PAGE_SIZE;
4777
4778 /*
4779 * Sync icache if exec permission and attribute VM_MEMATTR_WB_WA
4780 * is set. QQQ: For more info, see comments in pmap_enter().
4781 */
4782 if ((pte1 & PTE1_NX) == 0 && m->md.pat_mode == VM_MEMATTR_WB_WA &&
4783 pmap != kernel_pmap && (!pte1_is_section(opte1) ||
4784 pte1_pa(opte1) != VM_PAGE_TO_PHYS(m) || (opte1 & PTE1_NX) != 0))
4785 cache_icache_sync_fresh(va, VM_PAGE_TO_PHYS(m), PTE1_SIZE);
4786
4787 /*
4788 * Map the section.
4789 */
4790 pte1_store(pte1p, pte1);
4791
4792 pmap_pte1_mappings++;
4793 CTR3(KTR_PMAP, "%s: success for va %#lx in pmap %p", __func__, va,
4794 pmap);
4795 return (KERN_SUCCESS);
4796 }
4797
4798 /*
4799 * Maps a sequence of resident pages belonging to the same object.
4800 * The sequence begins with the given page m_start. This page is
4801 * mapped at the given virtual address start. Each subsequent page is
4802 * mapped at a virtual address that is offset from start by the same
4803 * amount as the page is offset from m_start within the object. The
4804 * last page in the sequence is the page with the largest offset from
4805 * m_start that can be mapped at a virtual address less than the given
4806 * virtual address end. Not every virtual page between start and end
4807 * is mapped; only those for which a resident page exists with the
4808 * corresponding offset from m_start are mapped.
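*
* A small worked example (hypothetical numbers): if m_start has
* pindex 10 and a later resident page has pindex 13, that page is
* mapped at start + ptoa(3), exactly as the loop below computes
* va = start + ptoa(diff).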
4809 */
4810 void
4811 pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
4812 vm_page_t m_start, vm_prot_t prot)
4813 {
4814 vm_offset_t va;
4815 vm_page_t m, mpt2pg;
4816 vm_pindex_t diff, psize;
4817
4818 PDEBUG(6, printf("%s: pmap %p start %#x end %#x m %p prot %#x\n",
4819 __func__, pmap, start, end, m_start, prot));
4820
4821 VM_OBJECT_ASSERT_LOCKED(m_start->object);
4822 psize = atop(end - start);
4823 mpt2pg = NULL;
4824 m = m_start;
4825 rw_wlock(&pvh_global_lock);
4826 PMAP_LOCK(pmap);
4827 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
4828 va = start + ptoa(diff);
4829 if ((va & PTE1_OFFSET) == 0 && va + PTE1_SIZE <= end &&
4830 m->psind == 1 && sp_enabled &&
4831 pmap_enter_1mpage(pmap, va, m, prot))
4832 m = &m[PTE1_SIZE / PAGE_SIZE - 1];
4833 else
4834 mpt2pg = pmap_enter_quick_locked(pmap, va, m, prot,
4835 mpt2pg);
4836 m = TAILQ_NEXT(m, listq);
4837 }
4838 rw_wunlock(&pvh_global_lock);
4839 PMAP_UNLOCK(pmap);
4840 }
4841
4842 /*
4843 * This code maps large physical mmap regions into the
4844 * processor address space. Note that some shortcuts
4845 * are taken, but the code works.
4846 */
4847 void
4848 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
4849 vm_pindex_t pindex, vm_size_t size)
4850 {
4851 pt1_entry_t *pte1p;
4852 vm_paddr_t pa, pte2_pa;
4853 vm_page_t p;
4854 vm_memattr_t pat_mode;
4855 u_int l1attr, l1prot;
4856
4857 VM_OBJECT_ASSERT_WLOCKED(object);
4858 KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
4859 ("%s: non-device object", __func__));
4860 if ((addr & PTE1_OFFSET) == 0 && (size & PTE1_OFFSET) == 0) {
4861 if (!vm_object_populate(object, pindex, pindex + atop(size)))
4862 return;
4863 p = vm_page_lookup(object, pindex);
4864 KASSERT(p->valid == VM_PAGE_BITS_ALL,
4865 ("%s: invalid page %p", __func__, p));
4866 pat_mode = p->md.pat_mode;
4867
4868 /*
4869 * Abort the mapping if the first page is not physically
4870 * aligned to a 1MB page boundary.
4871 */
4872 pte2_pa = VM_PAGE_TO_PHYS(p);
4873 if (pte2_pa & PTE1_OFFSET)
4874 return;
4875
4876 /*
4877 * Skip the first page. Abort the mapping if the rest of
4878 * the pages are not physically contiguous or have differing
4879 * memory attributes.
4880 */
4881 p = TAILQ_NEXT(p, listq);
4882 for (pa = pte2_pa + PAGE_SIZE; pa < pte2_pa + size;
4883 pa += PAGE_SIZE) {
4884 KASSERT(p->valid == VM_PAGE_BITS_ALL,
4885 ("%s: invalid page %p", __func__, p));
4886 if (pa != VM_PAGE_TO_PHYS(p) ||
4887 pat_mode != p->md.pat_mode)
4888 return;
4889 p = TAILQ_NEXT(p, listq);
4890 }
4891
4892 /*
4893 * Map using 1MB pages.
4894 *
4895 * QQQ: Well, we are mapping a section, so the same conditions
4896 * must hold as during promotion. It looks like only RW mappings
4897 * are done here, so read-only mappings must be done elsewhere.
4898 */
4899 l1prot = PTE1_U | PTE1_NG | PTE1_RW | PTE1_M | PTE1_A;
4900 l1attr = ATTR_TO_L1(vm_memattr_to_pte2(pat_mode));
4901 PMAP_LOCK(pmap);
4902 for (pa = pte2_pa; pa < pte2_pa + size; pa += PTE1_SIZE) {
4903 pte1p = pmap_pte1(pmap, addr);
4904 if (!pte1_is_valid(pte1_load(pte1p))) {
4905 pte1_store(pte1p, PTE1(pa, l1prot, l1attr));
4906 pmap->pm_stats.resident_count += PTE1_SIZE /
4907 PAGE_SIZE;
4908 pmap_pte1_mappings++;
4909 }
4910 /* Else continue on if the PTE1 is already valid. */
4911 addr += PTE1_SIZE;
4912 }
4913 PMAP_UNLOCK(pmap);
4914 }
4915 }
4916
4917 /*
4918 * Do the things to protect a 1mpage in a process.
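*
* E.g. dropping VM_PROT_WRITE from a section rewrites its pte1 as
*
*	npte1 = opte1 | PTE1_RO | PTE1_NM;
*
* (plus PTE1_NX when VM_PROT_EXECUTE is removed as well), which is
* what the code below stores.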
4919 */
4920 static void
4921 pmap_protect_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t sva,
4922 vm_prot_t prot)
4923 {
4924 pt1_entry_t npte1, opte1;
4925 vm_offset_t eva, va;
4926 vm_page_t m;
4927
4928 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4929 KASSERT((sva & PTE1_OFFSET) == 0,
4930 ("%s: sva is not 1mpage aligned", __func__));
4931
4932 opte1 = npte1 = pte1_load(pte1p);
4933 if (pte1_is_managed(opte1) && pte1_is_dirty(opte1)) {
4934 eva = sva + PTE1_SIZE;
4935 for (va = sva, m = PHYS_TO_VM_PAGE(pte1_pa(opte1));
4936 va < eva; va += PAGE_SIZE, m++)
4937 vm_page_dirty(m);
4938 }
4939 if ((prot & VM_PROT_WRITE) == 0)
4940 npte1 |= PTE1_RO | PTE1_NM;
4941 if ((prot & VM_PROT_EXECUTE) == 0)
4942 npte1 |= PTE1_NX;
4943
4944 /*
4945 * QQQ: Herein, execute permission is never set.
4946 * It only can be cleared. So, no icache
4947 * syncing is needed.
4948 */
4949
4950 if (npte1 != opte1) {
4951 pte1_store(pte1p, npte1);
4952 pmap_tlb_flush(pmap, sva);
4953 }
4954 }
4955
4956 /*
4957 * Set the physical protection on the
4958 * specified range of this map as requested.
4959 */
4960 void
4961 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
4962 {
4963 boolean_t pv_lists_locked;
4964 vm_offset_t nextva;
4965 pt1_entry_t *pte1p, pte1;
4966 pt2_entry_t *pte2p, opte2, npte2;
4967
4968 KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot));
4969 if (prot == VM_PROT_NONE) {
4970 pmap_remove(pmap, sva, eva);
4971 return;
4972 }
4973
4974 if ((prot & (VM_PROT_WRITE | VM_PROT_EXECUTE)) ==
4975 (VM_PROT_WRITE | VM_PROT_EXECUTE))
4976 return;
4977
4978 if (pmap_is_current(pmap))
4979 pv_lists_locked = FALSE;
4980 else {
4981 pv_lists_locked = TRUE;
4982 resume:
4983 rw_wlock(&pvh_global_lock);
4984 sched_pin();
4985 }
4986
4987 PMAP_LOCK(pmap);
4988 for (; sva < eva; sva = nextva) {
4989 /*
4990 * Calculate address for next L2 page table.
4991 */
4992 nextva = pte1_trunc(sva + PTE1_SIZE);
4993 if (nextva < sva)
4994 nextva = eva;
4995
4996 pte1p = pmap_pte1(pmap, sva);
4997 pte1 = pte1_load(pte1p);
4998
4999 /*
5000 * Weed out invalid mappings. Note: we assume that the L1
5001 * page table is always allocated, and in kernel virtual.
5002 */
5003 if (pte1 == 0)
5004 continue;
5005
5006 if (pte1_is_section(pte1)) {
5007 /*
5008 * Are we protecting the entire large page? If not,
5009 * demote the mapping and fall through.
5010 */
5011 if (sva + PTE1_SIZE == nextva && eva >= nextva) {
5012 pmap_protect_pte1(pmap, pte1p, sva, prot);
5013 continue;
5014 } else {
5015 if (!pv_lists_locked) {
5016 pv_lists_locked = TRUE;
5017 if (!rw_try_wlock(&pvh_global_lock)) {
5018 PMAP_UNLOCK(pmap);
5019 goto resume;
5020 }
5021 sched_pin();
5022 }
5023 if (!pmap_demote_pte1(pmap, pte1p, sva)) {
5024 /*
5025 * The large page mapping
5026 * was destroyed.
5027 */
5028 continue;
5029 }
5030 #ifdef INVARIANTS
5031 else {
5032 /* Update pte1 after demotion */
5033 pte1 = pte1_load(pte1p);
5034 }
5035 #endif
5036 }
5037 }
5038
5039 KASSERT(pte1_is_link(pte1), ("%s: pmap %p va %#x pte1 %#x at %p"
5040 " is not link", __func__, pmap, sva, pte1, pte1p));
5041
5042 /*
5043 * Limit our scan to either the end of the va represented
5044 * by the current L2 page table page, or to the end of the
5045 * range being protected.
5046 */ 5047 if (nextva > eva) 5048 nextva = eva; 5049 5050 for (pte2p = pmap_pte2_quick(pmap, sva); sva != nextva; pte2p++, 5051 sva += PAGE_SIZE) { 5052 vm_page_t m; 5053 5054 opte2 = npte2 = pte2_load(pte2p); 5055 if (!pte2_is_valid(opte2)) 5056 continue; 5057 5058 if ((prot & VM_PROT_WRITE) == 0) { 5059 if (pte2_is_managed(opte2) && 5060 pte2_is_dirty(opte2)) { 5061 m = PHYS_TO_VM_PAGE(pte2_pa(opte2)); 5062 vm_page_dirty(m); 5063 } 5064 npte2 |= PTE2_RO | PTE2_NM; 5065 } 5066 5067 if ((prot & VM_PROT_EXECUTE) == 0) 5068 npte2 |= PTE2_NX; 5069 5070 /* 5071 * QQQ: Herein, execute permission is never set. 5072 * It only can be cleared. So, no icache 5073 * syncing is needed. 5074 */ 5075 5076 if (npte2 != opte2) { 5077 pte2_store(pte2p, npte2); 5078 pmap_tlb_flush(pmap, sva); 5079 } 5080 } 5081 } 5082 if (pv_lists_locked) { 5083 sched_unpin(); 5084 rw_wunlock(&pvh_global_lock); 5085 } 5086 PMAP_UNLOCK(pmap); 5087 } 5088 5089 /* 5090 * pmap_pvh_wired_mappings: 5091 * 5092 * Return the updated number "count" of managed mappings that are wired. 5093 */ 5094 static int 5095 pmap_pvh_wired_mappings(struct md_page *pvh, int count) 5096 { 5097 pmap_t pmap; 5098 pt1_entry_t pte1; 5099 pt2_entry_t pte2; 5100 pv_entry_t pv; 5101 5102 rw_assert(&pvh_global_lock, RA_WLOCKED); 5103 sched_pin(); 5104 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { 5105 pmap = PV_PMAP(pv); 5106 PMAP_LOCK(pmap); 5107 pte1 = pte1_load(pmap_pte1(pmap, pv->pv_va)); 5108 if (pte1_is_section(pte1)) { 5109 if (pte1_is_wired(pte1)) 5110 count++; 5111 } else { 5112 KASSERT(pte1_is_link(pte1), 5113 ("%s: pte1 %#x is not link", __func__, pte1)); 5114 pte2 = pte2_load(pmap_pte2_quick(pmap, pv->pv_va)); 5115 if (pte2_is_wired(pte2)) 5116 count++; 5117 } 5118 PMAP_UNLOCK(pmap); 5119 } 5120 sched_unpin(); 5121 return (count); 5122 } 5123 5124 /* 5125 * pmap_page_wired_mappings: 5126 * 5127 * Return the number of managed mappings to the given physical page 5128 * that are wired. 5129 */ 5130 int 5131 pmap_page_wired_mappings(vm_page_t m) 5132 { 5133 int count; 5134 5135 count = 0; 5136 if ((m->oflags & VPO_UNMANAGED) != 0) 5137 return (count); 5138 rw_wlock(&pvh_global_lock); 5139 count = pmap_pvh_wired_mappings(&m->md, count); 5140 if ((m->flags & PG_FICTITIOUS) == 0) { 5141 count = pmap_pvh_wired_mappings(pa_to_pvh(VM_PAGE_TO_PHYS(m)), 5142 count); 5143 } 5144 rw_wunlock(&pvh_global_lock); 5145 return (count); 5146 } 5147 5148 /* 5149 * Returns TRUE if any of the given mappings were used to modify 5150 * physical memory. Otherwise, returns FALSE. Both page and 1mpage 5151 * mappings are supported. 5152 */ 5153 static boolean_t 5154 pmap_is_modified_pvh(struct md_page *pvh) 5155 { 5156 pv_entry_t pv; 5157 pt1_entry_t pte1; 5158 pt2_entry_t pte2; 5159 pmap_t pmap; 5160 boolean_t rv; 5161 5162 rw_assert(&pvh_global_lock, RA_WLOCKED); 5163 rv = FALSE; 5164 sched_pin(); 5165 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { 5166 pmap = PV_PMAP(pv); 5167 PMAP_LOCK(pmap); 5168 pte1 = pte1_load(pmap_pte1(pmap, pv->pv_va)); 5169 if (pte1_is_section(pte1)) { 5170 rv = pte1_is_dirty(pte1); 5171 } else { 5172 KASSERT(pte1_is_link(pte1), 5173 ("%s: pte1 %#x is not link", __func__, pte1)); 5174 pte2 = pte2_load(pmap_pte2_quick(pmap, pv->pv_va)); 5175 rv = pte2_is_dirty(pte2); 5176 } 5177 PMAP_UNLOCK(pmap); 5178 if (rv) 5179 break; 5180 } 5181 sched_unpin(); 5182 return (rv); 5183 } 5184 5185 /* 5186 * pmap_is_modified: 5187 * 5188 * Return whether or not the specified physical page was modified 5189 * in any physical maps. 
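*
* A hypothetical caller sketch (assumes "object" is m's object,
* whose lock the function asserts below):
*
*	VM_OBJECT_WLOCK(object);
*	if (pmap_is_modified(m))
*		vm_page_dirty(m);
*	VM_OBJECT_WUNLOCK(object);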
5190 */
5191 boolean_t
5192 pmap_is_modified(vm_page_t m)
5193 {
5194 boolean_t rv;
5195
5196 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
5197 ("%s: page %p is not managed", __func__, m));
5198
5199 /*
5200 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
5201 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
5202 * is clear, no PTE2s can be dirty.
5203 */
5204 VM_OBJECT_ASSERT_WLOCKED(m->object);
5205 if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
5206 return (FALSE);
5207 rw_wlock(&pvh_global_lock);
5208 rv = pmap_is_modified_pvh(&m->md) ||
5209 ((m->flags & PG_FICTITIOUS) == 0 &&
5210 pmap_is_modified_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
5211 rw_wunlock(&pvh_global_lock);
5212 return (rv);
5213 }
5214
5215 /*
5216 * pmap_is_prefaultable:
5217 *
5218 * Return whether or not the specified virtual address is eligible
5219 * for prefault.
5220 */
5221 boolean_t
5222 pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
5223 {
5224 pt1_entry_t pte1;
5225 pt2_entry_t pte2;
5226 boolean_t rv;
5227
5228 rv = FALSE;
5229 PMAP_LOCK(pmap);
5230 pte1 = pte1_load(pmap_pte1(pmap, addr));
5231 if (pte1_is_link(pte1)) {
5232 pte2 = pte2_load(pt2map_entry(addr));
5233 rv = !pte2_is_valid(pte2);
5234 }
5235 PMAP_UNLOCK(pmap);
5236 return (rv);
5237 }
5238
5239 /*
5240 * Returns TRUE if any of the given mappings were referenced and FALSE
5241 * otherwise. Both page and 1mpage mappings are supported.
5242 */
5243 static boolean_t
5244 pmap_is_referenced_pvh(struct md_page *pvh)
5245 {
5246
5247 pv_entry_t pv;
5248 pt1_entry_t pte1;
5249 pt2_entry_t pte2;
5250 pmap_t pmap;
5251 boolean_t rv;
5252
5253 rw_assert(&pvh_global_lock, RA_WLOCKED);
5254 rv = FALSE;
5255 sched_pin();
5256 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
5257 pmap = PV_PMAP(pv);
5258 PMAP_LOCK(pmap);
5259 pte1 = pte1_load(pmap_pte1(pmap, pv->pv_va));
5260 if (pte1_is_section(pte1)) {
5261 rv = (pte1 & (PTE1_A | PTE1_V)) == (PTE1_A | PTE1_V);
5262 } else {
5263 pte2 = pte2_load(pmap_pte2_quick(pmap, pv->pv_va));
5264 rv = (pte2 & (PTE2_A | PTE2_V)) == (PTE2_A | PTE2_V);
5265 }
5266 PMAP_UNLOCK(pmap);
5267 if (rv)
5268 break;
5269 }
5270 sched_unpin();
5271 return (rv);
5272 }
5273
5274 /*
5275 * pmap_is_referenced:
5276 *
5277 * Return whether or not the specified physical page was referenced
5278 * in any physical maps.
5279 */
5280 boolean_t
5281 pmap_is_referenced(vm_page_t m)
5282 {
5283 boolean_t rv;
5284
5285 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
5286 ("%s: page %p is not managed", __func__, m));
5287 rw_wlock(&pvh_global_lock);
5288 rv = pmap_is_referenced_pvh(&m->md) ||
5289 ((m->flags & PG_FICTITIOUS) == 0 &&
5290 pmap_is_referenced_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
5291 rw_wunlock(&pvh_global_lock);
5292 return (rv);
5293 }
5294
5295 /*
5296 * pmap_ts_referenced:
5297 *
5298 * Return a count of reference bits for a page, clearing those bits.
5299 * It is not necessary for every reference bit to be cleared, but it
5300 * is necessary that 0 only be returned when there are truly no
5301 * reference bits set.
5302 *
5303 * As an optimization, update the page's dirty field if a modified bit is
5304 * found while counting reference bits. This opportunistic update can be
5305 * performed at low cost and can eliminate the need for some future calls
5306 * to pmap_is_modified(). However, since this function stops after
5307 * finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some
5308 * dirty pages.
Those dirty pages will only be detected by a future call 5309 * to pmap_is_modified(). 5310 */ 5311 int 5312 pmap_ts_referenced(vm_page_t m) 5313 { 5314 struct md_page *pvh; 5315 pv_entry_t pv, pvf; 5316 pmap_t pmap; 5317 pt1_entry_t *pte1p, opte1; 5318 pt2_entry_t *pte2p, opte2; 5319 vm_paddr_t pa; 5320 int rtval = 0; 5321 5322 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 5323 ("%s: page %p is not managed", __func__, m)); 5324 pa = VM_PAGE_TO_PHYS(m); 5325 pvh = pa_to_pvh(pa); 5326 rw_wlock(&pvh_global_lock); 5327 sched_pin(); 5328 if ((m->flags & PG_FICTITIOUS) != 0 || 5329 (pvf = TAILQ_FIRST(&pvh->pv_list)) == NULL) 5330 goto small_mappings; 5331 pv = pvf; 5332 do { 5333 pmap = PV_PMAP(pv); 5334 PMAP_LOCK(pmap); 5335 pte1p = pmap_pte1(pmap, pv->pv_va); 5336 opte1 = pte1_load(pte1p); 5337 if (pte1_is_dirty(opte1)) { 5338 /* 5339 * Although "opte1" is mapping a 1MB page, because 5340 * this function is called at a 4KB page granularity, 5341 * we only update the 4KB page under test. 5342 */ 5343 vm_page_dirty(m); 5344 } 5345 if ((opte1 & PTE1_A) != 0) { 5346 /* 5347 * Since this reference bit is shared by 256 4KB pages, 5348 * it should not be cleared every time it is tested. 5349 * Apply a simple "hash" function on the physical page 5350 * number, the virtual section number, and the pmap 5351 * address to select one 4KB page out of the 256 5352 * on which testing the reference bit will result 5353 * in clearing that bit. This function is designed 5354 * to avoid the selection of the same 4KB page 5355 * for every 1MB page mapping. 5356 * 5357 * On demotion, a mapping that hasn't been referenced 5358 * is simply destroyed. To avoid the possibility of a 5359 * subsequent page fault on a demoted wired mapping, 5360 * always leave its reference bit set. Moreover, 5361 * since the section is wired, the current state of 5362 * its reference bit won't affect page replacement. 5363 */ 5364 if ((((pa >> PAGE_SHIFT) ^ (pv->pv_va >> PTE1_SHIFT) ^ 5365 (uintptr_t)pmap) & (NPTE2_IN_PG - 1)) == 0 && 5366 !pte1_is_wired(opte1)) { 5367 pte1_clear_bit(pte1p, PTE1_A); 5368 pmap_tlb_flush(pmap, pv->pv_va); 5369 } 5370 rtval++; 5371 } 5372 PMAP_UNLOCK(pmap); 5373 /* Rotate the PV list if it has more than one entry. */ 5374 if (TAILQ_NEXT(pv, pv_next) != NULL) { 5375 TAILQ_REMOVE(&pvh->pv_list, pv, pv_next); 5376 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next); 5377 } 5378 if (rtval >= PMAP_TS_REFERENCED_MAX) 5379 goto out; 5380 } while ((pv = TAILQ_FIRST(&pvh->pv_list)) != pvf); 5381 small_mappings: 5382 if ((pvf = TAILQ_FIRST(&m->md.pv_list)) == NULL) 5383 goto out; 5384 pv = pvf; 5385 do { 5386 pmap = PV_PMAP(pv); 5387 PMAP_LOCK(pmap); 5388 pte1p = pmap_pte1(pmap, pv->pv_va); 5389 KASSERT(pte1_is_link(pte1_load(pte1p)), 5390 ("%s: not found a link in page %p's pv list", __func__, m)); 5391 5392 pte2p = pmap_pte2_quick(pmap, pv->pv_va); 5393 opte2 = pte2_load(pte2p); 5394 if (pte2_is_dirty(opte2)) 5395 vm_page_dirty(m); 5396 if ((opte2 & PTE2_A) != 0) { 5397 pte2_clear_bit(pte2p, PTE2_A); 5398 pmap_tlb_flush(pmap, pv->pv_va); 5399 rtval++; 5400 } 5401 PMAP_UNLOCK(pmap); 5402 /* Rotate the PV list if it has more than one entry. 
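*
* E.g. a list (pv1, pv2, pv3) becomes (pv2, pv3, pv1), so that a
* subsequent call samples pv2's mapping first instead of testing
* (and clearing) pv1's reference bit over and over.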
*/
5403 if (TAILQ_NEXT(pv, pv_next) != NULL) {
5404 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
5405 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
5406 }
5407 } while ((pv = TAILQ_FIRST(&m->md.pv_list)) != pvf && rtval <
5408 PMAP_TS_REFERENCED_MAX);
5409 out:
5410 sched_unpin();
5411 rw_wunlock(&pvh_global_lock);
5412 return (rtval);
5413 }
5414
5415 /*
5416 * Clear the wired attribute from the mappings for the specified range of
5417 * addresses in the given pmap. Every valid mapping within that range
5418 * must have the wired attribute set. In contrast, invalid mappings
5419 * cannot have the wired attribute set, so they are ignored.
5420 *
5421 * The wired attribute of the page table entry is not a hardware feature,
5422 * so there is no need to invalidate any TLB entries.
5423 */
5424 void
5425 pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
5426 {
5427 vm_offset_t nextva;
5428 pt1_entry_t *pte1p, pte1;
5429 pt2_entry_t *pte2p, pte2;
5430 boolean_t pv_lists_locked;
5431
5432 if (pmap_is_current(pmap))
5433 pv_lists_locked = FALSE;
5434 else {
5435 pv_lists_locked = TRUE;
5436 resume:
5437 rw_wlock(&pvh_global_lock);
5438 sched_pin();
5439 }
5440 PMAP_LOCK(pmap);
5441 for (; sva < eva; sva = nextva) {
5442 nextva = pte1_trunc(sva + PTE1_SIZE);
5443 if (nextva < sva)
5444 nextva = eva;
5445
5446 pte1p = pmap_pte1(pmap, sva);
5447 pte1 = pte1_load(pte1p);
5448
5449 /*
5450 * Weed out invalid mappings. Note: we assume that the L1
5451 * page table is always allocated, and in kernel virtual.
5452 */
5453 if (pte1 == 0)
5454 continue;
5455
5456 if (pte1_is_section(pte1)) {
5457 if (!pte1_is_wired(pte1))
5458 panic("%s: pte1 %#x not wired", __func__, pte1);
5459
5460 /*
5461 * Are we unwiring the entire large page? If not,
5462 * demote the mapping and fall through.
5463 */
5464 if (sva + PTE1_SIZE == nextva && eva >= nextva) {
5465 pte1_clear_bit(pte1p, PTE1_W);
5466 pmap->pm_stats.wired_count -= PTE1_SIZE /
5467 PAGE_SIZE;
5468 continue;
5469 } else {
5470 if (!pv_lists_locked) {
5471 pv_lists_locked = TRUE;
5472 if (!rw_try_wlock(&pvh_global_lock)) {
5473 PMAP_UNLOCK(pmap);
5474 /* Repeat sva. */
5475 goto resume;
5476 }
5477 sched_pin();
5478 }
5479 if (!pmap_demote_pte1(pmap, pte1p, sva))
5480 panic("%s: demotion failed", __func__);
5481 #ifdef INVARIANTS
5482 else {
5483 /* Update pte1 after demotion */
5484 pte1 = pte1_load(pte1p);
5485 }
5486 #endif
5487 }
5488 }
5489
5490 KASSERT(pte1_is_link(pte1), ("%s: pmap %p va %#x pte1 %#x at %p"
5491 " is not link", __func__, pmap, sva, pte1, pte1p));
5492
5493 /*
5494 * Limit our scan to either the end of the va represented
5495 * by the current L2 page table page, or to the end of the
5496 * range being unwired.
5497 */
5498 if (nextva > eva)
5499 nextva = eva;
5500
5501 for (pte2p = pmap_pte2_quick(pmap, sva); sva != nextva; pte2p++,
5502 sva += PAGE_SIZE) {
5503 pte2 = pte2_load(pte2p);
5504 if (!pte2_is_valid(pte2))
5505 continue;
5506 if (!pte2_is_wired(pte2))
5507 panic("%s: pte2 %#x is missing PTE2_W",
5508 __func__, pte2);
5509
5510 /*
5511 * PTE2_W must be cleared atomically. Although the pmap
5512 * lock synchronizes access to PTE2_W, another processor
5513 * could be changing PTE2_NM and/or PTE2_A concurrently.
5514 */
5515 pte2_clear_bit(pte2p, PTE2_W);
5516 pmap->pm_stats.wired_count--;
5517 }
5518 }
5519 if (pv_lists_locked) {
5520 sched_unpin();
5521 rw_wunlock(&pvh_global_lock);
5522 }
5523 PMAP_UNLOCK(pmap);
5524 }
5525
5526 /*
5527 * Clear the write and modified bits in each of the given page's mappings.
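*
* In effect, each remaining small mapping is rewritten as
*
*	pte2_store(pte2p, opte2 | PTE2_RO | PTE2_NM);
*
* (sections are demoted first), and PGA_WRITEABLE is cleared on the
* page at the end.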
5528 */ 5529 void 5530 pmap_remove_write(vm_page_t m) 5531 { 5532 struct md_page *pvh; 5533 pv_entry_t next_pv, pv; 5534 pmap_t pmap; 5535 pt1_entry_t *pte1p; 5536 pt2_entry_t *pte2p, opte2; 5537 vm_offset_t va; 5538 5539 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 5540 ("%s: page %p is not managed", __func__, m)); 5541 5542 /* 5543 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be 5544 * set by another thread while the object is locked. Thus, 5545 * if PGA_WRITEABLE is clear, no page table entries need updating. 5546 */ 5547 VM_OBJECT_ASSERT_WLOCKED(m->object); 5548 if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0) 5549 return; 5550 rw_wlock(&pvh_global_lock); 5551 sched_pin(); 5552 if ((m->flags & PG_FICTITIOUS) != 0) 5553 goto small_mappings; 5554 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 5555 TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) { 5556 va = pv->pv_va; 5557 pmap = PV_PMAP(pv); 5558 PMAP_LOCK(pmap); 5559 pte1p = pmap_pte1(pmap, va); 5560 if (!(pte1_load(pte1p) & PTE1_RO)) 5561 (void)pmap_demote_pte1(pmap, pte1p, va); 5562 PMAP_UNLOCK(pmap); 5563 } 5564 small_mappings: 5565 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { 5566 pmap = PV_PMAP(pv); 5567 PMAP_LOCK(pmap); 5568 pte1p = pmap_pte1(pmap, pv->pv_va); 5569 KASSERT(!pte1_is_section(pte1_load(pte1p)), ("%s: found" 5570 " a section in page %p's pv list", __func__, m)); 5571 pte2p = pmap_pte2_quick(pmap, pv->pv_va); 5572 opte2 = pte2_load(pte2p); 5573 if (!(opte2 & PTE2_RO)) { 5574 pte2_store(pte2p, opte2 | PTE2_RO | PTE2_NM); 5575 if (pte2_is_dirty(opte2)) 5576 vm_page_dirty(m); 5577 pmap_tlb_flush(pmap, pv->pv_va); 5578 } 5579 PMAP_UNLOCK(pmap); 5580 } 5581 vm_page_aflag_clear(m, PGA_WRITEABLE); 5582 sched_unpin(); 5583 rw_wunlock(&pvh_global_lock); 5584 } 5585 5586 /* 5587 * Apply the given advice to the specified range of addresses within the 5588 * given pmap. Depending on the advice, clear the referenced and/or 5589 * modified flags in each mapping and set the mapped page's dirty field. 5590 */ 5591 void 5592 pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice) 5593 { 5594 pt1_entry_t *pte1p, opte1; 5595 pt2_entry_t *pte2p, pte2; 5596 vm_offset_t pdnxt; 5597 vm_page_t m; 5598 boolean_t pv_lists_locked; 5599 5600 if (advice != MADV_DONTNEED && advice != MADV_FREE) 5601 return; 5602 if (pmap_is_current(pmap)) 5603 pv_lists_locked = FALSE; 5604 else { 5605 pv_lists_locked = TRUE; 5606 resume: 5607 rw_wlock(&pvh_global_lock); 5608 sched_pin(); 5609 } 5610 PMAP_LOCK(pmap); 5611 for (; sva < eva; sva = pdnxt) { 5612 pdnxt = pte1_trunc(sva + PTE1_SIZE); 5613 if (pdnxt < sva) 5614 pdnxt = eva; 5615 pte1p = pmap_pte1(pmap, sva); 5616 opte1 = pte1_load(pte1p); 5617 if (!pte1_is_valid(opte1)) /* XXX */ 5618 continue; 5619 else if (pte1_is_section(opte1)) { 5620 if (!pte1_is_managed(opte1)) 5621 continue; 5622 if (!pv_lists_locked) { 5623 pv_lists_locked = TRUE; 5624 if (!rw_try_wlock(&pvh_global_lock)) { 5625 PMAP_UNLOCK(pmap); 5626 goto resume; 5627 } 5628 sched_pin(); 5629 } 5630 if (!pmap_demote_pte1(pmap, pte1p, sva)) { 5631 /* 5632 * The large page mapping was destroyed. 5633 */ 5634 continue; 5635 } 5636 5637 /* 5638 * Unless the page mappings are wired, remove the 5639 * mapping to a single page so that a subsequent 5640 * access may repromote. Since the underlying L2 page 5641 * table is fully populated, this removal never 5642 * frees a L2 page table page. 
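*
* (A fully populated L2 page table holds NPTE2_IN_PT2 entries, so
* removing a single pte2 leaves its wire count non-zero and the
* page table page stays allocated.)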
5643 */ 5644 if (!pte1_is_wired(opte1)) { 5645 pte2p = pmap_pte2_quick(pmap, sva); 5646 KASSERT(pte2_is_valid(pte2_load(pte2p)), 5647 ("%s: invalid PTE2", __func__)); 5648 pmap_remove_pte2(pmap, pte2p, sva, NULL); 5649 } 5650 } 5651 if (pdnxt > eva) 5652 pdnxt = eva; 5653 for (pte2p = pmap_pte2_quick(pmap, sva); sva != pdnxt; pte2p++, 5654 sva += PAGE_SIZE) { 5655 pte2 = pte2_load(pte2p); 5656 if (!pte2_is_valid(pte2) || !pte2_is_managed(pte2)) 5657 continue; 5658 else if (pte2_is_dirty(pte2)) { 5659 if (advice == MADV_DONTNEED) { 5660 /* 5661 * Future calls to pmap_is_modified() 5662 * can be avoided by making the page 5663 * dirty now. 5664 */ 5665 m = PHYS_TO_VM_PAGE(pte2_pa(pte2)); 5666 vm_page_dirty(m); 5667 } 5668 pte2_set_bit(pte2p, PTE2_NM); 5669 pte2_clear_bit(pte2p, PTE2_A); 5670 } else if ((pte2 & PTE2_A) != 0) 5671 pte2_clear_bit(pte2p, PTE2_A); 5672 else 5673 continue; 5674 pmap_tlb_flush(pmap, sva); 5675 } 5676 } 5677 if (pv_lists_locked) { 5678 sched_unpin(); 5679 rw_wunlock(&pvh_global_lock); 5680 } 5681 PMAP_UNLOCK(pmap); 5682 } 5683 5684 /* 5685 * Clear the modify bits on the specified physical page. 5686 */ 5687 void 5688 pmap_clear_modify(vm_page_t m) 5689 { 5690 struct md_page *pvh; 5691 pv_entry_t next_pv, pv; 5692 pmap_t pmap; 5693 pt1_entry_t *pte1p, opte1; 5694 pt2_entry_t *pte2p, opte2; 5695 vm_offset_t va; 5696 5697 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 5698 ("%s: page %p is not managed", __func__, m)); 5699 VM_OBJECT_ASSERT_WLOCKED(m->object); 5700 KASSERT(!vm_page_xbusied(m), 5701 ("%s: page %p is exclusive busy", __func__, m)); 5702 5703 /* 5704 * If the page is not PGA_WRITEABLE, then no PTE2s can have PTE2_NM 5705 * cleared. If the object containing the page is locked and the page 5706 * is not exclusive busied, then PGA_WRITEABLE cannot be concurrently 5707 * set. 5708 */ 5709 if ((m->aflags & PGA_WRITEABLE) == 0) 5710 return; 5711 rw_wlock(&pvh_global_lock); 5712 sched_pin(); 5713 if ((m->flags & PG_FICTITIOUS) != 0) 5714 goto small_mappings; 5715 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 5716 TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) { 5717 va = pv->pv_va; 5718 pmap = PV_PMAP(pv); 5719 PMAP_LOCK(pmap); 5720 pte1p = pmap_pte1(pmap, va); 5721 opte1 = pte1_load(pte1p); 5722 if (!(opte1 & PTE1_RO)) { 5723 if (pmap_demote_pte1(pmap, pte1p, va) && 5724 !pte1_is_wired(opte1)) { 5725 /* 5726 * Write protect the mapping to a 5727 * single page so that a subsequent 5728 * write access may repromote. 5729 */ 5730 va += VM_PAGE_TO_PHYS(m) - pte1_pa(opte1); 5731 pte2p = pmap_pte2_quick(pmap, va); 5732 opte2 = pte2_load(pte2p); 5733 if ((opte2 & PTE2_V) != 0) { 5734 pte2_set_bit(pte2p, PTE2_NM | PTE2_RO); 5735 vm_page_dirty(m); 5736 pmap_tlb_flush(pmap, va); 5737 } 5738 } 5739 } 5740 PMAP_UNLOCK(pmap); 5741 } 5742 small_mappings: 5743 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { 5744 pmap = PV_PMAP(pv); 5745 PMAP_LOCK(pmap); 5746 pte1p = pmap_pte1(pmap, pv->pv_va); 5747 KASSERT(!pte1_is_section(pte1_load(pte1p)), ("%s: found" 5748 " a section in page %p's pv list", __func__, m)); 5749 pte2p = pmap_pte2_quick(pmap, pv->pv_va); 5750 if (pte2_is_dirty(pte2_load(pte2p))) { 5751 pte2_set_bit(pte2p, PTE2_NM); 5752 pmap_tlb_flush(pmap, pv->pv_va); 5753 } 5754 PMAP_UNLOCK(pmap); 5755 } 5756 sched_unpin(); 5757 rw_wunlock(&pvh_global_lock); 5758 } 5759 5760 5761 /* 5762 * Sets the memory attribute for the specified page.
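 *
 * A minimal usage sketch (hypothetical caller; VM_MEMATTR_NOCACHE is
 * picked only for illustration):
 *
 *	pmap_page_set_memattr(m, VM_MEMATTR_NOCACHE);
 *
 * Later mappings of "m" are created with the new attribute; when the
 * attribute actually changes, the body below also write-back-invalidates
 * the page's cache lines through a transient CMAP2 mapping.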
5763 */ 5764 void 5765 pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma) 5766 { 5767 pt2_entry_t *cmap2_pte2p; 5768 vm_memattr_t oma; 5769 vm_paddr_t pa; 5770 struct pcpu *pc; 5771 5772 oma = m->md.pat_mode; 5773 m->md.pat_mode = ma; 5774 5775 CTR5(KTR_PMAP, "%s: page %p - 0x%08X oma: %d, ma: %d", __func__, m, 5776 VM_PAGE_TO_PHYS(m), oma, ma); 5777 if ((m->flags & PG_FICTITIOUS) != 0) 5778 return; 5779 #if 0 5780 /* 5781 * If "m" is a normal page, flush it from the cache. 5782 * 5783 * First, try to find an existing mapping of the page by sf 5784 * buffer. sf_buf_invalidate_cache() modifies mapping and 5785 * flushes the cache. 5786 */ 5787 if (sf_buf_invalidate_cache(m, oma)) 5788 return; 5789 #endif 5790 /* 5791 * If the page is not mapped by an sf buffer, map it transiently 5792 * and perform the invalidation. 5793 */ 5794 if (ma != oma) { 5795 pa = VM_PAGE_TO_PHYS(m); 5796 sched_pin(); 5797 pc = get_pcpu(); 5798 cmap2_pte2p = pc->pc_cmap2_pte2p; 5799 mtx_lock(&pc->pc_cmap_lock); 5800 if (pte2_load(cmap2_pte2p) != 0) 5801 panic("%s: CMAP2 busy", __func__); 5802 pte2_store(cmap2_pte2p, PTE2_KERN_NG(pa, PTE2_AP_KRW, 5803 vm_memattr_to_pte2(ma))); 5804 dcache_wbinv_poc((vm_offset_t)pc->pc_cmap2_addr, pa, PAGE_SIZE); 5805 pte2_clear(cmap2_pte2p); 5806 tlb_flush((vm_offset_t)pc->pc_cmap2_addr); 5807 sched_unpin(); 5808 mtx_unlock(&pc->pc_cmap_lock); 5809 } 5810 } 5811 5812 /* 5813 * Miscellaneous support routines follow 5814 */ 5815 5816 /* 5817 * Returns TRUE if the given page is mapped individually or as part of 5818 * a 1mpage. Otherwise, returns FALSE. 5819 */ 5820 boolean_t 5821 pmap_page_is_mapped(vm_page_t m) 5822 { 5823 boolean_t rv; 5824 5825 if ((m->oflags & VPO_UNMANAGED) != 0) 5826 return (FALSE); 5827 rw_wlock(&pvh_global_lock); 5828 rv = !TAILQ_EMPTY(&m->md.pv_list) || 5829 ((m->flags & PG_FICTITIOUS) == 0 && 5830 !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list)); 5831 rw_wunlock(&pvh_global_lock); 5832 return (rv); 5833 } 5834 5835 /* 5836 * Returns true if the pmap's pv is one of the first 5837 * 16 pvs linked to from this page. This count may 5838 * be changed upwards or downwards in the future; it 5839 * is only necessary that true be returned for a small 5840 * subset of pmaps for proper page aging. 5841 */ 5842 boolean_t 5843 pmap_page_exists_quick(pmap_t pmap, vm_page_t m) 5844 { 5845 struct md_page *pvh; 5846 pv_entry_t pv; 5847 int loops = 0; 5848 boolean_t rv; 5849 5850 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 5851 ("%s: page %p is not managed", __func__, m)); 5852 rv = FALSE; 5853 rw_wlock(&pvh_global_lock); 5854 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { 5855 if (PV_PMAP(pv) == pmap) { 5856 rv = TRUE; 5857 break; 5858 } 5859 loops++; 5860 if (loops >= 16) 5861 break; 5862 } 5863 if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) { 5864 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 5865 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { 5866 if (PV_PMAP(pv) == pmap) { 5867 rv = TRUE; 5868 break; 5869 } 5870 loops++; 5871 if (loops >= 16) 5872 break; 5873 } 5874 } 5875 rw_wunlock(&pvh_global_lock); 5876 return (rv); 5877 } 5878 5879 /* 5880 * pmap_zero_page zeros the specified hardware page by mapping 5881 * the page into KVM and using pagezero() to clear its contents.
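 *
 * The function below follows the per-CPU CMAP2 pattern used throughout
 * this file; in sketch form ("pa" and "attr" stand for the page's
 * physical address and its pte2 memory attribute):
 *
 *	sched_pin();
 *	pc = get_pcpu();
 *	mtx_lock(&pc->pc_cmap_lock);
 *	pte2_store(pc->pc_cmap2_pte2p, PTE2_KERN_NG(pa, PTE2_AP_KRW, attr));
 *	... operate on the page through pc->pc_cmap2_addr ...
 *	pte2_clear(pc->pc_cmap2_pte2p);
 *	tlb_flush((vm_offset_t)pc->pc_cmap2_addr);
 *	sched_unpin();
 *	mtx_unlock(&pc->pc_cmap_lock);
 *
 * Pinning keeps the thread on the CPU whose private CMAP2 slot and
 * non-global (PTE2_NG) TLB entry are being used.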
5882 */ 5883 void 5884 pmap_zero_page(vm_page_t m) 5885 { 5886 pt2_entry_t *cmap2_pte2p; 5887 struct pcpu *pc; 5888 5889 sched_pin(); 5890 pc = get_pcpu(); 5891 cmap2_pte2p = pc->pc_cmap2_pte2p; 5892 mtx_lock(&pc->pc_cmap_lock); 5893 if (pte2_load(cmap2_pte2p) != 0) 5894 panic("%s: CMAP2 busy", __func__); 5895 pte2_store(cmap2_pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(m), PTE2_AP_KRW, 5896 vm_page_pte2_attr(m))); 5897 pagezero(pc->pc_cmap2_addr); 5898 pte2_clear(cmap2_pte2p); 5899 tlb_flush((vm_offset_t)pc->pc_cmap2_addr); 5900 sched_unpin(); 5901 mtx_unlock(&pc->pc_cmap_lock); 5902 } 5903 5904 /* 5905 * pmap_zero_page_area zeros the specified hardware page by mapping 5906 * the page into KVM and using bzero to clear its contents. 5907 * 5908 * off and size may not cover an area beyond a single hardware page. 5909 */ 5910 void 5911 pmap_zero_page_area(vm_page_t m, int off, int size) 5912 { 5913 pt2_entry_t *cmap2_pte2p; 5914 struct pcpu *pc; 5915 5916 sched_pin(); 5917 pc = get_pcpu(); 5918 cmap2_pte2p = pc->pc_cmap2_pte2p; 5919 mtx_lock(&pc->pc_cmap_lock); 5920 if (pte2_load(cmap2_pte2p) != 0) 5921 panic("%s: CMAP2 busy", __func__); 5922 pte2_store(cmap2_pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(m), PTE2_AP_KRW, 5923 vm_page_pte2_attr(m))); 5924 if (off == 0 && size == PAGE_SIZE) 5925 pagezero(pc->pc_cmap2_addr); 5926 else 5927 bzero(pc->pc_cmap2_addr + off, size); 5928 pte2_clear(cmap2_pte2p); 5929 tlb_flush((vm_offset_t)pc->pc_cmap2_addr); 5930 sched_unpin(); 5931 mtx_unlock(&pc->pc_cmap_lock); 5932 } 5933 5934 /* 5935 * pmap_copy_page copies the specified (machine independent) 5936 * page by mapping the page into virtual memory and using 5937 * bcopy to copy the page, one machine dependent page at a 5938 * time. 5939 */ 5940 void 5941 pmap_copy_page(vm_page_t src, vm_page_t dst) 5942 { 5943 pt2_entry_t *cmap1_pte2p, *cmap2_pte2p; 5944 struct pcpu *pc; 5945 5946 sched_pin(); 5947 pc = get_pcpu(); 5948 cmap1_pte2p = pc->pc_cmap1_pte2p; 5949 cmap2_pte2p = pc->pc_cmap2_pte2p; 5950 mtx_lock(&pc->pc_cmap_lock); 5951 if (pte2_load(cmap1_pte2p) != 0) 5952 panic("%s: CMAP1 busy", __func__); 5953 if (pte2_load(cmap2_pte2p) != 0) 5954 panic("%s: CMAP2 busy", __func__); 5955 pte2_store(cmap1_pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(src), 5956 PTE2_AP_KR | PTE2_NM, vm_page_pte2_attr(src))); 5957 pte2_store(cmap2_pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(dst), 5958 PTE2_AP_KRW, vm_page_pte2_attr(dst))); 5959 bcopy(pc->pc_cmap1_addr, pc->pc_cmap2_addr, PAGE_SIZE); 5960 pte2_clear(cmap1_pte2p); 5961 tlb_flush((vm_offset_t)pc->pc_cmap1_addr); 5962 pte2_clear(cmap2_pte2p); 5963 tlb_flush((vm_offset_t)pc->pc_cmap2_addr); 5964 sched_unpin(); 5965 mtx_unlock(&pc->pc_cmap_lock); 5966 } 5967 5968 int unmapped_buf_allowed = 1; 5969 5970 void 5971 pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[], 5972 vm_offset_t b_offset, int xfersize) 5973 { 5974 pt2_entry_t *cmap1_pte2p, *cmap2_pte2p; 5975 vm_page_t a_pg, b_pg; 5976 char *a_cp, *b_cp; 5977 vm_offset_t a_pg_offset, b_pg_offset; 5978 struct pcpu *pc; 5979 int cnt; 5980 5981 sched_pin(); 5982 pc = get_pcpu(); 5983 cmap1_pte2p = pc->pc_cmap1_pte2p; 5984 cmap2_pte2p = pc->pc_cmap2_pte2p; 5985 mtx_lock(&pc->pc_cmap_lock); 5986 if (pte2_load(cmap1_pte2p) != 0) 5987 panic("pmap_copy_pages: CMAP1 busy"); 5988 if (pte2_load(cmap2_pte2p) != 0) 5989 panic("pmap_copy_pages: CMAP2 busy"); 5990 while (xfersize > 0) { 5991 a_pg = ma[a_offset >> PAGE_SHIFT]; 5992 a_pg_offset = a_offset & PAGE_MASK; 5993 cnt = min(xfersize, PAGE_SIZE - a_pg_offset); 5994 b_pg = mb[b_offset >> 
PAGE_SHIFT]; 5995 b_pg_offset = b_offset & PAGE_MASK; 5996 cnt = min(cnt, PAGE_SIZE - b_pg_offset); 5997 pte2_store(cmap1_pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(a_pg), 5998 PTE2_AP_KR | PTE2_NM, vm_page_pte2_attr(a_pg))); 5999 tlb_flush_local((vm_offset_t)pc->pc_cmap1_addr); 6000 pte2_store(cmap2_pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(b_pg), 6001 PTE2_AP_KRW, vm_page_pte2_attr(b_pg))); 6002 tlb_flush_local((vm_offset_t)pc->pc_cmap2_addr); 6003 a_cp = pc->pc_cmap1_addr + a_pg_offset; 6004 b_cp = pc->pc_cmap2_addr + b_pg_offset; 6005 bcopy(a_cp, b_cp, cnt); 6006 a_offset += cnt; 6007 b_offset += cnt; 6008 xfersize -= cnt; 6009 } 6010 pte2_clear(cmap1_pte2p); 6011 tlb_flush((vm_offset_t)pc->pc_cmap1_addr); 6012 pte2_clear(cmap2_pte2p); 6013 tlb_flush((vm_offset_t)pc->pc_cmap2_addr); 6014 sched_unpin(); 6015 mtx_unlock(&pc->pc_cmap_lock); 6016 } 6017 6018 vm_offset_t 6019 pmap_quick_enter_page(vm_page_t m) 6020 { 6021 struct pcpu *pc; 6022 pt2_entry_t *pte2p; 6023 6024 critical_enter(); 6025 pc = get_pcpu(); 6026 pte2p = pc->pc_qmap_pte2p; 6027 6028 KASSERT(pte2_load(pte2p) == 0, ("%s: PTE2 busy", __func__)); 6029 6030 pte2_store(pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(m), PTE2_AP_KRW, 6031 vm_page_pte2_attr(m))); 6032 return (pc->pc_qmap_addr); 6033 } 6034 6035 void 6036 pmap_quick_remove_page(vm_offset_t addr) 6037 { 6038 struct pcpu *pc; 6039 pt2_entry_t *pte2p; 6040 6041 pc = get_pcpu(); 6042 pte2p = pc->pc_qmap_pte2p; 6043 6044 KASSERT(addr == pc->pc_qmap_addr, ("%s: invalid address", __func__)); 6045 KASSERT(pte2_load(pte2p) != 0, ("%s: PTE2 not in use", __func__)); 6046 6047 pte2_clear(pte2p); 6048 tlb_flush(pc->pc_qmap_addr); 6049 critical_exit(); 6050 } 6051 6052 /* 6053 * Copy the range specified by src_addr/len 6054 * from the source map to the range dst_addr/len 6055 * in the destination map. 6056 * 6057 * This routine is only advisory and need not do anything. 
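 *
 * For instance, vm_map_copy_entry() may call this routine while
 * forking so that the child can inherit managed mappings without
 * taking demand faults; the early returns below (mismatched
 * addresses, non-current source pmap) merely decline that
 * optimization, which is always permissible.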
6058 */ 6059 void 6060 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len, 6061 vm_offset_t src_addr) 6062 { 6063 struct spglist free; 6064 vm_offset_t addr; 6065 vm_offset_t end_addr = src_addr + len; 6066 vm_offset_t nextva; 6067 6068 if (dst_addr != src_addr) 6069 return; 6070 6071 if (!pmap_is_current(src_pmap)) 6072 return; 6073 6074 rw_wlock(&pvh_global_lock); 6075 if (dst_pmap < src_pmap) { 6076 PMAP_LOCK(dst_pmap); 6077 PMAP_LOCK(src_pmap); 6078 } else { 6079 PMAP_LOCK(src_pmap); 6080 PMAP_LOCK(dst_pmap); 6081 } 6082 sched_pin(); 6083 for (addr = src_addr; addr < end_addr; addr = nextva) { 6084 pt2_entry_t *src_pte2p, *dst_pte2p; 6085 vm_page_t dst_mpt2pg, src_mpt2pg; 6086 pt1_entry_t src_pte1; 6087 u_int pte1_idx; 6088 6089 KASSERT(addr < VM_MAXUSER_ADDRESS, 6090 ("%s: invalid to pmap_copy page tables", __func__)); 6091 6092 nextva = pte1_trunc(addr + PTE1_SIZE); 6093 if (nextva < addr) 6094 nextva = end_addr; 6095 6096 pte1_idx = pte1_index(addr); 6097 src_pte1 = src_pmap->pm_pt1[pte1_idx]; 6098 if (pte1_is_section(src_pte1)) { 6099 if ((addr & PTE1_OFFSET) != 0 || 6100 (addr + PTE1_SIZE) > end_addr) 6101 continue; 6102 if (dst_pmap->pm_pt1[pte1_idx] == 0 && 6103 (!pte1_is_managed(src_pte1) || 6104 pmap_pv_insert_pte1(dst_pmap, addr, src_pte1, 6105 PMAP_ENTER_NORECLAIM))) { 6106 dst_pmap->pm_pt1[pte1_idx] = src_pte1 & 6107 ~PTE1_W; 6108 dst_pmap->pm_stats.resident_count += 6109 PTE1_SIZE / PAGE_SIZE; 6110 pmap_pte1_mappings++; 6111 } 6112 continue; 6113 } else if (!pte1_is_link(src_pte1)) 6114 continue; 6115 6116 src_mpt2pg = PHYS_TO_VM_PAGE(pte1_link_pa(src_pte1)); 6117 6118 /* 6119 * We leave PT2s linked from the PT1 even when they are no longer 6120 * referenced, until all PT2s in a page are unreferenced. 6121 * 6122 * QQQ: It could be changed ... 6123 */ 6124 #if 0 /* single_pt2_link_is_cleared */ 6125 KASSERT(pt2_wirecount_get(src_mpt2pg, pte1_idx) > 0, 6126 ("%s: source page table page is unused", __func__)); 6127 #else 6128 if (pt2_wirecount_get(src_mpt2pg, pte1_idx) == 0) 6129 continue; 6130 #endif 6131 if (nextva > end_addr) 6132 nextva = end_addr; 6133 6134 src_pte2p = pt2map_entry(addr); 6135 while (addr < nextva) { 6136 pt2_entry_t temp_pte2; 6137 temp_pte2 = pte2_load(src_pte2p); 6138 /* 6139 * Only managed pages are virtually copied. 6140 */ 6141 if (pte2_is_managed(temp_pte2)) { 6142 dst_mpt2pg = pmap_allocpte2(dst_pmap, addr, 6143 PMAP_ENTER_NOSLEEP); 6144 if (dst_mpt2pg == NULL) 6145 goto out; 6146 dst_pte2p = pmap_pte2_quick(dst_pmap, addr); 6147 if (!pte2_is_valid(pte2_load(dst_pte2p)) && 6148 pmap_try_insert_pv_entry(dst_pmap, addr, 6149 PHYS_TO_VM_PAGE(pte2_pa(temp_pte2)))) { 6150 /* 6151 * Clear the wired, modified, and 6152 * accessed (referenced) bits 6153 * during the copy.
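 *
 * PTE2_W is cleared because wiring is not inherited by the
 * destination pmap.  Clearing PTE2_A and setting PTE2_NM
 * start the software-emulated referenced and modified bits
 * out clean: the child's first access and first write each
 * fault, letting the A and NM emulation in pmap_fault()
 * record them.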
6154 */ 6155 temp_pte2 &= ~(PTE2_W | PTE2_A); 6156 temp_pte2 |= PTE2_NM; 6157 pte2_store(dst_pte2p, temp_pte2); 6158 dst_pmap->pm_stats.resident_count++; 6159 } else { 6160 SLIST_INIT(&free); 6161 if (pmap_unwire_pt2(dst_pmap, addr, 6162 dst_mpt2pg, &free)) { 6163 pmap_tlb_flush(dst_pmap, addr); 6164 vm_page_free_pages_toq(&free, 6165 false); 6166 } 6167 goto out; 6168 } 6169 if (pt2_wirecount_get(dst_mpt2pg, pte1_idx) >= 6170 pt2_wirecount_get(src_mpt2pg, pte1_idx)) 6171 break; 6172 } 6173 addr += PAGE_SIZE; 6174 src_pte2p++; 6175 } 6176 } 6177 out: 6178 sched_unpin(); 6179 rw_wunlock(&pvh_global_lock); 6180 PMAP_UNLOCK(src_pmap); 6181 PMAP_UNLOCK(dst_pmap); 6182 } 6183 6184 /* 6185 * Increase the starting virtual address of the given mapping if a 6186 * different alignment might result in more section mappings. 6187 */ 6188 void 6189 pmap_align_superpage(vm_object_t object, vm_ooffset_t offset, 6190 vm_offset_t *addr, vm_size_t size) 6191 { 6192 vm_offset_t pte1_offset; 6193 6194 if (size < PTE1_SIZE) 6195 return; 6196 if (object != NULL && (object->flags & OBJ_COLORED) != 0) 6197 offset += ptoa(object->pg_color); 6198 pte1_offset = offset & PTE1_OFFSET; 6199 if (size - ((PTE1_SIZE - pte1_offset) & PTE1_OFFSET) < PTE1_SIZE || 6200 (*addr & PTE1_OFFSET) == pte1_offset) 6201 return; 6202 if ((*addr & PTE1_OFFSET) < pte1_offset) 6203 *addr = pte1_trunc(*addr) + pte1_offset; 6204 else 6205 *addr = pte1_roundup(*addr) + pte1_offset; 6206 } 6207 6208 void 6209 pmap_activate(struct thread *td) 6210 { 6211 pmap_t pmap, oldpmap; 6212 u_int cpuid, ttb; 6213 6214 PDEBUG(9, printf("%s: td = %08x\n", __func__, (uint32_t)td)); 6215 6216 critical_enter(); 6217 pmap = vmspace_pmap(td->td_proc->p_vmspace); 6218 oldpmap = PCPU_GET(curpmap); 6219 cpuid = PCPU_GET(cpuid); 6220 6221 #if defined(SMP) 6222 CPU_CLR_ATOMIC(cpuid, &oldpmap->pm_active); 6223 CPU_SET_ATOMIC(cpuid, &pmap->pm_active); 6224 #else 6225 CPU_CLR(cpuid, &oldpmap->pm_active); 6226 CPU_SET(cpuid, &pmap->pm_active); 6227 #endif 6228 6229 ttb = pmap_ttb_get(pmap); 6230 6231 /* 6232 * pmap_activate is for the current thread on the current cpu 6233 */ 6234 td->td_pcb->pcb_pagedir = ttb; 6235 cp15_ttbr_set(ttb); 6236 PCPU_SET(curpmap, pmap); 6237 critical_exit(); 6238 } 6239 6240 /* 6241 * Perform the pmap work for mincore. 
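 *
 * The returned value is a bitmask; for example, a referenced and
 * dirty 1 MB section mapping yields MINCORE_INCORE | MINCORE_SUPER |
 * MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED |
 * MINCORE_REFERENCED_OTHER, exactly as composed below.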
6242 */ 6243 int 6244 pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa) 6245 { 6246 pt1_entry_t *pte1p, pte1; 6247 pt2_entry_t *pte2p, pte2; 6248 vm_paddr_t pa; 6249 bool managed; 6250 int val; 6251 6252 PMAP_LOCK(pmap); 6253 retry: 6254 pte1p = pmap_pte1(pmap, addr); 6255 pte1 = pte1_load(pte1p); 6256 if (pte1_is_section(pte1)) { 6257 pa = trunc_page(pte1_pa(pte1) | (addr & PTE1_OFFSET)); 6258 managed = pte1_is_managed(pte1); 6259 val = MINCORE_SUPER | MINCORE_INCORE; 6260 if (pte1_is_dirty(pte1)) 6261 val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER; 6262 if (pte1 & PTE1_A) 6263 val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER; 6264 } else if (pte1_is_link(pte1)) { 6265 pte2p = pmap_pte2(pmap, addr); 6266 pte2 = pte2_load(pte2p); 6267 pmap_pte2_release(pte2p); 6268 pa = pte2_pa(pte2); 6269 managed = pte2_is_managed(pte2); 6270 val = MINCORE_INCORE; 6271 if (pte2_is_dirty(pte2)) 6272 val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER; 6273 if (pte2 & PTE2_A) 6274 val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER; 6275 } else { 6276 managed = false; 6277 val = 0; 6278 } 6279 if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) != 6280 (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && managed) { 6281 /* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */ 6282 if (vm_page_pa_tryrelock(pmap, pa, locked_pa)) 6283 goto retry; 6284 } else 6285 PA_UNLOCK_COND(*locked_pa); 6286 PMAP_UNLOCK(pmap); 6287 return (val); 6288 } 6289 6290 void 6291 pmap_kenter_device(vm_offset_t va, vm_size_t size, vm_paddr_t pa) 6292 { 6293 vm_offset_t sva; 6294 uint32_t l2attr; 6295 6296 KASSERT((size & PAGE_MASK) == 0, 6297 ("%s: device mapping not page-sized", __func__)); 6298 6299 sva = va; 6300 l2attr = vm_memattr_to_pte2(VM_MEMATTR_DEVICE); 6301 while (size != 0) { 6302 pmap_kenter_prot_attr(va, pa, PTE2_AP_KRW, l2attr); 6303 va += PAGE_SIZE; 6304 pa += PAGE_SIZE; 6305 size -= PAGE_SIZE; 6306 } 6307 tlb_flush_range(sva, va - sva); 6308 } 6309 6310 void 6311 pmap_kremove_device(vm_offset_t va, vm_size_t size) 6312 { 6313 vm_offset_t sva; 6314 6315 KASSERT((size & PAGE_MASK) == 0, 6316 ("%s: device mapping not page-sized", __func__)); 6317 6318 sva = va; 6319 while (size != 0) { 6320 pmap_kremove(va); 6321 va += PAGE_SIZE; 6322 size -= PAGE_SIZE; 6323 } 6324 tlb_flush_range(sva, va - sva); 6325 } 6326 6327 void 6328 pmap_set_pcb_pagedir(pmap_t pmap, struct pcb *pcb) 6329 { 6330 6331 pcb->pcb_pagedir = pmap_ttb_get(pmap); 6332 } 6333 6334 6335 /* 6336 * Clean L1 data cache range by physical address. 6337 * The range must be within a single page. 6338 */ 6339 static void 6340 pmap_dcache_wb_pou(vm_paddr_t pa, vm_size_t size, uint32_t attr) 6341 { 6342 pt2_entry_t *cmap2_pte2p; 6343 struct pcpu *pc; 6344 6345 KASSERT(((pa & PAGE_MASK) + size) <= PAGE_SIZE, 6346 ("%s: not on single page", __func__)); 6347 6348 sched_pin(); 6349 pc = get_pcpu(); 6350 cmap2_pte2p = pc->pc_cmap2_pte2p; 6351 mtx_lock(&pc->pc_cmap_lock); 6352 if (pte2_load(cmap2_pte2p) != 0) 6353 panic("%s: CMAP2 busy", __func__); 6354 pte2_store(cmap2_pte2p, PTE2_KERN_NG(pa, PTE2_AP_KRW, attr)); 6355 dcache_wb_pou((vm_offset_t)pc->pc_cmap2_addr + (pa & PAGE_MASK), size); 6356 pte2_clear(cmap2_pte2p); 6357 tlb_flush((vm_offset_t)pc->pc_cmap2_addr); 6358 sched_unpin(); 6359 mtx_unlock(&pc->pc_cmap_lock); 6360 } 6361 6362 /* 6363 * Sync instruction cache range which is not mapped yet. 
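 *
 * The loop below copes with a range that is not page-aligned.  For
 * example (hypothetical values), with pa == 0x10001f80 and size ==
 * 0x100, the first iteration writes back 0x80 bytes up to the page
 * boundary and the second the remaining 0x80 bytes; "offset" is
 * nonzero only for the first page.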
6364 */ 6365 void 6366 cache_icache_sync_fresh(vm_offset_t va, vm_paddr_t pa, vm_size_t size) 6367 { 6368 uint32_t len, offset; 6369 vm_page_t m; 6370 6371 /* Write back d-cache on given address range. */ 6372 offset = pa & PAGE_MASK; 6373 for ( ; size != 0; size -= len, pa += len, offset = 0) { 6374 len = min(PAGE_SIZE - offset, size); 6375 m = PHYS_TO_VM_PAGE(pa); 6376 KASSERT(m != NULL, ("%s: vm_page_t is null for %#x", 6377 __func__, pa)); 6378 pmap_dcache_wb_pou(pa, len, vm_page_pte2_attr(m)); 6379 } 6380 /* 6381 * The I-cache is VIPT. The only way to flush all virtual mappings 6382 * of a given physical address is to invalidate the whole I-cache. 6383 */ 6384 icache_inv_all(); 6385 } 6386 6387 void 6388 pmap_sync_icache(pmap_t pmap, vm_offset_t va, vm_size_t size) 6389 { 6390 6391 /* Write back d-cache on given address range. */ 6392 if (va >= VM_MIN_KERNEL_ADDRESS) { 6393 dcache_wb_pou(va, size); 6394 } else { 6395 uint32_t len, offset; 6396 vm_paddr_t pa; 6397 vm_page_t m; 6398 6399 offset = va & PAGE_MASK; 6400 for ( ; size != 0; size -= len, va += len, offset = 0) { 6401 pa = pmap_extract(pmap, va); /* offset is preserved */ 6402 len = min(PAGE_SIZE - offset, size); 6403 m = PHYS_TO_VM_PAGE(pa); 6404 KASSERT(m != NULL, ("%s: vm_page_t is null for %#x", 6405 __func__, pa)); 6406 pmap_dcache_wb_pou(pa, len, vm_page_pte2_attr(m)); 6407 } 6408 } 6409 /* 6410 * The I-cache is VIPT. The only way to flush all virtual mappings 6411 * of a given physical address is to invalidate the whole I-cache. 6412 */ 6413 icache_inv_all(); 6414 } 6415 6416 /* 6417 * The implementation of pmap_fault() uses the IN_RANGE2() macro, 6418 * which depends on the given range size being a power of 2. 6419 */ 6420 CTASSERT(powerof2(NB_IN_PT1)); 6421 CTASSERT(powerof2(PT2MAP_SIZE)); 6422 6423 #define IN_RANGE2(addr, start, size) \ 6424 ((vm_offset_t)(start) == ((vm_offset_t)(addr) & ~((size) - 1))) 6425 6426 /* 6427 * Handle access and R/W emulation faults. 6428 */ 6429 int 6430 pmap_fault(pmap_t pmap, vm_offset_t far, uint32_t fsr, int idx, bool usermode) 6431 { 6432 pt1_entry_t *pte1p, pte1; 6433 pt2_entry_t *pte2p, pte2; 6434 6435 if (pmap == NULL) 6436 pmap = kernel_pmap; 6437 6438 /* 6439 * In the kernel, we should never get an abort with a FAR that lies 6440 * within the pmap->pm_pt1 or PT2MAP address ranges. If it happens, 6441 * stop here, print a useful abort message, and possibly enter the 6442 * debugger; otherwise it likely ends in a never-ending loop of aborts. 6443 */ 6444 if (__predict_false(IN_RANGE2(far, pmap->pm_pt1, NB_IN_PT1))) { 6445 /* 6446 * All L1 tables should always be mapped and present. 6447 * However, only the current one is checked here. For user mode, 6448 * only a permission abort from a malicious user is not fatal, 6449 * and neither is an alignment abort, as it may have higher priority. 6450 */ 6451 if (!usermode || (idx != FAULT_ALIGN && idx != FAULT_PERM_L2)) { 6452 CTR4(KTR_PMAP, "%s: pmap %#x pm_pt1 %#x far %#x", 6453 __func__, pmap, pmap->pm_pt1, far); 6454 panic("%s: pm_pt1 abort", __func__); 6455 } 6456 return (KERN_INVALID_ADDRESS); 6457 } 6458 if (__predict_false(IN_RANGE2(far, PT2MAP, PT2MAP_SIZE))) { 6459 /* 6460 * PT2MAP should always be mapped and present in the current 6461 * L1 table. However, only existing L2 tables are mapped 6462 * in PT2MAP. For user mode, only L2 translation and permission 6463 * aborts from a malicious user are not fatal, 6464 * and neither is an alignment abort, as it may have higher priority.
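 *
 * As a worked example of the IN_RANGE2() test (base address
 * hypothetical): with PT2MAP == 0xb0000000 and PT2MAP_SIZE == 4 MB,
 * the macro reduces to (far & ~0x003fffff) == 0xb0000000, a single
 * mask-and-compare, which is why both range sizes are CTASSERTed
 * above to be powers of 2.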
6465 */ 6466 if (!usermode || (idx != FAULT_ALIGN && 6467 idx != FAULT_TRAN_L2 && idx != FAULT_PERM_L2)) { 6468 CTR4(KTR_PMAP, "%s: pmap %#x PT2MAP %#x far %#x", 6469 __func__, pmap, PT2MAP, far); 6470 panic("%s: PT2MAP abort", __func__); 6471 } 6472 return (KERN_INVALID_ADDRESS); 6473 } 6474 6475 /* 6476 * The pmap lock is used below to handle access and R/W emulation 6477 * aborts. These used to be handled by atomic operations, so the new 6478 * situation needs some analysis to answer the following question: 6479 * Is it safe to use the lock even for these aborts? 6480 * 6481 * In general, two cases may occur: 6482 * 6483 * (1) An abort while the pmap lock is already held - this should not 6484 * happen, as the pmap lock is not recursive. Moreover, only internal 6485 * kernel data should be accessed under the pmap lock, and such data 6486 * should be mapped with the A bit set and the NM bit cleared. If a 6487 * double abort does happen, the mapping of the data that caused it 6488 * must be fixed. Further, all new mappings are always made with the 6489 * A bit set, and the bit can be cleared only on managed mappings. 6490 * 6491 * (2) An abort while one or more other locks are held - this can 6492 * already happen. However, it makes no difference here whether it is 6493 * an access or R/W emulation abort or some other abort. 6494 */ 6495 6496 PMAP_LOCK(pmap); 6497 #ifdef INVARIANTS 6498 pte1 = pte1_load(pmap_pte1(pmap, far)); 6499 if (pte1_is_link(pte1)) { 6500 /* 6501 * Check in advance that the associated L2 page table is mapped 6502 * into the PT2MAP space. Note that a faulty access to an unmapped 6503 * L2 page table is caught by the more general check above, which 6504 * verifies that "far" does not lie in the PT2MAP space. Note also 6505 * that the L1 page table and PT2TAB always exist and are mapped. 6506 */ 6507 pte2 = pt2tab_load(pmap_pt2tab_entry(pmap, far)); 6508 if (!pte2_is_valid(pte2)) 6509 panic("%s: missing L2 page table (%p, %#x)", 6510 __func__, pmap, far); 6511 } 6512 #endif 6513 #ifdef SMP 6514 /* 6515 * Special treatment is needed due to the break-before-make approach 6516 * used when a pte1 is updated for a userland mapping during section 6517 * promotion or demotion. If not caught here, pmap_enter() could find 6518 * a section mapping at the faulting address. That is not allowed. 6519 */ 6520 if (idx == FAULT_TRAN_L1 && usermode && cp15_ats1cur_check(far) == 0) { 6521 PMAP_UNLOCK(pmap); 6522 return (KERN_SUCCESS); 6523 } 6524 #endif 6525 /* 6526 * Access bits for page and section. Note that the entry 6527 * is not in the TLB yet, so a TLB flush is not necessary. 6528 * 6529 * QQQ: This is hardware emulation, we do not call userret() 6530 * for aborts from user mode. 6531 */ 6532 if (idx == FAULT_ACCESS_L2) { 6533 pte1 = pte1_load(pmap_pte1(pmap, far)); 6534 if (pte1_is_link(pte1)) { 6535 /* L2 page table should exist and be mapped. */ 6536 pte2p = pt2map_entry(far); 6537 pte2 = pte2_load(pte2p); 6538 if (pte2_is_valid(pte2)) { 6539 pte2_store(pte2p, pte2 | PTE2_A); 6540 PMAP_UNLOCK(pmap); 6541 return (KERN_SUCCESS); 6542 } 6543 } else { 6544 /* 6545 * We got an L2 access fault, but the PTE1 is not a link. 6546 * Probably some race happened; do nothing.
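 *
 * (For instance, another thread may have removed the L2 page
 * table or promoted the mapping to a section between the abort
 * and the pte1_load() above.)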
6547 */ 6548 CTR3(KTR_PMAP, "%s: FAULT_ACCESS_L2 - pmap %#x far %#x", 6549 __func__, pmap, far); 6550 PMAP_UNLOCK(pmap); 6551 return (KERN_SUCCESS); 6552 } 6553 } 6554 if (idx == FAULT_ACCESS_L1) { 6555 pte1p = pmap_pte1(pmap, far); 6556 pte1 = pte1_load(pte1p); 6557 if (pte1_is_section(pte1)) { 6558 pte1_store(pte1p, pte1 | PTE1_A); 6559 PMAP_UNLOCK(pmap); 6560 return (KERN_SUCCESS); 6561 } else { 6562 /* 6563 * We got an L1 access fault, but the PTE1 is not a section 6564 * mapping. Probably some race happened; do nothing. 6565 */ 6566 CTR3(KTR_PMAP, "%s: FAULT_ACCESS_L1 - pmap %#x far %#x", 6567 __func__, pmap, far); 6568 PMAP_UNLOCK(pmap); 6569 return (KERN_SUCCESS); 6570 } 6571 } 6572 6573 /* 6574 * Handle modify bits for page and section. Note that the modify 6575 * bit is emulated in software, so PTEx_RO is the software read-only 6576 * bit and the PTEx_NM flag is the real hardware read-only bit. 6577 * 6578 * QQQ: This is hardware emulation, we do not call userret() 6579 * for aborts from user mode. 6580 */ 6581 if ((fsr & FSR_WNR) && (idx == FAULT_PERM_L2)) { 6582 pte1 = pte1_load(pmap_pte1(pmap, far)); 6583 if (pte1_is_link(pte1)) { 6584 /* L2 page table should exist and be mapped. */ 6585 pte2p = pt2map_entry(far); 6586 pte2 = pte2_load(pte2p); 6587 if (pte2_is_valid(pte2) && !(pte2 & PTE2_RO) && 6588 (pte2 & PTE2_NM)) { 6589 pte2_store(pte2p, pte2 & ~PTE2_NM); 6590 tlb_flush(trunc_page(far)); 6591 PMAP_UNLOCK(pmap); 6592 return (KERN_SUCCESS); 6593 } 6594 } else { 6595 /* 6596 * We got an L2 permission fault, but the PTE1 is not a link. 6597 * Probably some race happened; do nothing. 6598 */ 6599 CTR3(KTR_PMAP, "%s: FAULT_PERM_L2 - pmap %#x far %#x", 6600 __func__, pmap, far); 6601 PMAP_UNLOCK(pmap); 6602 return (KERN_SUCCESS); 6603 } 6604 } 6605 if ((fsr & FSR_WNR) && (idx == FAULT_PERM_L1)) { 6606 pte1p = pmap_pte1(pmap, far); 6607 pte1 = pte1_load(pte1p); 6608 if (pte1_is_section(pte1)) { 6609 if (!(pte1 & PTE1_RO) && (pte1 & PTE1_NM)) { 6610 pte1_store(pte1p, pte1 & ~PTE1_NM); 6611 tlb_flush(pte1_trunc(far)); 6612 PMAP_UNLOCK(pmap); 6613 return (KERN_SUCCESS); 6614 } 6615 } else { 6616 /* 6617 * We got an L1 permission fault, but the PTE1 is not a section 6618 * mapping. Probably some race happened; do nothing. 6619 */ 6620 CTR3(KTR_PMAP, "%s: FAULT_PERM_L1 - pmap %#x far %#x", 6621 __func__, pmap, far); 6622 PMAP_UNLOCK(pmap); 6623 return (KERN_SUCCESS); 6624 } 6625 } 6626 6627 /* 6628 * QQQ: The code above, mainly the fast handling of access and 6629 * modify bit aborts, could be moved to assembly. From here on 6630 * we deal with the aborts that are not fast. 6631 */ 6632 PMAP_UNLOCK(pmap); 6633 return (KERN_FAILURE); 6634 } 6635 6636 #if defined(PMAP_DEBUG) 6637 /* 6638 * Reuses the KVA mapping used by the pmap_zero_page() function.
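 *
 * (PMAP_DEBUG only: panics if the given page is not entirely zero.)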
6639 */ 6640 static void 6641 pmap_zero_page_check(vm_page_t m) 6642 { 6643 pt2_entry_t *cmap2_pte2p; 6644 uint32_t *p, *end; 6645 struct pcpu *pc; 6646 6647 sched_pin(); 6648 pc = get_pcpu(); 6649 cmap2_pte2p = pc->pc_cmap2_pte2p; 6650 mtx_lock(&pc->pc_cmap_lock); 6651 if (pte2_load(cmap2_pte2p) != 0) 6652 panic("%s: CMAP2 busy", __func__); 6653 pte2_store(cmap2_pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(m), PTE2_AP_KRW, 6654 vm_page_pte2_attr(m))); 6655 end = (uint32_t*)(pc->pc_cmap2_addr + PAGE_SIZE); 6656 for (p = (uint32_t*)pc->pc_cmap2_addr; p < end; p++) 6657 if (*p != 0) 6658 panic("%s: page %p not zero, va: %p", __func__, m, 6659 pc->pc_cmap2_addr); 6660 pte2_clear(cmap2_pte2p); 6661 tlb_flush((vm_offset_t)pc->pc_cmap2_addr); 6662 sched_unpin(); 6663 mtx_unlock(&pc->pc_cmap_lock); 6664 } 6665 6666 int 6667 pmap_pid_dump(int pid) 6668 { 6669 pmap_t pmap; 6670 struct proc *p; 6671 int npte2 = 0; 6672 int i, j, index; 6673 6674 sx_slock(&allproc_lock); 6675 FOREACH_PROC_IN_SYSTEM(p) { 6676 if (p->p_pid != pid || p->p_vmspace == NULL) 6677 continue; 6678 index = 0; 6679 pmap = vmspace_pmap(p->p_vmspace); 6680 for (i = 0; i < NPTE1_IN_PT1; i++) { 6681 pt1_entry_t pte1; 6682 pt2_entry_t *pte2p, pte2; 6683 vm_offset_t base, va; 6684 vm_paddr_t pa; 6685 vm_page_t m; 6686 6687 base = i << PTE1_SHIFT; 6688 pte1 = pte1_load(&pmap->pm_pt1[i]); 6689 6690 if (pte1_is_section(pte1)) { 6691 /* 6692 * QQQ: Do something here! 6693 */ 6694 } else if (pte1_is_link(pte1)) { 6695 for (j = 0; j < NPTE2_IN_PT2; j++) { 6696 va = base + (j << PAGE_SHIFT); 6697 if (va >= VM_MIN_KERNEL_ADDRESS) { 6698 if (index) { 6699 index = 0; 6700 printf("\n"); 6701 } 6702 sx_sunlock(&allproc_lock); 6703 return (npte2); 6704 } 6705 pte2p = pmap_pte2(pmap, va); 6706 pte2 = pte2_load(pte2p); 6707 pmap_pte2_release(pte2p); 6708 if (!pte2_is_valid(pte2)) 6709 continue; 6710 6711 pa = pte2_pa(pte2); 6712 m = PHYS_TO_VM_PAGE(pa); 6713 printf("va: 0x%x, pa: 0x%x, h: %d, w:" 6714 " %d, f: 0x%x", va, pa, 6715 m->hold_count, m->wire_count, 6716 m->flags); 6717 npte2++; 6718 index++; 6719 if (index >= 2) { 6720 index = 0; 6721 printf("\n"); 6722 } else { 6723 printf(" "); 6724 } 6725 } 6726 } 6727 } 6728 } 6729 sx_sunlock(&allproc_lock); 6730 return (npte2); 6731 } 6732 6733 #endif 6734 6735 #ifdef DDB 6736 static pt2_entry_t * 6737 pmap_pte2_ddb(pmap_t pmap, vm_offset_t va) 6738 { 6739 pt1_entry_t pte1; 6740 vm_paddr_t pt2pg_pa; 6741 6742 pte1 = pte1_load(pmap_pte1(pmap, va)); 6743 if (!pte1_is_link(pte1)) 6744 return (NULL); 6745 6746 if (pmap_is_current(pmap)) 6747 return (pt2map_entry(va)); 6748 6749 /* Note that L2 page table size is not equal to PAGE_SIZE. 
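 *
 * Each L2 page table occupies only NB_IN_PT2 (1 KB) while PAGE_SIZE
 * is 4 KB, so four PT2s share one physical page.  Hence the link PA
 * is truncated to a page boundary below, PMAP3/PADDR3 map that whole
 * page, and "arm32_btop(va) & (NPTE2_IN_PG - 1)" selects the PTE2
 * within it.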
*/ 6750 pt2pg_pa = trunc_page(pte1_link_pa(pte1)); 6751 if (pte2_pa(pte2_load(PMAP3)) != pt2pg_pa) { 6752 pte2_store(PMAP3, PTE2_KPT(pt2pg_pa)); 6753 #ifdef SMP 6754 PMAP3cpu = PCPU_GET(cpuid); 6755 #endif 6756 tlb_flush_local((vm_offset_t)PADDR3); 6757 } 6758 #ifdef SMP 6759 else if (PMAP3cpu != PCPU_GET(cpuid)) { 6760 PMAP3cpu = PCPU_GET(cpuid); 6761 tlb_flush_local((vm_offset_t)PADDR3); 6762 } 6763 #endif 6764 return (PADDR3 + (arm32_btop(va) & (NPTE2_IN_PG - 1))); 6765 } 6766 6767 static void 6768 dump_pmap(pmap_t pmap) 6769 { 6770 6771 printf("pmap %p\n", pmap); 6772 printf(" pm_pt1: %p\n", pmap->pm_pt1); 6773 printf(" pm_pt2tab: %p\n", pmap->pm_pt2tab); 6774 printf(" pm_active: 0x%08lX\n", pmap->pm_active.__bits[0]); 6775 } 6776 6777 DB_SHOW_COMMAND(pmaps, pmap_list_pmaps) 6778 { 6779 6780 pmap_t pmap; 6781 LIST_FOREACH(pmap, &allpmaps, pm_list) { 6782 dump_pmap(pmap); 6783 } 6784 } 6785 6786 static int 6787 pte2_class(pt2_entry_t pte2) 6788 { 6789 int cls; 6790 6791 cls = (pte2 >> 2) & 0x03; 6792 cls |= (pte2 >> 4) & 0x04; 6793 return (cls); 6794 } 6795 6796 static void 6797 dump_section(pmap_t pmap, uint32_t pte1_idx) 6798 { 6799 } 6800 6801 static void 6802 dump_link(pmap_t pmap, uint32_t pte1_idx, boolean_t invalid_ok) 6803 { 6804 uint32_t i; 6805 vm_offset_t va; 6806 pt2_entry_t *pte2p, pte2; 6807 vm_page_t m; 6808 6809 va = pte1_idx << PTE1_SHIFT; 6810 pte2p = pmap_pte2_ddb(pmap, va); 6811 for (i = 0; i < NPTE2_IN_PT2; i++, pte2p++, va += PAGE_SIZE) { 6812 pte2 = pte2_load(pte2p); 6813 if (pte2 == 0) 6814 continue; 6815 if (!pte2_is_valid(pte2)) { 6816 printf(" 0x%08X: 0x%08X", va, pte2); 6817 if (!invalid_ok) 6818 printf(" - not valid !!!"); 6819 printf("\n"); 6820 continue; 6821 } 6822 m = PHYS_TO_VM_PAGE(pte2_pa(pte2)); 6823 printf(" 0x%08X: 0x%08X, TEX%d, s:%d, g:%d, m:%p", va , pte2, 6824 pte2_class(pte2), !!(pte2 & PTE2_S), !(pte2 & PTE2_NG), m); 6825 if (m != NULL) { 6826 printf(" v:%d h:%d w:%d f:0x%04X\n", m->valid, 6827 m->hold_count, m->wire_count, m->flags); 6828 } else { 6829 printf("\n"); 6830 } 6831 } 6832 } 6833 6834 static __inline boolean_t 6835 is_pv_chunk_space(vm_offset_t va) 6836 { 6837 6838 if ((((vm_offset_t)pv_chunkbase) <= va) && 6839 (va < ((vm_offset_t)pv_chunkbase + PAGE_SIZE * pv_maxchunks))) 6840 return (TRUE); 6841 return (FALSE); 6842 } 6843 6844 DB_SHOW_COMMAND(pmap, pmap_pmap_print) 6845 { 6846 /* XXX convert args. */ 6847 pmap_t pmap = (pmap_t)addr; 6848 pt1_entry_t pte1; 6849 pt2_entry_t pte2; 6850 vm_offset_t va, eva; 6851 vm_page_t m; 6852 uint32_t i; 6853 boolean_t invalid_ok, dump_link_ok, dump_pv_chunk; 6854 6855 if (have_addr) { 6856 pmap_t pm; 6857 6858 LIST_FOREACH(pm, &allpmaps, pm_list) 6859 if (pm == pmap) break; 6860 if (pm == NULL) { 6861 printf("given pmap %p is not in allpmaps list\n", pmap); 6862 return; 6863 } 6864 } else 6865 pmap = PCPU_GET(curpmap); 6866 6867 eva = (modif[0] == 'u') ? 
VM_MAXUSER_ADDRESS : 0xFFFFFFFF; 6868 dump_pv_chunk = FALSE; /* XXX evaluate from modif[] */ 6869 6870 printf("pmap: 0x%08X\n", (uint32_t)pmap); 6871 printf("PT2MAP: 0x%08X\n", (uint32_t)PT2MAP); 6872 printf("pt2tab: 0x%08X\n", (uint32_t)pmap->pm_pt2tab); 6873 6874 for(i = 0; i < NPTE1_IN_PT1; i++) { 6875 pte1 = pte1_load(&pmap->pm_pt1[i]); 6876 if (pte1 == 0) 6877 continue; 6878 va = i << PTE1_SHIFT; 6879 if (va >= eva) 6880 break; 6881 6882 if (pte1_is_section(pte1)) { 6883 printf("0x%08X: Section 0x%08X, s:%d g:%d\n", va, pte1, 6884 !!(pte1 & PTE1_S), !(pte1 & PTE1_NG)); 6885 dump_section(pmap, i); 6886 } else if (pte1_is_link(pte1)) { 6887 dump_link_ok = TRUE; 6888 invalid_ok = FALSE; 6889 pte2 = pte2_load(pmap_pt2tab_entry(pmap, va)); 6890 m = PHYS_TO_VM_PAGE(pte1_link_pa(pte1)); 6891 printf("0x%08X: Link 0x%08X, pt2tab: 0x%08X m: %p", 6892 va, pte1, pte2, m); 6893 if (is_pv_chunk_space(va)) { 6894 printf(" - pv_chunk space"); 6895 if (dump_pv_chunk) 6896 invalid_ok = TRUE; 6897 else 6898 dump_link_ok = FALSE; 6899 } 6900 else if (m != NULL) 6901 printf(" w:%d w2:%u", m->wire_count, 6902 pt2_wirecount_get(m, pte1_index(va))); 6903 if (pte2 == 0) 6904 printf(" !!! pt2tab entry is ZERO"); 6905 else if (pte2_pa(pte1) != pte2_pa(pte2)) 6906 printf(" !!! pt2tab entry is DIFFERENT - m: %p", 6907 PHYS_TO_VM_PAGE(pte2_pa(pte2))); 6908 printf("\n"); 6909 if (dump_link_ok) 6910 dump_link(pmap, i, invalid_ok); 6911 } else 6912 printf("0x%08X: Invalid entry 0x%08X\n", va, pte1); 6913 } 6914 } 6915 6916 static void 6917 dump_pt2tab(pmap_t pmap) 6918 { 6919 uint32_t i; 6920 pt2_entry_t pte2; 6921 vm_offset_t va; 6922 vm_paddr_t pa; 6923 vm_page_t m; 6924 6925 printf("PT2TAB:\n"); 6926 for (i = 0; i < PT2TAB_ENTRIES; i++) { 6927 pte2 = pte2_load(&pmap->pm_pt2tab[i]); 6928 if (!pte2_is_valid(pte2)) 6929 continue; 6930 va = i << PT2TAB_SHIFT; 6931 pa = pte2_pa(pte2); 6932 m = PHYS_TO_VM_PAGE(pa); 6933 printf(" 0x%08X: 0x%08X, TEX%d, s:%d, m:%p", va, pte2, 6934 pte2_class(pte2), !!(pte2 & PTE2_S), m); 6935 if (m != NULL) 6936 printf(" , h: %d, w: %d, f: 0x%04X pidx: %lld", 6937 m->hold_count, m->wire_count, m->flags, m->pindex); 6938 printf("\n"); 6939 } 6940 } 6941 6942 DB_SHOW_COMMAND(pmap_pt2tab, pmap_pt2tab_print) 6943 { 6944 /* XXX convert args. */ 6945 pmap_t pmap = (pmap_t)addr; 6946 pt1_entry_t pte1; 6947 pt2_entry_t pte2; 6948 vm_offset_t va; 6949 uint32_t i, start; 6950 6951 if (have_addr) { 6952 printf("supported only on current pmap\n"); 6953 return; 6954 } 6955 6956 pmap = PCPU_GET(curpmap); 6957 printf("curpmap: 0x%08X\n", (uint32_t)pmap); 6958 printf("PT2MAP: 0x%08X\n", (uint32_t)PT2MAP); 6959 printf("pt2tab: 0x%08X\n", (uint32_t)pmap->pm_pt2tab); 6960 6961 start = pte1_index((vm_offset_t)PT2MAP); 6962 for (i = start; i < (start + NPT2_IN_PT2TAB); i++) { 6963 pte1 = pte1_load(&pmap->pm_pt1[i]); 6964 if (pte1 == 0) 6965 continue; 6966 va = i << PTE1_SHIFT; 6967 if (pte1_is_section(pte1)) { 6968 printf("0x%08X: Section 0x%08X, s:%d\n", va, pte1, 6969 !!(pte1 & PTE1_S)); 6970 dump_section(pmap, i); 6971 } else if (pte1_is_link(pte1)) { 6972 pte2 = pte2_load(pmap_pt2tab_entry(pmap, va)); 6973 printf("0x%08X: Link 0x%08X, pt2tab: 0x%08X\n", va, 6974 pte1, pte2); 6975 if (pte2 == 0) 6976 printf(" !!! pt2tab entry is ZERO\n"); 6977 } else 6978 printf("0x%08X: Invalid entry 0x%08X\n", va, pte1); 6979 } 6980 dump_pt2tab(pmap); 6981 } 6982 #endif 6983