/*-
 * SPDX-License-Identifier: BSD-3-Clause AND BSD-2-Clause
 *
 * Copyright (c) 1991 Regents of the University of California.
 * Copyright (c) 1994 John S. Dyson
 * Copyright (c) 1994 David Greenman
 * Copyright (c) 2005-2010 Alan L. Cox <alc@cs.rice.edu>
 * Copyright (c) 2014-2016 Svatopluk Kraus <skra@FreeBSD.org>
 * Copyright (c) 2014-2016 Michal Meloun <mmel@FreeBSD.org>
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2003 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Jake Burkholder,
 * Safeport Network Services, and Network Associates Laboratories, the
 * Security Research Division of Network Associates, Inc. under
 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
 * CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
/*
 * Manages physical address maps.
 *
 * Since the information managed by this module is
 * also stored by the logical address mapping module,
 * this module may throw away valid virtual-to-physical
 * mappings at almost any time.  However, invalidations
 * of virtual-to-physical mappings must be done as
 * requested.
 *
 * In order to cope with hardware architectures which
 * make virtual-to-physical map invalidates expensive,
 * this module may delay invalidate or reduced protection
 * operations until such time as they are actually
 * necessary.  This module is given full information as
 * to which processors are currently using which maps,
 * and to when physical maps must be made correct.
 */

#include "opt_vm.h"
#include "opt_pmap.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/malloc.h>
#include <sys/vmmeter.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/sf_buf.h>
#include <sys/smp.h>
#include <sys/sched.h>
#include <sys/sysctl.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <vm/vm.h>
#include <vm/uma.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_phys.h>
#include <vm/vm_extern.h>
#include <vm/vm_reserv.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <machine/md_var.h>
#include <machine/pmap_var.h>
#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/sf_buf.h>
#ifdef SMP
#include <machine/smp.h>
#endif
#ifndef PMAP_SHPGPERPROC
#define PMAP_SHPGPERPROC 200
#endif

#ifndef DIAGNOSTIC
#define PMAP_INLINE __inline
#else
#define PMAP_INLINE
#endif

#ifdef PMAP_DEBUG
static void pmap_zero_page_check(vm_page_t m);
void pmap_debug(int level);
int pmap_pid_dump(int pid);

#define PDEBUG(_lev_,_stat_) \
    if (pmap_debug_level >= (_lev_)) \
        ((_stat_))
#define dprintf printf
int pmap_debug_level = 1;
#else /* PMAP_DEBUG */
#define PDEBUG(_lev_,_stat_) /* Nothing */
#define dprintf(x, arg...)
#endif /* PMAP_DEBUG */

/*
 * Level 2 page tables map definition ('max' is excluded).
 */

#define PT2V_MIN_ADDRESS    ((vm_offset_t)PT2MAP)
#define PT2V_MAX_ADDRESS    ((vm_offset_t)PT2MAP + PT2MAP_SIZE)

#define UPT2V_MIN_ADDRESS   ((vm_offset_t)PT2MAP)
#define UPT2V_MAX_ADDRESS \
    ((vm_offset_t)(PT2MAP + (KERNBASE >> PT2MAP_SHIFT)))

/*
 * Promotion to a 1MB (PTE1) page mapping requires that the corresponding
 * 4KB (PTE2) page mappings have identical settings for the following fields:
 */
#define PTE2_PROMOTE    (PTE2_V | PTE2_A | PTE2_NM | PTE2_S | PTE2_NG | \
                         PTE2_NX | PTE2_RO | PTE2_U | PTE2_W | \
                         PTE2_ATTR_MASK)

#define PTE1_PROMOTE    (PTE1_V | PTE1_A | PTE1_NM | PTE1_S | PTE1_NG | \
                         PTE1_NX | PTE1_RO | PTE1_U | PTE1_W | \
                         PTE1_ATTR_MASK)

#define ATTR_TO_L1(l2_attr) ((((l2_attr) & L2_TEX0) ? L1_S_TEX0 : 0) | \
                             (((l2_attr) & L2_C) ? L1_S_C : 0) | \
                             (((l2_attr) & L2_B) ? L1_S_B : 0) | \
                             (((l2_attr) & PTE2_A) ? PTE1_A : 0) | \
                             (((l2_attr) & PTE2_NM) ? PTE1_NM : 0) | \
                             (((l2_attr) & PTE2_S) ? PTE1_S : 0) | \
                             (((l2_attr) & PTE2_NG) ? PTE1_NG : 0) | \
                             (((l2_attr) & PTE2_NX) ? PTE1_NX : 0) | \
                             (((l2_attr) & PTE2_RO) ? PTE1_RO : 0) | \
                             (((l2_attr) & PTE2_U) ? PTE1_U : 0) | \
                             (((l2_attr) & PTE2_W) ? PTE1_W : 0))

#define ATTR_TO_L2(l1_attr) ((((l1_attr) & L1_S_TEX0) ? L2_TEX0 : 0) | \
                             (((l1_attr) & L1_S_C) ? L2_C : 0) | \
                             (((l1_attr) & L1_S_B) ? L2_B : 0) | \
                             (((l1_attr) & PTE1_A) ? PTE2_A : 0) | \
                             (((l1_attr) & PTE1_NM) ? PTE2_NM : 0) | \
                             (((l1_attr) & PTE1_S) ? PTE2_S : 0) | \
                             (((l1_attr) & PTE1_NG) ? PTE2_NG : 0) | \
                             (((l1_attr) & PTE1_NX) ? PTE2_NX : 0) | \
                             (((l1_attr) & PTE1_RO) ? PTE2_RO : 0) | \
                             (((l1_attr) & PTE1_U) ? PTE2_U : 0) | \
                             (((l1_attr) & PTE1_W) ? PTE2_W : 0))

/*
 * PTE2 descriptors creation macros.
 */
#define PTE2_ATTR_DEFAULT   vm_memattr_to_pte2(VM_MEMATTR_DEFAULT)
#define PTE2_ATTR_PT        vm_memattr_to_pte2(pt_memattr)

#define PTE2_KPT(pa)        PTE2_KERN(pa, PTE2_AP_KRW, PTE2_ATTR_PT)
#define PTE2_KPT_NG(pa)     PTE2_KERN_NG(pa, PTE2_AP_KRW, PTE2_ATTR_PT)

#define PTE2_KRW(pa)        PTE2_KERN(pa, PTE2_AP_KRW, PTE2_ATTR_DEFAULT)
#define PTE2_KRO(pa)        PTE2_KERN(pa, PTE2_AP_KR, PTE2_ATTR_DEFAULT)

#define PV_STATS
#ifdef PV_STATS
#define PV_STAT(x)  do { x ; } while (0)
#else
#define PV_STAT(x)  do { } while (0)
#endif

/*
 * The boot_pt1 is used temporarily in the very early boot stage as the L1
 * page table.  We can init many things with no memory allocation thanks to
 * its static allocation and this brings two main advantages:
 * (1) other cores can be started very simply,
 * (2) various boot loaders can be supported as their arguments can be
 *     processed in virtual address space and can be moved to a safe
 *     location before the first allocation happens.
 * The only disadvantage is that boot_pt1 is used only in the very early
 * boot stage.  However, the table is uninitialized and so lies in bss.
 * Therefore the kernel image size is not influenced.
 *
 * QQQ: In the future, maybe, boot_pt1 can be used for soft reset and
 *      CPU suspend/resume game.
 */
extern pt1_entry_t boot_pt1[];

vm_paddr_t base_pt1;
pt1_entry_t *kern_pt1;
pt2_entry_t *kern_pt2tab;
pt2_entry_t *PT2MAP;

static uint32_t ttb_flags;
static vm_memattr_t pt_memattr;
ttb_entry_t pmap_kern_ttb;

struct pmap kernel_pmap_store;
LIST_HEAD(pmaplist, pmap);
static struct pmaplist allpmaps;
static struct mtx allpmaps_lock;

vm_offset_t virtual_avail;  /* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end;    /* VA of last avail page (end of kernel AS) */

static vm_offset_t kernel_vm_end_new;
vm_offset_t kernel_vm_end = KERNBASE + NKPT2PG * NPT2_IN_PG * PTE1_SIZE;
vm_offset_t vm_max_kernel_address;
vm_paddr_t kernel_l1pa;

static struct rwlock __aligned(CACHE_LINE_SIZE) pvh_global_lock;

/*
 * Data for the pv entry allocation mechanism
 */
static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
static struct md_page *pv_table; /* XXX: Is only the pv list in md_page used? */
static int shpgperproc = PMAP_SHPGPERPROC;

struct pv_chunk *pv_chunkbase;  /* KVA block for pv_chunks */
int pv_maxchunks;               /* How many chunks we have KVA for */
vm_offset_t pv_vafree;          /* freelist stored in the PTE */

vm_paddr_t first_managed_pa;
#define pa_to_pvh(pa)   (&pv_table[pte1_index(pa - first_managed_pa)])

/*
 * All those kernel PT submaps that BSD is so fond of
 */
caddr_t _tmppt = 0;

/*
 * Crashdump maps.
 */
static caddr_t crashdumpmap;

static pt2_entry_t *PMAP1 = NULL, *PMAP2;
static pt2_entry_t *PADDR1 = NULL, *PADDR2;
#ifdef DDB
static pt2_entry_t *PMAP3;
static pt2_entry_t *PADDR3;
static int PMAP3cpu __unused; /* for SMP only */
#endif
#ifdef SMP
static int PMAP1cpu;
static int PMAP1changedcpu;
SYSCTL_INT(_debug, OID_AUTO, PMAP1changedcpu, CTLFLAG_RD,
    &PMAP1changedcpu, 0,
    "Number of times pmap_pte2_quick changed CPU with same PMAP1");
#endif
static int PMAP1changed;
SYSCTL_INT(_debug, OID_AUTO, PMAP1changed, CTLFLAG_RD,
    &PMAP1changed, 0,
    "Number of times pmap_pte2_quick changed PMAP1");
static int PMAP1unchanged;
SYSCTL_INT(_debug, OID_AUTO, PMAP1unchanged, CTLFLAG_RD,
    &PMAP1unchanged, 0,
    "Number of times pmap_pte2_quick didn't change PMAP1");
static struct mtx PMAP2mutex;

/*
 * Internal flags for pmap_enter()'s helper functions.
 */
#define PMAP_ENTER_NORECLAIM    0x1000000   /* Don't reclaim PV entries. */
#define PMAP_ENTER_NOREPLACE    0x2000000   /* Don't replace mappings. */

static __inline void pt2_wirecount_init(vm_page_t m);
static boolean_t pmap_demote_pte1(pmap_t pmap, pt1_entry_t *pte1p,
    vm_offset_t va);
static int pmap_enter_pte1(pmap_t pmap, vm_offset_t va, pt1_entry_t pte1,
    u_int flags, vm_page_t m);
void cache_icache_sync_fresh(vm_offset_t va, vm_paddr_t pa, vm_size_t size);

/*
 * Function to set the debug level of the pmap code.
 */
#ifdef PMAP_DEBUG
void
pmap_debug(int level)
{

    pmap_debug_level = level;
    dprintf("pmap_debug: level=%d\n", pmap_debug_level);
}
#endif /* PMAP_DEBUG */

/*
 * This table must correspond with the memory attribute configuration in vm.h.
 * The first entry is used for normal system mapping.
 *
 * Device memory is always marked as shared.
 * Normal memory is shared only in the SMP case.
 * Not-outer-shareable bits are not used yet.
 * Class 6 cannot be used on ARM11.
 */
#define TEXDEF_TYPE_SHIFT   0
#define TEXDEF_TYPE_MASK    0x3
#define TEXDEF_INNER_SHIFT  2
#define TEXDEF_INNER_MASK   0x3
#define TEXDEF_OUTER_SHIFT  4
#define TEXDEF_OUTER_MASK   0x3
#define TEXDEF_NOS_SHIFT    6
#define TEXDEF_NOS_MASK     0x1

#define TEX(t, i, o, s)                     \
    (((t) << TEXDEF_TYPE_SHIFT) |           \
     ((i) << TEXDEF_INNER_SHIFT) |          \
     ((o) << TEXDEF_OUTER_SHIFT) |          \
     ((s) << TEXDEF_NOS_SHIFT))

static uint32_t tex_class[8] = {
    /*  type      inner cache outer cache */
    TEX(PRRR_MEM, NMRR_WB_WA, NMRR_WB_WA, 0),   /* 0 - ATTR_WB_WA   */
    TEX(PRRR_MEM, NMRR_NC,    NMRR_NC,    0),   /* 1 - ATTR_NOCACHE */
    TEX(PRRR_DEV, NMRR_NC,    NMRR_NC,    0),   /* 2 - ATTR_DEVICE  */
    TEX(PRRR_SO,  NMRR_NC,    NMRR_NC,    0),   /* 3 - ATTR_SO      */
    TEX(PRRR_MEM, NMRR_WT,    NMRR_WT,    0),   /* 4 - ATTR_WT      */
    TEX(PRRR_MEM, NMRR_NC,    NMRR_NC,    0),   /* 5 - NOT USED YET */
    TEX(PRRR_MEM, NMRR_NC,    NMRR_NC,    0),   /* 6 - NOT USED YET */
    TEX(PRRR_MEM, NMRR_NC,    NMRR_NC,    0),   /* 7 - NOT USED YET */
};
#undef TEX

static uint32_t pte2_attr_tab[8] = {
    PTE2_ATTR_WB_WA,    /* 0 - VM_MEMATTR_WB_WA */
    PTE2_ATTR_NOCACHE,  /* 1 - VM_MEMATTR_NOCACHE */
    PTE2_ATTR_DEVICE,   /* 2 - VM_MEMATTR_DEVICE */
    PTE2_ATTR_SO,       /* 3 - VM_MEMATTR_SO */
    PTE2_ATTR_WT,       /* 4 - VM_MEMATTR_WRITE_THROUGH */
    0,                  /* 5 - NOT USED YET */
    0,                  /* 6 - NOT USED YET */
    0                   /* 7 - NOT USED YET */
};
CTASSERT(VM_MEMATTR_WB_WA == 0);
CTASSERT(VM_MEMATTR_NOCACHE == 1);
CTASSERT(VM_MEMATTR_DEVICE == 2);
CTASSERT(VM_MEMATTR_SO == 3);
CTASSERT(VM_MEMATTR_WRITE_THROUGH == 4);
#define VM_MEMATTR_END  (VM_MEMATTR_WRITE_THROUGH + 1)

boolean_t
pmap_is_valid_memattr(pmap_t pmap __unused, vm_memattr_t mode)
{

    return (mode >= 0 && mode < VM_MEMATTR_END);
}

static inline uint32_t
vm_memattr_to_pte2(vm_memattr_t ma)
{

    KASSERT((u_int)ma < VM_MEMATTR_END,
        ("%s: bad vm_memattr_t %d", __func__, ma));
    return (pte2_attr_tab[(u_int)ma]);
}

static inline uint32_t
vm_page_pte2_attr(vm_page_t m)
{

    return (vm_memattr_to_pte2(m->md.pat_mode));
}

/*
 * Convert TEX definition entry to TTB flags.
 */
static uint32_t
encode_ttb_flags(int idx)
{
    uint32_t inner, outer, nos, reg;

    inner = (tex_class[idx] >> TEXDEF_INNER_SHIFT) &
        TEXDEF_INNER_MASK;
    outer = (tex_class[idx] >> TEXDEF_OUTER_SHIFT) &
        TEXDEF_OUTER_MASK;
    nos = (tex_class[idx] >> TEXDEF_NOS_SHIFT) &
        TEXDEF_NOS_MASK;

    reg = nos << 5;
    reg |= outer << 3;
    if (cpuinfo.coherent_walk)
        reg |= (inner & 0x1) << 6;
    reg |= (inner & 0x2) >> 1;
#ifdef SMP
    ARM_SMP_UP(
        reg |= 1 << 1,
    );
#endif
    return (reg);
}
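
/*
 * Illustration (an assumption based on the ARMv7 short-descriptor TTBR
 * attribute layout, not anything enforced here): the value built above
 * places the not-outer-shareable flag in bit [5], the outer cacheability
 * (RGN) in bits [4:3], the inner cacheability (IRGN) split across bits
 * [6] and [0], and, on SMP, the shareable bit in bit [1].
 */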

/*
 * Set TEX remapping registers in current CPU.
 */
void
pmap_set_tex(void)
{
    uint32_t prrr, nmrr;
    uint32_t type, inner, outer, nos;
    int i;

#ifdef PMAP_PTE_NOCACHE
    /* XXX fixme */
    if (cpuinfo.coherent_walk) {
        pt_memattr = VM_MEMATTR_WB_WA;
        ttb_flags = encode_ttb_flags(0);
    } else {
        pt_memattr = VM_MEMATTR_NOCACHE;
        ttb_flags = encode_ttb_flags(1);
    }
#else
    pt_memattr = VM_MEMATTR_WB_WA;
    ttb_flags = encode_ttb_flags(0);
#endif

    prrr = 0;
    nmrr = 0;

    /* Build remapping register from TEX classes. */
    for (i = 0; i < 8; i++) {
        type = (tex_class[i] >> TEXDEF_TYPE_SHIFT) &
            TEXDEF_TYPE_MASK;
        inner = (tex_class[i] >> TEXDEF_INNER_SHIFT) &
            TEXDEF_INNER_MASK;
        outer = (tex_class[i] >> TEXDEF_OUTER_SHIFT) &
            TEXDEF_OUTER_MASK;
        nos = (tex_class[i] >> TEXDEF_NOS_SHIFT) &
            TEXDEF_NOS_MASK;

        prrr |= type << (i * 2);
        prrr |= nos << (i + 24);
        nmrr |= inner << (i * 2);
        nmrr |= outer << (i * 2 + 16);
    }
    /* Add shareable bits for device memory. */
    prrr |= PRRR_DS0 | PRRR_DS1;

    /* Add shareable bits for normal memory in SMP case. */
#ifdef SMP
    ARM_SMP_UP(
        prrr |= PRRR_NS1,
    );
#endif
    cp15_prrr_set(prrr);
    cp15_nmrr_set(nmrr);

    /* Caches are disabled, so full TLB flush should be enough. */
    tlb_flush_all_local();
}
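
/*
 * Worked example of the packing done in pmap_set_tex() above: TEX class 2
 * (ATTR_DEVICE) contributes its memory type to PRRR[5:4], its NOS bit to
 * PRRR[26], its inner cache mode to NMRR[5:4] and its outer cache mode to
 * NMRR[21:20], since class i uses bit positions 2 * i (type and inner),
 * 2 * i + 16 (outer) and i + 24 (NOS).
 */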

/*
 * Remap one vm_memattr class to another one.  This can be useful as a
 * workaround for SOC errata, e.g. if devices must be accessed using the
 * SO memory class.
 *
 * !!! Please note that this function is an absolute last-resort thing.
 * It should not be used under normal circumstances. !!!
 *
 * Usage rules:
 * - it shall be called after pmap_bootstrap_prepare() and before
 *   cpu_mp_start() (thus only on the boot CPU).  In practice, it's expected
 *   to be called from platform_attach() or platform_late_init().
 *
 * - if remapping doesn't change the caching mode, or unless an uncached
 *   class is remapped to any kind of cached one, then no other restriction
 *   exists.
 *
 * - if pmap_remap_vm_attr() changes the caching mode, but both (original
 *   and remapped) remain cached, then the caller is responsible for
 *   calling dcache_wbinv_poc_all().
 *
 * - remapping of any kind of cached class to uncached is not permitted.
 */
void
pmap_remap_vm_attr(vm_memattr_t old_attr, vm_memattr_t new_attr)
{
    int old_idx, new_idx;

    /* Map VM memattrs to indexes to tex_class table. */
    old_idx = PTE2_ATTR2IDX(pte2_attr_tab[(int)old_attr]);
    new_idx = PTE2_ATTR2IDX(pte2_attr_tab[(int)new_attr]);

    /* Replace TEX attribute and apply it. */
    tex_class[old_idx] = tex_class[new_idx];
    pmap_set_tex();
}

/*
 * KERNBASE must be a multiple of NPT2_IN_PG * PTE1_SIZE.  In other words,
 * KERNBASE is mapped by the first L2 page table in an L2 page table page.
 * It meets the same constraint due to PT2MAP being placed just under
 * KERNBASE.
 */
CTASSERT((KERNBASE & (NPT2_IN_PG * PTE1_SIZE - 1)) == 0);
CTASSERT((KERNBASE - VM_MAXUSER_ADDRESS) >= PT2MAP_SIZE);

/*
 * In crazy dreams, PAGE_SIZE could be a multiple of PTE2_SIZE in general.
 * For now, anyhow, the following check must be fulfilled.
 */
CTASSERT(PAGE_SIZE == PTE2_SIZE);
/*
 * We don't want to mess up MI code with all MMU and PMAP definitions,
 * so some things, which depend on other ones, are defined independently.
 * Now, it is time to check that we don't screw up something.
 */
CTASSERT(PDRSHIFT == PTE1_SHIFT);
/*
 * Check L1 and L2 page table entries definitions consistency.
 */
CTASSERT(NB_IN_PT1 == (sizeof(pt1_entry_t) * NPTE1_IN_PT1));
CTASSERT(NB_IN_PT2 == (sizeof(pt2_entry_t) * NPTE2_IN_PT2));
/*
 * Check L2 page tables page consistency.
 */
CTASSERT(PAGE_SIZE == (NPT2_IN_PG * NB_IN_PT2));
CTASSERT((1 << PT2PG_SHIFT) == NPT2_IN_PG);
/*
 * Check PT2TAB consistency.
 * PT2TAB_ENTRIES is defined as a division of NPTE1_IN_PT1 by NPT2_IN_PG.
 * This should be done without remainder.
 */
CTASSERT(NPTE1_IN_PT1 == (PT2TAB_ENTRIES * NPT2_IN_PG));

/*
 * A PT2MAP magic.
 *
 * All level 2 page tables (PT2s) are mapped continuously and accordingly
 * into the PT2MAP address space.  As the PT2 size is less than PAGE_SIZE,
 * this can be done only if PAGE_SIZE is a multiple of the PT2 size.  All
 * PT2s in one page must be used together, but not necessarily at once.
 * The first PT2 in a page must map things on a correctly aligned address
 * and the others must follow in the right order.
 */
#define NB_IN_PT2TAB    (PT2TAB_ENTRIES * sizeof(pt2_entry_t))
#define NPT2_IN_PT2TAB  (NB_IN_PT2TAB / NB_IN_PT2)
#define NPG_IN_PT2TAB   (NB_IN_PT2TAB / PAGE_SIZE)

/*
 * Check PT2TAB consistency.
 * NPT2_IN_PT2TAB is defined as a division of NB_IN_PT2TAB by NB_IN_PT2.
 * NPG_IN_PT2TAB is defined as a division of NB_IN_PT2TAB by PAGE_SIZE.
 * Both should be done without remainder.
 */
CTASSERT(NB_IN_PT2TAB == (NPT2_IN_PT2TAB * NB_IN_PT2));
CTASSERT(NB_IN_PT2TAB == (NPG_IN_PT2TAB * PAGE_SIZE));
/*
 * The implementation was made general, however, with the assumption
 * below in mind.  In the case of another value of NPG_IN_PT2TAB,
 * the code should be rechecked once more.
 */
CTASSERT(NPG_IN_PT2TAB == 1);
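
/*
 * Numeric illustration, assuming the usual ARMv6/v7 short-descriptor
 * geometry (4 KB pages, 256-entry L2 tables of 4-byte entries, 4096-entry
 * L1 table): NB_IN_PT2 is 1 KB, so NPT2_IN_PG is 4 and one page holds the
 * four PT2s covering a naturally aligned 4 MB range of KVA;
 * PT2TAB_ENTRIES is 4096 / 4 = 1024, so NB_IN_PT2TAB is exactly one page
 * (hence the NPG_IN_PT2TAB == 1 assertion above) and PT2MAP itself spans
 * 4096 * 1 KB = 4 MB of KVA.
 */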

/*
 * Get offset of PT2 in a page
 * associated with given PT1 index.
 */
static __inline u_int
page_pt2off(u_int pt1_idx)
{

    return ((pt1_idx & PT2PG_MASK) * NB_IN_PT2);
}

/*
 * Get physical address of PT2
 * associated with given PT2s page and PT1 index.
 */
static __inline vm_paddr_t
page_pt2pa(vm_paddr_t pgpa, u_int pt1_idx)
{

    return (pgpa + page_pt2off(pt1_idx));
}

/*
 * Get first entry of PT2
 * associated with given PT2s page and PT1 index.
 */
static __inline pt2_entry_t *
page_pt2(vm_offset_t pgva, u_int pt1_idx)
{

    return ((pt2_entry_t *)(pgva + page_pt2off(pt1_idx)));
}

/*
 * Get virtual address of PT2s page (mapped in PT2MAP)
 * which holds the PT2 which holds the entry which maps given virtual address.
 */
static __inline vm_offset_t
pt2map_pt2pg(vm_offset_t va)
{

    va &= ~(NPT2_IN_PG * PTE1_SIZE - 1);
    return ((vm_offset_t)pt2map_entry(va));
}

/*****************************************************************************
 *
 * THREE pmap initialization milestones exist:
 *
 * locore.S
 *   -> fundamental init (including MMU) in ASM
 *
 * initarm()
 *   -> fundamental init continues in C
 *   -> first available physical address is known
 *
 * pmap_bootstrap_prepare() -> FIRST PMAP MILESTONE (first epoch begins)
 *   -> basic (safe) interface for physical address allocation is made
 *   -> basic (safe) interface for virtual mapping is made
 *   -> limited non-SMP-coherent work is possible
 *
 * -> more fundamental init continues in C
 *   -> locks and some more things are available
 *   -> all fundamental allocations and mappings are done
 *
 * pmap_bootstrap() -> SECOND PMAP MILESTONE (second epoch begins)
 *   -> phys_avail[] and virtual_avail are set
 *   -> control is passed to the vm subsystem
 *   -> physical and virtual address allocation are off limits
 *   -> low level mapping functions, some SMP coherent, are available,
 *      which cannot be used before the vm subsystem is initialized
 *
 * mi_startup()
 *   -> vm subsystem is being inited
 *
 * pmap_init() -> THIRD PMAP MILESTONE (third epoch begins)
 *   -> pmap is fully inited
 *
 *****************************************************************************/

/*****************************************************************************
 *
 * PMAP first stage initialization and utility functions
 * for pre-bootstrap epoch.
 *
 * After pmap_bootstrap_prepare() is called, the following functions
 * can be used:
 *
 * (1) strictly for this stage only, functions for physical page
 *     allocations, virtual space allocations, and mappings:
 *
 * vm_paddr_t pmap_preboot_get_pages(u_int num);
 * void pmap_preboot_map_pages(vm_paddr_t pa, vm_offset_t va, u_int num);
 * vm_offset_t pmap_preboot_reserve_pages(u_int num);
 * vm_offset_t pmap_preboot_get_vpages(u_int num);
 * void pmap_preboot_map_attr(vm_paddr_t pa, vm_offset_t va, vm_size_t size,
 *     vm_prot_t prot, vm_memattr_t attr);
 *
 * (2) for all stages:
 *
 * vm_paddr_t pmap_kextract(vm_offset_t va);
 *
 * NOTE: This stage is not SMP coherent.
 *
 *****************************************************************************/

#define KERNEL_P2V(pa) \
    ((vm_offset_t)((pa) - arm_physmem_kernaddr + KERNVIRTADDR))
#define KERNEL_V2P(va) \
    ((vm_paddr_t)((va) - KERNVIRTADDR + arm_physmem_kernaddr))

static vm_paddr_t last_paddr;

/*
 * Pre-bootstrap epoch page allocator.
 */
vm_paddr_t
pmap_preboot_get_pages(u_int num)
{
    vm_paddr_t ret;

    ret = last_paddr;
    last_paddr += num * PAGE_SIZE;

    return (ret);
}
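
/*
 * Illustrative sketch only (nothing in this file calls it this way): a
 * platform's early initialization could use the pre-bootstrap interface
 * listed above roughly as follows, where 0x40000000 stands for some
 * hypothetical device's physical address:
 *
 *    vm_offset_t va;
 *
 *    va = pmap_preboot_reserve_pages(1);
 *    pmap_preboot_map_attr(0x40000000, va, PAGE_SIZE,
 *        VM_PROT_READ | VM_PROT_WRITE, VM_MEMATTR_DEVICE);
 */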

/*
 *  The fundamental initialization of PMAP stuff.
 *
 *  Some things already happened in locore.S and some things could happen
 *  before pmap_bootstrap_prepare() is called, so let's recall what is done:
 *  1. Caches are disabled.
 *  2. We are running on virtual addresses already with 'boot_pt1'
 *     as the L1 page table.
 *  3. So far, all virtual addresses can be converted to physical ones and
 *     vice versa by the following macros:
 *       KERNEL_P2V(pa) .... physical to virtual ones,
 *       KERNEL_V2P(va) .... virtual to physical ones.
 *
 *  What is done herein:
 *  1. The 'boot_pt1' is replaced by the real kernel L1 page table 'kern_pt1'.
 *  2. PT2MAP magic is brought to life.
 *  3. Basic preboot functions for page allocations and mappings can be used.
 *  4. Everything is prepared for L1 cache enabling.
 *
 *  Variations:
 *  1. To use the second TTB register, so that kernel and user page tables
 *     are separated.  This way process forking - pmap_pinit() - could be
 *     faster, it saves physical pages and KVA per process, and it's a
 *     simple change.  However, due to hardware constraints, it leads to
 *     one of the following:
 *     (a) 2G space for kernel and 2G space for users.
 *     (b) 1G space for kernel in low addresses and 3G for users above it.
 *     A question is: Is case (b) really an option?  Note that case (b)
 *     saves neither physical memory nor KVA.
 */
void
pmap_bootstrap_prepare(vm_paddr_t last)
{
    vm_paddr_t pt2pg_pa, pt2tab_pa, pa, size;
    vm_offset_t pt2pg_va;
    pt1_entry_t *pte1p;
    pt2_entry_t *pte2p;
    u_int i;
    uint32_t l1_attr;

    /*
     * Now, we are going to make the real kernel mapping.  Note that we are
     * already running on some mapping made in locore.S and we expect
     * that it's large enough to ensure nofault access to physical memory
     * allocated herein before the switch.
     *
     * As the kernel image and everything needed before are and will be
     * mapped by section mappings, we align the last physical address to
     * PTE1_SIZE.
     */
    last_paddr = pte1_roundup(last);

    /*
     * Allocate and zero page(s) for kernel L1 page table.
     *
     * Note that it's the first allocation on space which was PTE1_SIZE
     * aligned and as such base_pt1 is aligned to NB_IN_PT1 too.
     */
    base_pt1 = pmap_preboot_get_pages(NPG_IN_PT1);
    kern_pt1 = (pt1_entry_t *)KERNEL_P2V(base_pt1);
    bzero((void*)kern_pt1, NB_IN_PT1);
    pte1_sync_range(kern_pt1, NB_IN_PT1);

    /* Allocate and zero page(s) for kernel PT2TAB. */
    pt2tab_pa = pmap_preboot_get_pages(NPG_IN_PT2TAB);
    kern_pt2tab = (pt2_entry_t *)KERNEL_P2V(pt2tab_pa);
    bzero(kern_pt2tab, NB_IN_PT2TAB);
    pte2_sync_range(kern_pt2tab, NB_IN_PT2TAB);

    /* Allocate and zero page(s) for kernel L2 page tables. */
    pt2pg_pa = pmap_preboot_get_pages(NKPT2PG);
    pt2pg_va = KERNEL_P2V(pt2pg_pa);
    size = NKPT2PG * PAGE_SIZE;
    bzero((void*)pt2pg_va, size);
    pte2_sync_range((pt2_entry_t *)pt2pg_va, size);

    /*
     * Add a physical memory segment (vm_phys_seg) corresponding to the
     * preallocated pages for kernel L2 page tables so that vm_page
     * structures representing these pages will be created.  The vm_page
     * structures are required for promotion of the corresponding kernel
     * virtual addresses to section mappings.
     */
    vm_phys_add_seg(pt2tab_pa, pmap_preboot_get_pages(0));

    /*
     * Insert allocated L2 page table pages to PT2TAB and make
     * links to all PT2s in the L1 page table.  See how kernel_vm_end
     * is initialized.
     *
     * We play simple and safe.  So every KVA will have an underlying
     * L2 page table, even the kernel image mapped by sections.
     */
    pte2p = kern_pt2tab_entry(KERNBASE);
    for (pa = pt2pg_pa; pa < pt2pg_pa + size; pa += PTE2_SIZE)
        pt2tab_store(pte2p++, PTE2_KPT(pa));

    pte1p = kern_pte1(KERNBASE);
    for (pa = pt2pg_pa; pa < pt2pg_pa + size; pa += NB_IN_PT2)
        pte1_store(pte1p++, PTE1_LINK(pa));

    /* Make section mappings for kernel. */
    l1_attr = ATTR_TO_L1(PTE2_ATTR_DEFAULT);
    pte1p = kern_pte1(KERNBASE);
    for (pa = KERNEL_V2P(KERNBASE); pa < last; pa += PTE1_SIZE)
        pte1_store(pte1p++, PTE1_KERN(pa, PTE1_AP_KRW, l1_attr));

    /*
     * Get free and aligned space for PT2MAP and make L1 page table links
     * to L2 page tables held in PT2TAB.
     *
     * Note that pages holding PT2s are stored in PT2TAB as pt2_entry_t
     * descriptors and the PT2TAB page(s) itself is(are) used as PT2s.
     * Thus each entry in PT2TAB maps all PT2s in a page.  This implies
     * that the virtual address of PT2MAP must be aligned to
     * NPT2_IN_PG * PTE1_SIZE.
     */
    PT2MAP = (pt2_entry_t *)(KERNBASE - PT2MAP_SIZE);
    pte1p = kern_pte1((vm_offset_t)PT2MAP);
    for (pa = pt2tab_pa, i = 0; i < NPT2_IN_PT2TAB; i++, pa += NB_IN_PT2) {
        pte1_store(pte1p++, PTE1_LINK(pa));
    }

    /*
     * Store PT2TAB in PT2TAB itself, i.e. self reference mapping.
     * Each pmap will hold its own PT2TAB, so the mapping should not be
     * global.
     */
    pte2p = kern_pt2tab_entry((vm_offset_t)PT2MAP);
    for (pa = pt2tab_pa, i = 0; i < NPG_IN_PT2TAB; i++, pa += PTE2_SIZE) {
        pt2tab_store(pte2p++, PTE2_KPT_NG(pa));
    }

    /*
     * Choose the correct L2 page table and make mappings for allocations
     * made herein which replace the temporary locore.S mappings after a
     * while.  Note that PT2MAP cannot be used until we switch to kern_pt1.
     *
     * Note that these allocations started aligned on a 1M section and
     * the kernel PT1 was allocated first.  Making of mappings must follow
     * the order of physical allocations as we've used the KERNEL_P2V()
     * macro for virtual address resolution.
     */
    pte2p = kern_pt2tab_entry((vm_offset_t)kern_pt1);
    pt2pg_va = KERNEL_P2V(pte2_pa(pte2_load(pte2p)));

    pte2p = page_pt2(pt2pg_va, pte1_index((vm_offset_t)kern_pt1));

    /* Make mapping for kernel L1 page table. */
    for (pa = base_pt1, i = 0; i < NPG_IN_PT1; i++, pa += PTE2_SIZE)
        pte2_store(pte2p++, PTE2_KPT(pa));

    /* Make mapping for kernel PT2TAB. */
    for (pa = pt2tab_pa, i = 0; i < NPG_IN_PT2TAB; i++, pa += PTE2_SIZE)
        pte2_store(pte2p++, PTE2_KPT(pa));

    /* Finally, switch from 'boot_pt1' to 'kern_pt1'. */
    pmap_kern_ttb = base_pt1 | ttb_flags;
    cpuinfo_reinit_mmu(pmap_kern_ttb);

    /*
     * Initialize the first available KVA.  As the kernel image is mapped
     * by sections, we are leaving some gap behind.
     */
    virtual_avail = (vm_offset_t)kern_pt2tab + NPG_IN_PT2TAB * PAGE_SIZE;
}
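
/*
 * Note (restating the layout established above): PT2MAP is placed
 * directly below KERNBASE, so after pmap_bootstrap_prepare() every L2
 * page table can be read and written through that window; with the
 * geometry assumed earlier, the window is 4 MB wide.
 */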

/*
 * Setup L2 page table page for given KVA.
 * Used in pre-bootstrap epoch.
 *
 * Note that we have allocated NKPT2PG pages for L2 page tables in advance
 * and used them for mapping KVA starting from KERNBASE.  However, this is
 * not enough.  Vectors and devices need L2 page tables too.  Note that
 * they are even above VM_MAX_KERNEL_ADDRESS.
 */
static __inline vm_paddr_t
pmap_preboot_pt2pg_setup(vm_offset_t va)
{
    pt2_entry_t *pte2p, pte2;
    vm_paddr_t pt2pg_pa;

    /* Get associated entry in PT2TAB. */
    pte2p = kern_pt2tab_entry(va);

    /* Just return, if PT2s page exists already. */
    pte2 = pt2tab_load(pte2p);
    if (pte2_is_valid(pte2))
        return (pte2_pa(pte2));

    KASSERT(va >= VM_MAX_KERNEL_ADDRESS,
        ("%s: NKPT2PG too small", __func__));

    /*
     * Allocate page for PT2s and insert it to PT2TAB.
     * In other words, map it into PT2MAP space.
     */
    pt2pg_pa = pmap_preboot_get_pages(1);
    pt2tab_store(pte2p, PTE2_KPT(pt2pg_pa));

    /* Zero all PT2s in allocated page. */
    bzero((void*)pt2map_pt2pg(va), PAGE_SIZE);
    pte2_sync_range((pt2_entry_t *)pt2map_pt2pg(va), PAGE_SIZE);

    return (pt2pg_pa);
}

/*
 * Setup L2 page table for given KVA.
 * Used in pre-bootstrap epoch.
 */
static void
pmap_preboot_pt2_setup(vm_offset_t va)
{
    pt1_entry_t *pte1p;
    vm_paddr_t pt2pg_pa, pt2_pa;

    /* Setup PT2's page. */
    pt2pg_pa = pmap_preboot_pt2pg_setup(va);
    pt2_pa = page_pt2pa(pt2pg_pa, pte1_index(va));

    /* Insert PT2 to PT1. */
    pte1p = kern_pte1(va);
    pte1_store(pte1p, PTE1_LINK(pt2_pa));
}

/*
 * Get L2 page entry associated with given KVA.
 * Used in pre-bootstrap epoch.
 */
static __inline pt2_entry_t*
pmap_preboot_vtopte2(vm_offset_t va)
{
    pt1_entry_t *pte1p;

    /* Setup PT2 if needed. */
    pte1p = kern_pte1(va);
    if (!pte1_is_valid(pte1_load(pte1p))) /* XXX - sections ?! */
        pmap_preboot_pt2_setup(va);

    return (pt2map_entry(va));
}

/*
 * Pre-bootstrap epoch page(s) mapping(s).
 */
void
pmap_preboot_map_pages(vm_paddr_t pa, vm_offset_t va, u_int num)
{
    u_int i;
    pt2_entry_t *pte2p;

    /* Map all the pages. */
    for (i = 0; i < num; i++) {
        pte2p = pmap_preboot_vtopte2(va);
        pte2_store(pte2p, PTE2_KRW(pa));
        va += PAGE_SIZE;
        pa += PAGE_SIZE;
    }
}

/*
 * Pre-bootstrap epoch virtual space allocator.
 */
vm_offset_t
pmap_preboot_reserve_pages(u_int num)
{
    u_int i;
    vm_offset_t start, va;
    pt2_entry_t *pte2p;

    /* Allocate virtual space. */
    start = va = virtual_avail;
    virtual_avail += num * PAGE_SIZE;

    /* Zero the mapping. */
    for (i = 0; i < num; i++) {
        pte2p = pmap_preboot_vtopte2(va);
        pte2_store(pte2p, 0);
        va += PAGE_SIZE;
    }

    return (start);
}

/*
 * Pre-bootstrap epoch page(s) allocation and mapping(s).
 */
vm_offset_t
pmap_preboot_get_vpages(u_int num)
{
    vm_paddr_t pa;
    vm_offset_t va;

    /* Allocate physical page(s). */
    pa = pmap_preboot_get_pages(num);

    /* Allocate virtual space. */
    va = virtual_avail;
    virtual_avail += num * PAGE_SIZE;

    /* Map and zero all. */
    pmap_preboot_map_pages(pa, va, num);
    bzero((void *)va, num * PAGE_SIZE);

    return (va);
}

/*
 * Pre-bootstrap epoch page mapping(s) with attributes.
 */
void
pmap_preboot_map_attr(vm_paddr_t pa, vm_offset_t va, vm_size_t size,
    vm_prot_t prot, vm_memattr_t attr)
{
    u_int num;
    u_int l1_attr, l1_prot, l2_prot, l2_attr;
    pt1_entry_t *pte1p;
    pt2_entry_t *pte2p;

    l2_prot = prot & VM_PROT_WRITE ? PTE2_AP_KRW : PTE2_AP_KR;
    l2_prot |= (prot & VM_PROT_EXECUTE) ? PTE2_X : PTE2_NX;
    l2_attr = vm_memattr_to_pte2(attr);
    l1_prot = ATTR_TO_L1(l2_prot);
    l1_attr = ATTR_TO_L1(l2_attr);

    /* Map all the pages. */
    num = round_page(size);
    while (num > 0) {
        if ((((va | pa) & PTE1_OFFSET) == 0) && (num >= PTE1_SIZE)) {
            pte1p = kern_pte1(va);
            pte1_store(pte1p, PTE1_KERN(pa, l1_prot, l1_attr));
            va += PTE1_SIZE;
            pa += PTE1_SIZE;
            num -= PTE1_SIZE;
        } else {
            pte2p = pmap_preboot_vtopte2(va);
            pte2_store(pte2p, PTE2_KERN(pa, l2_prot, l2_attr));
            va += PAGE_SIZE;
            pa += PAGE_SIZE;
            num -= PAGE_SIZE;
        }
    }
}

/*
 * Extract from the kernel page table the physical address
 * that is mapped by the given virtual address "va".
 */
vm_paddr_t
pmap_kextract(vm_offset_t va)
{
    vm_paddr_t pa;
    pt1_entry_t pte1;
    pt2_entry_t pte2;

    pte1 = pte1_load(kern_pte1(va));
    if (pte1_is_section(pte1)) {
        pa = pte1_pa(pte1) | (va & PTE1_OFFSET);
    } else if (pte1_is_link(pte1)) {
        /*
         * We should beware of concurrent promotion that changes
         * pte1 at this point.  However, it's not a problem as the
         * PT2 page is preserved by promotion in PT2TAB.  So even if
         * it happens, using of PT2MAP is still safe.
         *
         * QQQ: However, concurrent removing is a problem which
         *      ends in abort on PT2MAP space.  Locking must be used
         *      to deal with this.
         */
        pte2 = pte2_load(pt2map_entry(va));
        pa = pte2_pa(pte2) | (va & PTE2_OFFSET);
    } else {
        panic("%s: va %#x pte1 %#x", __func__, va, pte1);
    }
    return (pa);
}

/*
 * Extract from the kernel page table the physical address
 * that is mapped by the given virtual address "va".  Also
 * return the L2 page table entry which maps the address.
 *
 * This is only intended to be used for panic dumps.
 */
vm_paddr_t
pmap_dump_kextract(vm_offset_t va, pt2_entry_t *pte2p)
{
    vm_paddr_t pa;
    pt1_entry_t pte1;
    pt2_entry_t pte2;

    pte1 = pte1_load(kern_pte1(va));
    if (pte1_is_section(pte1)) {
        pa = pte1_pa(pte1) | (va & PTE1_OFFSET);
        pte2 = pa | ATTR_TO_L2(pte1) | PTE2_V;
    } else if (pte1_is_link(pte1)) {
        pte2 = pte2_load(pt2map_entry(va));
        pa = pte2_pa(pte2);
    } else {
        pte2 = 0;
        pa = 0;
    }
    if (pte2p != NULL)
        *pte2p = pte2;
    return (pa);
}

/*****************************************************************************
 *
 * PMAP second stage initialization and utility functions
 * for bootstrap epoch.
 *
 * After pmap_bootstrap() is called, the following functions for
 * mappings can be used:
 *
 * void pmap_kenter(vm_offset_t va, vm_paddr_t pa);
 * void pmap_kremove(vm_offset_t va);
 * vm_offset_t pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end,
 *     int prot);
 *
 * NOTE: This stage is not SMP coherent, and physical page allocation is
 *       not allowed during this stage.
 *
 *****************************************************************************/

/*
 * Initialize kernel PMAP locks and lists, kernel_pmap itself, and
 * reserve various virtual spaces for temporary mappings.
 */
void
pmap_bootstrap(vm_offset_t firstaddr)
{
    pt2_entry_t *unused __unused;
    struct pcpu *pc;

    /*
     * Initialize the kernel pmap (which is statically allocated).
     */
    PMAP_LOCK_INIT(kernel_pmap);
    kernel_l1pa = (vm_paddr_t)kern_pt1;  /* for libkvm */
    kernel_pmap->pm_pt1 = kern_pt1;
    kernel_pmap->pm_pt2tab = kern_pt2tab;
    CPU_FILL(&kernel_pmap->pm_active);  /* don't allow deactivation */
    TAILQ_INIT(&kernel_pmap->pm_pvchunk);

    /*
     * Initialize the global pv list lock.
     */
    rw_init(&pvh_global_lock, "pmap pv global");

    LIST_INIT(&allpmaps);

    /*
     * Request a spin mutex so that changes to allpmaps cannot be
     * preempted by smp_rendezvous_cpus().
     */
    mtx_init(&allpmaps_lock, "allpmaps", NULL, MTX_SPIN);
    mtx_lock_spin(&allpmaps_lock);
    LIST_INSERT_HEAD(&allpmaps, kernel_pmap, pm_list);
    mtx_unlock_spin(&allpmaps_lock);

    /*
     * Reserve some special page table entries/VA space for temporary
     * mapping of pages.
     */
#define SYSMAP(c, p, v, n)  do {                \
    v = (c)pmap_preboot_reserve_pages(n);       \
    p = pt2map_entry((vm_offset_t)v);           \
    } while (0)

    /*
     * Local CMAP1/CMAP2 are used for zeroing and copying pages.
     * Local CMAP2 is also used for data cache cleaning.
     */
    pc = get_pcpu();
    mtx_init(&pc->pc_cmap_lock, "SYSMAPS", NULL, MTX_DEF);
    SYSMAP(caddr_t, pc->pc_cmap1_pte2p, pc->pc_cmap1_addr, 1);
    SYSMAP(caddr_t, pc->pc_cmap2_pte2p, pc->pc_cmap2_addr, 1);
    SYSMAP(vm_offset_t, pc->pc_qmap_pte2p, pc->pc_qmap_addr, 1);

    /*
     * Crashdump maps.
     */
    SYSMAP(caddr_t, unused, crashdumpmap, MAXDUMPPGS);

    /*
     * _tmppt is used for reading arbitrary physical pages via /dev/mem.
     */
    SYSMAP(caddr_t, unused, _tmppt, 1);

    /*
     * PADDR1 and PADDR2 are used by pmap_pte2_quick() and pmap_pte2(),
     * respectively.  PADDR3 is used by pmap_pte2_ddb().
     */
    SYSMAP(pt2_entry_t *, PMAP1, PADDR1, 1);
    SYSMAP(pt2_entry_t *, PMAP2, PADDR2, 1);
#ifdef DDB
    SYSMAP(pt2_entry_t *, PMAP3, PADDR3, 1);
#endif
    mtx_init(&PMAP2mutex, "PMAP2", NULL, MTX_DEF);

    /*
     * Note that very shortly, in initarm(), we are going to initialize
     * the phys_avail[] array and no further page allocation can happen
     * after that until the vm subsystem is initialized.
     */
    kernel_vm_end_new = kernel_vm_end;
    virtual_end = vm_max_kernel_address;
}
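
/*
 * For reference, the first SYSMAP() use above expands (modulo the
 * do/while wrapper) to:
 *
 *    pc->pc_cmap1_addr = (caddr_t)pmap_preboot_reserve_pages(1);
 *    pc->pc_cmap1_pte2p = pt2map_entry((vm_offset_t)pc->pc_cmap1_addr);
 */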

static void
pmap_init_reserved_pages(void)
{
    struct pcpu *pc;
    vm_offset_t pages;
    int i;

    CPU_FOREACH(i) {
        pc = pcpu_find(i);
        /*
         * Skip if the mapping has already been initialized,
         * i.e. this is the BSP.
         */
        if (pc->pc_cmap1_addr != 0)
            continue;
        mtx_init(&pc->pc_cmap_lock, "SYSMAPS", NULL, MTX_DEF);
        pages = kva_alloc(PAGE_SIZE * 3);
        if (pages == 0)
            panic("%s: unable to allocate KVA", __func__);
        pc->pc_cmap1_pte2p = pt2map_entry(pages);
        pc->pc_cmap2_pte2p = pt2map_entry(pages + PAGE_SIZE);
        pc->pc_qmap_pte2p = pt2map_entry(pages + (PAGE_SIZE * 2));
        pc->pc_cmap1_addr = (caddr_t)pages;
        pc->pc_cmap2_addr = (caddr_t)(pages + PAGE_SIZE);
        pc->pc_qmap_addr = pages + (PAGE_SIZE * 2);
    }
}
SYSINIT(rpages_init, SI_SUB_CPU, SI_ORDER_ANY, pmap_init_reserved_pages, NULL);

/*
 * The function can already be used in the second initialization stage.
 * As such, the function DOES NOT call pmap_growkernel() where PT2
 * allocation can happen.  So if used, be sure that PT2 for the given
 * virtual address is allocated already!
 *
 * Add a wired page to the kva.
 * Note: not SMP coherent.
 */
static __inline void
pmap_kenter_prot_attr(vm_offset_t va, vm_paddr_t pa, uint32_t prot,
    uint32_t attr)
{
    pt1_entry_t *pte1p;
    pt2_entry_t *pte2p;

    pte1p = kern_pte1(va);
    if (!pte1_is_valid(pte1_load(pte1p))) { /* XXX - sections ?! */
        /*
         * This is a very low level function, so PT2 and particularly
         * the PT2PG associated with the given virtual address must
         * already be allocated.  It's a pain mainly during the pmap
         * initialization stage.  However, calling it after pmap
         * initialization with a virtual address not under
         * kernel_vm_end will lead to the same misery.
         */
        if (!pte2_is_valid(pte2_load(kern_pt2tab_entry(va))))
            panic("%s: kernel PT2 not allocated!", __func__);
    }

    pte2p = pt2map_entry(va);
    pte2_store(pte2p, PTE2_KERN(pa, prot, attr));
}

PMAP_INLINE void
pmap_kenter(vm_offset_t va, vm_paddr_t pa)
{

    pmap_kenter_prot_attr(va, pa, PTE2_AP_KRW, PTE2_ATTR_DEFAULT);
}
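
/*
 * Illustration of typical use (an assumption, not a rule enforced here):
 * because pmap_kenter() is not SMP coherent, callers pair it with an
 * explicit TLB flush, e.g.:
 *
 *    pmap_kenter(va, pa);
 *    tlb_flush_local(va);
 *
 * as pmap_kenter_temporary() does later in this file.
 */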

/*
 * Remove a page from the kernel pagetables.
 * Note: not SMP coherent.
 */
PMAP_INLINE void
pmap_kremove(vm_offset_t va)
{
    pt1_entry_t *pte1p;
    pt2_entry_t *pte2p;

    pte1p = kern_pte1(va);
    if (pte1_is_section(pte1_load(pte1p))) {
        pte1_clear(pte1p);
    } else {
        pte2p = pt2map_entry(va);
        pte2_clear(pte2p);
    }
}

/*
 * Share new kernel PT2PG with all pmaps.
 * The caller is responsible for maintaining TLB consistency.
 */
static void
pmap_kenter_pt2tab(vm_offset_t va, pt2_entry_t npte2)
{
    pmap_t pmap;
    pt2_entry_t *pte2p;

    mtx_lock_spin(&allpmaps_lock);
    LIST_FOREACH(pmap, &allpmaps, pm_list) {
        pte2p = pmap_pt2tab_entry(pmap, va);
        pt2tab_store(pte2p, npte2);
    }
    mtx_unlock_spin(&allpmaps_lock);
}

/*
 * Share new kernel PTE1 with all pmaps.
 * The caller is responsible for maintaining TLB consistency.
 */
static void
pmap_kenter_pte1(vm_offset_t va, pt1_entry_t npte1)
{
    pmap_t pmap;
    pt1_entry_t *pte1p;

    mtx_lock_spin(&allpmaps_lock);
    LIST_FOREACH(pmap, &allpmaps, pm_list) {
        pte1p = pmap_pte1(pmap, va);
        pte1_store(pte1p, npte1);
    }
    mtx_unlock_spin(&allpmaps_lock);
}

/*
 * Used to map a range of physical addresses into kernel
 * virtual address space.
 *
 * The value passed in '*virt' is a suggested virtual address for
 * the mapping.  Architectures which can support a direct-mapped
 * physical to virtual region can return the appropriate address
 * within that region, leaving '*virt' unchanged.  Other
 * architectures should map the pages starting at '*virt' and
 * update '*virt' with the first usable address after the mapped
 * region.
 *
 * NOTE: Read the comments above pmap_kenter_prot_attr() as
 *       the function is used herein!
 */
vm_offset_t
pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
{
    vm_offset_t va, sva;
    vm_paddr_t pte1_offset;
    pt1_entry_t npte1;
    uint32_t l1prot, l2prot;
    uint32_t l1attr, l2attr;

    PDEBUG(1, printf("%s: virt = %#x, start = %#x, end = %#x (size = %#x),"
        " prot = %d\n", __func__, *virt, start, end, end - start, prot));

    l2prot = (prot & VM_PROT_WRITE) ? PTE2_AP_KRW : PTE2_AP_KR;
    l2prot |= (prot & VM_PROT_EXECUTE) ? PTE2_X : PTE2_NX;
    l1prot = ATTR_TO_L1(l2prot);

    l2attr = PTE2_ATTR_DEFAULT;
    l1attr = ATTR_TO_L1(l2attr);

    va = *virt;
    /*
     * Does the physical address range's size and alignment permit at
     * least one section mapping to be created?
     */
    pte1_offset = start & PTE1_OFFSET;
    if ((end - start) - ((PTE1_SIZE - pte1_offset) & PTE1_OFFSET) >=
        PTE1_SIZE) {
        /*
         * Increase the starting virtual address so that its alignment
         * does not preclude the use of section mappings.
         */
        if ((va & PTE1_OFFSET) < pte1_offset)
            va = pte1_trunc(va) + pte1_offset;
        else if ((va & PTE1_OFFSET) > pte1_offset)
            va = pte1_roundup(va) + pte1_offset;
    }
    sva = va;
    while (start < end) {
        if ((start & PTE1_OFFSET) == 0 && end - start >= PTE1_SIZE) {
            KASSERT((va & PTE1_OFFSET) == 0,
                ("%s: misaligned va %#x", __func__, va));
            npte1 = PTE1_KERN(start, l1prot, l1attr);
            pmap_kenter_pte1(va, npte1);
            va += PTE1_SIZE;
            start += PTE1_SIZE;
        } else {
            pmap_kenter_prot_attr(va, start, l2prot, l2attr);
            va += PAGE_SIZE;
            start += PAGE_SIZE;
        }
    }
    tlb_flush_range(sva, va - sva);
    *virt = va;
    return (sva);
}

/*
 * Make a temporary mapping for a physical address.
 * This is only intended to be used for panic dumps.
 */
void *
pmap_kenter_temporary(vm_paddr_t pa, int i)
{
    vm_offset_t va;

    /* QQQ: 'i' should be less than or equal to MAXDUMPPGS. */

    va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE);
    pmap_kenter(va, pa);
    tlb_flush_local(va);
    return ((void *)crashdumpmap);
}

/*************************************
 *
 * TLB & cache maintenance routines.
 *
 *************************************/

/*
 * We inline these within pmap.c for speed.
 */
PMAP_INLINE void
pmap_tlb_flush(pmap_t pmap, vm_offset_t va)
{

    if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
        tlb_flush(va);
}

PMAP_INLINE void
pmap_tlb_flush_range(pmap_t pmap, vm_offset_t sva, vm_size_t size)
{

    if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
        tlb_flush_range(sva, size);
}

/*
 * Abuse the pte2 nodes for unmapped kva to thread a kva freelist through.
 * Requirements:
 *  - Must deal with pages in order to ensure that none of the PTE2_* bits
 *    are ever set, PTE2_V in particular.
 *  - Assumes we can write to pte2s without pte2_store() atomic ops.
 *  - Assumes nothing will ever test these addresses for 0 to indicate
 *    no mapping instead of correctly checking PTE2_V.
 *  - Assumes a vm_offset_t will fit in a pte2 (true for arm).
 * Because PTE2_V is never set, there can be no mappings to invalidate.
 */
static vm_offset_t
pmap_pte2list_alloc(vm_offset_t *head)
{
    pt2_entry_t *pte2p;
    vm_offset_t va;

    va = *head;
    if (va == 0)
        panic("%s: exhausted pte2list KVA", __func__);
    pte2p = pt2map_entry(va);
    *head = *pte2p;
    if (*head & PTE2_V)
        panic("%s: va with PTE2_V set!", __func__);
    *pte2p = 0;
    return (va);
}

static void
pmap_pte2list_free(vm_offset_t *head, vm_offset_t va)
{
    pt2_entry_t *pte2p;

    if (va & PTE2_V)
        panic("%s: freeing va with PTE2_V set!", __func__);
    pte2p = pt2map_entry(va);
    *pte2p = *head;     /* virtual! PTE2_V is 0 though */
    *head = va;
}

static void
pmap_pte2list_init(vm_offset_t *head, void *base, int npages)
{
    int i;
    vm_offset_t va;

    *head = 0;
    for (i = npages - 1; i >= 0; i--) {
        va = (vm_offset_t)base + i * PAGE_SIZE;
        pmap_pte2list_free(head, va);
    }
}

/*****************************************************************************
 *
 * PMAP third and final stage initialization.
 *
 * After pmap_init() is called, the PMAP subsystem is fully initialized.
 *
 *****************************************************************************/

SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "VM/pmap parameters");

SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_max, CTLFLAG_RD, &pv_entry_max, 0,
    "Max number of PV entries");
SYSCTL_INT(_vm_pmap, OID_AUTO, shpgperproc, CTLFLAG_RD, &shpgperproc, 0,
    "Page share factor per proc");

static u_long nkpt2pg = NKPT2PG;
SYSCTL_ULONG(_vm_pmap, OID_AUTO, nkpt2pg, CTLFLAG_RD,
    &nkpt2pg, 0, "Pre-allocated pages for kernel PT2s");

static int sp_enabled = 1;
SYSCTL_INT(_vm_pmap, OID_AUTO, sp_enabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &sp_enabled, 0, "Are large page mappings enabled?");

bool
pmap_ps_enabled(pmap_t pmap __unused)
{

    return (sp_enabled != 0);
}

static SYSCTL_NODE(_vm_pmap, OID_AUTO, pte1, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "1MB page mapping counters");

static u_long pmap_pte1_demotions;
SYSCTL_ULONG(_vm_pmap_pte1, OID_AUTO, demotions, CTLFLAG_RD,
    &pmap_pte1_demotions, 0, "1MB page demotions");

static u_long pmap_pte1_mappings;
SYSCTL_ULONG(_vm_pmap_pte1, OID_AUTO, mappings, CTLFLAG_RD,
    &pmap_pte1_mappings, 0, "1MB page mappings");

static u_long pmap_pte1_p_failures;
SYSCTL_ULONG(_vm_pmap_pte1, OID_AUTO, p_failures, CTLFLAG_RD,
    &pmap_pte1_p_failures, 0, "1MB page promotion failures");

static u_long pmap_pte1_promotions;
SYSCTL_ULONG(_vm_pmap_pte1, OID_AUTO, promotions, CTLFLAG_RD,
    &pmap_pte1_promotions, 0, "1MB page promotions");

static u_long pmap_pte1_kern_demotions;
SYSCTL_ULONG(_vm_pmap_pte1, OID_AUTO, kern_demotions, CTLFLAG_RD,
    &pmap_pte1_kern_demotions, 0, "1MB page kernel demotions");

static u_long pmap_pte1_kern_promotions;
SYSCTL_ULONG(_vm_pmap_pte1, OID_AUTO, kern_promotions, CTLFLAG_RD,
    &pmap_pte1_kern_promotions, 0, "1MB page kernel promotions");

static __inline ttb_entry_t
pmap_ttb_get(pmap_t pmap)
{

    return (vtophys(pmap->pm_pt1) | ttb_flags);
}

/*
 * Initialize a vm_page's machine-dependent fields.
 *
 * Variations:
 * 1. Pages for L2 page tables are always not managed.  So, pv_list and
 *    pt2_wirecount can share the same physical space.  However, proper
 *    initialization on a page alloc for page tables and reinitialization
 *    on the page free must be ensured.
 */
void
pmap_page_init(vm_page_t m)
{

    TAILQ_INIT(&m->md.pv_list);
    pt2_wirecount_init(m);
    m->md.pat_mode = VM_MEMATTR_DEFAULT;
}

/*
 * Virtualization for a faster way to zero a whole page.
 */
static __inline void
pagezero(void *page)
{

    bzero(page, PAGE_SIZE);
}

/*
 * Zero L2 page table page.
 * Use the same KVA as in pmap_zero_page().
 */
static __inline vm_paddr_t
pmap_pt2pg_zero(vm_page_t m)
{
    pt2_entry_t *cmap2_pte2p;
    vm_paddr_t pa;
    struct pcpu *pc;

    pa = VM_PAGE_TO_PHYS(m);

    /*
     * XXX: For now, we map the whole page even if it's already zero,
     *      to sync it even if the sync is only DSB.
     */
    sched_pin();
    pc = get_pcpu();
    cmap2_pte2p = pc->pc_cmap2_pte2p;
    mtx_lock(&pc->pc_cmap_lock);
    if (pte2_load(cmap2_pte2p) != 0)
        panic("%s: CMAP2 busy", __func__);
    pte2_store(cmap2_pte2p, PTE2_KERN_NG(pa, PTE2_AP_KRW,
        vm_page_pte2_attr(m)));
    /* Even VM_ALLOC_ZERO request is only advisory. */
    if ((m->flags & PG_ZERO) == 0)
        pagezero(pc->pc_cmap2_addr);
    pte2_sync_range((pt2_entry_t *)pc->pc_cmap2_addr, PAGE_SIZE);
    pte2_clear(cmap2_pte2p);
    tlb_flush((vm_offset_t)pc->pc_cmap2_addr);

    /*
     * Unpin the thread before releasing the lock.  Otherwise the thread
     * could be rescheduled while still bound to the current CPU, only
     * to unpin itself immediately upon resuming execution.
     */
    sched_unpin();
    mtx_unlock(&pc->pc_cmap_lock);

    return (pa);
}

/*
 * Init a just-allocated page as an L2 page table(s) holder
 * and return its physical address.
 */
static __inline vm_paddr_t
pmap_pt2pg_init(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
    vm_paddr_t pa;
    pt2_entry_t *pte2p;

    /* Check page attributes. */
    if (m->md.pat_mode != pt_memattr)
        pmap_page_set_memattr(m, pt_memattr);

    /* Zero page and init wire counts. */
    pa = pmap_pt2pg_zero(m);
    pt2_wirecount_init(m);

    /*
     * Map page to PT2MAP address space for given pmap.
     * Note that PT2MAP space is shared with all pmaps.
     */
    if (pmap == kernel_pmap)
        pmap_kenter_pt2tab(va, PTE2_KPT(pa));
    else {
        pte2p = pmap_pt2tab_entry(pmap, va);
        pt2tab_store(pte2p, PTE2_KPT_NG(pa));
    }

    return (pa);
}

/*
 * Initialize the pmap module.
 * Called by vm_init, to initialize any structures that the pmap
 * system needs to map virtual memory.
 */
void
pmap_init(void)
{
    vm_size_t s;
    pt2_entry_t *pte2p, pte2;
    u_int i, pte1_idx, pv_npg;

    PDEBUG(1, printf("%s: phys_start = %#x\n", __func__, PHYSADDR));

    /*
     * Initialize the vm page array entries for kernel pmap's
     * L2 page table pages allocated in advance.
     */
    pte1_idx = pte1_index(KERNBASE - PT2MAP_SIZE);
    pte2p = kern_pt2tab_entry(KERNBASE - PT2MAP_SIZE);
    for (i = 0; i < nkpt2pg + NPG_IN_PT2TAB; i++, pte2p++) {
        vm_paddr_t pa;
        vm_page_t m;

        pte2 = pte2_load(pte2p);
        KASSERT(pte2_is_valid(pte2), ("%s: no valid entry", __func__));

        pa = pte2_pa(pte2);
        m = PHYS_TO_VM_PAGE(pa);
        KASSERT(m >= vm_page_array &&
            m < &vm_page_array[vm_page_array_size],
            ("%s: L2 page table page is out of range", __func__));

        m->pindex = pte1_idx;
        m->phys_addr = pa;
        pte1_idx += NPT2_IN_PG;
    }

    /*
     * Initialize the address space (zone) for the pv entries.  Set a
     * high water mark so that the system can recover from excessive
     * numbers of pv entries.
     */
    TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
    pv_entry_max = shpgperproc * maxproc + vm_cnt.v_page_count;
    TUNABLE_INT_FETCH("vm.pmap.pv_entry_max", &pv_entry_max);
    pv_entry_max = roundup(pv_entry_max, _NPCPV);
    pv_entry_high_water = 9 * (pv_entry_max / 10);

    /*
     * Are large page mappings enabled?
     */
    TUNABLE_INT_FETCH("vm.pmap.sp_enabled", &sp_enabled);
    if (sp_enabled) {
        KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0,
            ("%s: can't assign to pagesizes[1]", __func__));
        pagesizes[1] = PTE1_SIZE;
    }

    /*
     * Calculate the size of the pv head table for sections.
     * Handle the possibility that "vm_phys_segs[...].end" is zero.
     * Note that the table is only for sections which could be promoted.
     */
    first_managed_pa = pte1_trunc(vm_phys_segs[0].start);
    pv_npg = (pte1_trunc(vm_phys_segs[vm_phys_nsegs - 1].end - PAGE_SIZE)
        - first_managed_pa) / PTE1_SIZE + 1;

    /*
     * Allocate memory for the pv head table for sections.
     */
    s = (vm_size_t)(pv_npg * sizeof(struct md_page));
    s = round_page(s);
    pv_table = kmem_malloc(s, M_WAITOK | M_ZERO);
    for (i = 0; i < pv_npg; i++)
        TAILQ_INIT(&pv_table[i].pv_list);

    pv_maxchunks = MAX(pv_entry_max / _NPCPV, maxproc);
    pv_chunkbase = (struct pv_chunk *)kva_alloc(PAGE_SIZE * pv_maxchunks);
    if (pv_chunkbase == NULL)
        panic("%s: not enough kvm for pv chunks", __func__);
    pmap_pte2list_init(&pv_vafree, pv_chunkbase, pv_maxchunks);
}

/*
 * Add a list of wired pages to the kva.  This routine is only used for
 * temporary kernel mappings that do not need to have page modification
 * or references recorded.  Note that old mappings are simply written
 * over.  The page *must* be wired.
 * Note: SMP coherent.  Uses a ranged shootdown IPI.
 */
void
pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
{
    u_int anychanged;
    pt2_entry_t *epte2p, *pte2p, pte2;
    vm_page_t m;
    vm_paddr_t pa;

    anychanged = 0;
    pte2p = pt2map_entry(sva);
    epte2p = pte2p + count;
    while (pte2p < epte2p) {
        m = *ma++;
        pa = VM_PAGE_TO_PHYS(m);
        pte2 = pte2_load(pte2p);
        if ((pte2_pa(pte2) != pa) ||
            (pte2_attr(pte2) != vm_page_pte2_attr(m))) {
            anychanged++;
            pte2_store(pte2p, PTE2_KERN(pa, PTE2_AP_KRW,
                vm_page_pte2_attr(m)));
        }
        pte2p++;
    }
    if (__predict_false(anychanged))
        tlb_flush_range(sva, count * PAGE_SIZE);
}

/*
 * This routine tears out page mappings from the
 * kernel -- it is meant only for temporary mappings.
 * Note: SMP coherent.  Uses a ranged shootdown IPI.
Uses a ranged shootdown IPI. 1830 */ 1831 void 1832 pmap_qremove(vm_offset_t sva, int count) 1833 { 1834 vm_offset_t va; 1835 1836 va = sva; 1837 while (count-- > 0) { 1838 pmap_kremove(va); 1839 va += PAGE_SIZE; 1840 } 1841 tlb_flush_range(sva, va - sva); 1842 } 1843 1844 /* 1845 * Are we current address space or kernel? 1846 */ 1847 static __inline int 1848 pmap_is_current(pmap_t pmap) 1849 { 1850 1851 return (pmap == kernel_pmap || 1852 (pmap == vmspace_pmap(curthread->td_proc->p_vmspace))); 1853 } 1854 1855 /* 1856 * If the given pmap is not the current or kernel pmap, the returned 1857 * pte2 must be released by passing it to pmap_pte2_release(). 1858 */ 1859 static pt2_entry_t * 1860 pmap_pte2(pmap_t pmap, vm_offset_t va) 1861 { 1862 pt1_entry_t pte1; 1863 vm_paddr_t pt2pg_pa; 1864 1865 pte1 = pte1_load(pmap_pte1(pmap, va)); 1866 if (pte1_is_section(pte1)) 1867 panic("%s: attempt to map PTE1", __func__); 1868 if (pte1_is_link(pte1)) { 1869 /* Are we current address space or kernel? */ 1870 if (pmap_is_current(pmap)) 1871 return (pt2map_entry(va)); 1872 /* Note that L2 page table size is not equal to PAGE_SIZE. */ 1873 pt2pg_pa = trunc_page(pte1_link_pa(pte1)); 1874 mtx_lock(&PMAP2mutex); 1875 if (pte2_pa(pte2_load(PMAP2)) != pt2pg_pa) { 1876 pte2_store(PMAP2, PTE2_KPT(pt2pg_pa)); 1877 tlb_flush((vm_offset_t)PADDR2); 1878 } 1879 return (PADDR2 + (arm32_btop(va) & (NPTE2_IN_PG - 1))); 1880 } 1881 return (NULL); 1882 } 1883 1884 /* 1885 * Releases a pte2 that was obtained from pmap_pte2(). 1886 * Be prepared for the pte2p being NULL. 1887 */ 1888 static __inline void 1889 pmap_pte2_release(pt2_entry_t *pte2p) 1890 { 1891 1892 if ((pt2_entry_t *)(trunc_page((vm_offset_t)pte2p)) == PADDR2) { 1893 mtx_unlock(&PMAP2mutex); 1894 } 1895 } 1896 1897 /* 1898 * Super fast pmap_pte2 routine best used when scanning 1899 * the pv lists. This eliminates many coarse-grained 1900 * invltlb calls. Note that many of the pv list 1901 * scans are across different pmaps. It is very wasteful 1902 * to do an entire tlb flush for checking a single mapping. 1903 * 1904 * If the given pmap is not the current pmap, pvh_global_lock 1905 * must be held and curthread pinned to a CPU. 1906 */ 1907 static pt2_entry_t * 1908 pmap_pte2_quick(pmap_t pmap, vm_offset_t va) 1909 { 1910 pt1_entry_t pte1; 1911 vm_paddr_t pt2pg_pa; 1912 1913 pte1 = pte1_load(pmap_pte1(pmap, va)); 1914 if (pte1_is_section(pte1)) 1915 panic("%s: attempt to map PTE1", __func__); 1916 if (pte1_is_link(pte1)) { 1917 /* Are we current address space or kernel? */ 1918 if (pmap_is_current(pmap)) 1919 return (pt2map_entry(va)); 1920 rw_assert(&pvh_global_lock, RA_WLOCKED); 1921 KASSERT(curthread->td_pinned > 0, 1922 ("%s: curthread not pinned", __func__)); 1923 /* Note that L2 page table size is not equal to PAGE_SIZE. */ 1924 pt2pg_pa = trunc_page(pte1_link_pa(pte1)); 1925 if (pte2_pa(pte2_load(PMAP1)) != pt2pg_pa) { 1926 pte2_store(PMAP1, PTE2_KPT(pt2pg_pa)); 1927 #ifdef SMP 1928 PMAP1cpu = PCPU_GET(cpuid); 1929 #endif 1930 tlb_flush_local((vm_offset_t)PADDR1); 1931 PMAP1changed++; 1932 } else 1933 #ifdef SMP 1934 if (PMAP1cpu != PCPU_GET(cpuid)) { 1935 PMAP1cpu = PCPU_GET(cpuid); 1936 tlb_flush_local((vm_offset_t)PADDR1); 1937 PMAP1changedcpu++; 1938 } else 1939 #endif 1940 PMAP1unchanged++; 1941 return (PADDR1 + (arm32_btop(va) & (NPTE2_IN_PG - 1))); 1942 } 1943 return (NULL); 1944 } 1945 1946 /* 1947 * Routine: pmap_extract 1948 * Function: 1949 * Extract the physical page address associated 1950 * with the given map/virtual_address pair. 
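 *	A return value of 0 means that no valid mapping exists for
 *	the given virtual address.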
1951 */ 1952 vm_paddr_t 1953 pmap_extract(pmap_t pmap, vm_offset_t va) 1954 { 1955 vm_paddr_t pa; 1956 pt1_entry_t pte1; 1957 pt2_entry_t *pte2p; 1958 1959 PMAP_LOCK(pmap); 1960 pte1 = pte1_load(pmap_pte1(pmap, va)); 1961 if (pte1_is_section(pte1)) 1962 pa = pte1_pa(pte1) | (va & PTE1_OFFSET); 1963 else if (pte1_is_link(pte1)) { 1964 pte2p = pmap_pte2(pmap, va); 1965 pa = pte2_pa(pte2_load(pte2p)) | (va & PTE2_OFFSET); 1966 pmap_pte2_release(pte2p); 1967 } else 1968 pa = 0; 1969 PMAP_UNLOCK(pmap); 1970 return (pa); 1971 } 1972 1973 /* 1974 * Routine: pmap_extract_and_hold 1975 * Function: 1976 * Atomically extract and hold the physical page 1977 * with the given pmap and virtual address pair 1978 * if that mapping permits the given protection. 1979 */ 1980 vm_page_t 1981 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot) 1982 { 1983 vm_paddr_t pa; 1984 pt1_entry_t pte1; 1985 pt2_entry_t pte2, *pte2p; 1986 vm_page_t m; 1987 1988 m = NULL; 1989 PMAP_LOCK(pmap); 1990 pte1 = pte1_load(pmap_pte1(pmap, va)); 1991 if (pte1_is_section(pte1)) { 1992 if (!(pte1 & PTE1_RO) || !(prot & VM_PROT_WRITE)) { 1993 pa = pte1_pa(pte1) | (va & PTE1_OFFSET); 1994 m = PHYS_TO_VM_PAGE(pa); 1995 if (!vm_page_wire_mapped(m)) 1996 m = NULL; 1997 } 1998 } else if (pte1_is_link(pte1)) { 1999 pte2p = pmap_pte2(pmap, va); 2000 pte2 = pte2_load(pte2p); 2001 pmap_pte2_release(pte2p); 2002 if (pte2_is_valid(pte2) && 2003 (!(pte2 & PTE2_RO) || !(prot & VM_PROT_WRITE))) { 2004 pa = pte2_pa(pte2); 2005 m = PHYS_TO_VM_PAGE(pa); 2006 if (!vm_page_wire_mapped(m)) 2007 m = NULL; 2008 } 2009 } 2010 PMAP_UNLOCK(pmap); 2011 return (m); 2012 } 2013 2014 /* 2015 * Grow the number of kernel L2 page table entries, if needed. 2016 */ 2017 void 2018 pmap_growkernel(vm_offset_t addr) 2019 { 2020 vm_page_t m; 2021 vm_paddr_t pt2pg_pa, pt2_pa; 2022 pt1_entry_t pte1; 2023 pt2_entry_t pte2; 2024 2025 PDEBUG(1, printf("%s: addr = %#x\n", __func__, addr)); 2026 /* 2027 * All the time kernel_vm_end is first KVA for which underlying 2028 * L2 page table is either not allocated or linked from L1 page table 2029 * (not considering sections). Except for two possible cases: 2030 * 2031 * (1) in the very beginning as long as pmap_growkernel() was 2032 * not called, it could be first unused KVA (which is not 2033 * rounded up to PTE1_SIZE), 2034 * 2035 * (2) when all KVA space is mapped and vm_map_max(kernel_map) 2036 * address is not rounded up to PTE1_SIZE. (For example, 2037 * it could be 0xFFFFFFFF.) 2038 */ 2039 kernel_vm_end = pte1_roundup(kernel_vm_end); 2040 mtx_assert(&kernel_map->system_mtx, MA_OWNED); 2041 addr = roundup2(addr, PTE1_SIZE); 2042 if (addr - 1 >= vm_map_max(kernel_map)) 2043 addr = vm_map_max(kernel_map); 2044 while (kernel_vm_end < addr) { 2045 pte1 = pte1_load(kern_pte1(kernel_vm_end)); 2046 if (pte1_is_valid(pte1)) { 2047 kernel_vm_end += PTE1_SIZE; 2048 if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) { 2049 kernel_vm_end = vm_map_max(kernel_map); 2050 break; 2051 } 2052 continue; 2053 } 2054 2055 /* 2056 * kernel_vm_end_new is used in pmap_pinit() when kernel 2057 * mappings are entered to new pmap all at once to avoid race 2058 * between pmap_kenter_pte1() and kernel_vm_end increase. 2059 * The same aplies to pmap_kenter_pt2tab(). 2060 */ 2061 kernel_vm_end_new = kernel_vm_end + PTE1_SIZE; 2062 2063 pte2 = pt2tab_load(kern_pt2tab_entry(kernel_vm_end)); 2064 if (!pte2_is_valid(pte2)) { 2065 /* 2066 * Install new PT2s page into kernel PT2TAB. 
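			 * The new page's pindex is the first pte1 index it
			 * covers (rounded down with ~PT2PG_MASK), the same
			 * convention _pmap_allocpte2() uses for user PT2
			 * pages.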
2067 */ 2068 m = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT | 2069 VM_ALLOC_WIRED | VM_ALLOC_ZERO); 2070 if (m == NULL) 2071 panic("%s: no memory to grow kernel", __func__); 2072 m->pindex = pte1_index(kernel_vm_end) & ~PT2PG_MASK; 2073 2074 /* 2075 * QQQ: To link all new L2 page tables from L1 page 2076 * table now and so pmap_kenter_pte1() them 2077 * at once together with pmap_kenter_pt2tab() 2078 * could be nice speed up. However, 2079 * pmap_growkernel() does not happen so often... 2080 * QQQ: The other TTBR is another option. 2081 */ 2082 pt2pg_pa = pmap_pt2pg_init(kernel_pmap, kernel_vm_end, 2083 m); 2084 } else 2085 pt2pg_pa = pte2_pa(pte2); 2086 2087 pt2_pa = page_pt2pa(pt2pg_pa, pte1_index(kernel_vm_end)); 2088 pmap_kenter_pte1(kernel_vm_end, PTE1_LINK(pt2_pa)); 2089 2090 kernel_vm_end = kernel_vm_end_new; 2091 if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) { 2092 kernel_vm_end = vm_map_max(kernel_map); 2093 break; 2094 } 2095 } 2096 } 2097 2098 static int 2099 kvm_size(SYSCTL_HANDLER_ARGS) 2100 { 2101 unsigned long ksize = vm_max_kernel_address - KERNBASE; 2102 2103 return (sysctl_handle_long(oidp, &ksize, 0, req)); 2104 } 2105 SYSCTL_PROC(_vm, OID_AUTO, kvm_size, 2106 CTLTYPE_LONG | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 0, 0, kvm_size, "IU", 2107 "Size of KVM"); 2108 2109 static int 2110 kvm_free(SYSCTL_HANDLER_ARGS) 2111 { 2112 unsigned long kfree = vm_max_kernel_address - kernel_vm_end; 2113 2114 return (sysctl_handle_long(oidp, &kfree, 0, req)); 2115 } 2116 SYSCTL_PROC(_vm, OID_AUTO, kvm_free, 2117 CTLTYPE_LONG | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 0, 0, kvm_free, "IU", 2118 "Amount of KVM free"); 2119 2120 /*********************************************** 2121 * 2122 * Pmap allocation/deallocation routines. 2123 * 2124 ***********************************************/ 2125 2126 /* 2127 * Initialize the pmap for the swapper process. 2128 */ 2129 void 2130 pmap_pinit0(pmap_t pmap) 2131 { 2132 PDEBUG(1, printf("%s: pmap = %p\n", __func__, pmap)); 2133 2134 PMAP_LOCK_INIT(pmap); 2135 2136 /* 2137 * Kernel page table directory and pmap stuff around is already 2138 * initialized, we are using it right now and here. So, finish 2139 * only PMAP structures initialization for process0 ... 2140 * 2141 * Since the L1 page table and PT2TAB is shared with the kernel pmap, 2142 * which is already included in the list "allpmaps", this pmap does 2143 * not need to be inserted into that list. 2144 */ 2145 pmap->pm_pt1 = kern_pt1; 2146 pmap->pm_pt2tab = kern_pt2tab; 2147 CPU_ZERO(&pmap->pm_active); 2148 PCPU_SET(curpmap, pmap); 2149 TAILQ_INIT(&pmap->pm_pvchunk); 2150 bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 2151 CPU_SET(0, &pmap->pm_active); 2152 } 2153 2154 static __inline void 2155 pte1_copy_nosync(pt1_entry_t *spte1p, pt1_entry_t *dpte1p, vm_offset_t sva, 2156 vm_offset_t eva) 2157 { 2158 u_int idx, count; 2159 2160 idx = pte1_index(sva); 2161 count = (pte1_index(eva) - idx + 1) * sizeof(pt1_entry_t); 2162 bcopy(spte1p + idx, dpte1p + idx, count); 2163 } 2164 2165 static __inline void 2166 pt2tab_copy_nosync(pt2_entry_t *spte2p, pt2_entry_t *dpte2p, vm_offset_t sva, 2167 vm_offset_t eva) 2168 { 2169 u_int idx, count; 2170 2171 idx = pt2tab_index(sva); 2172 count = (pt2tab_index(eva) - idx + 1) * sizeof(pt2_entry_t); 2173 bcopy(spte2p + idx, dpte2p + idx, count); 2174 } 2175 2176 /* 2177 * Initialize a preallocated and zeroed pmap structure, 2178 * such as one in a vmspace structure. 
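 *
 * Returns 1 on success and 0 if the L1 page table or the PT2TAB
 * cannot be allocated.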
2179 */ 2180 int 2181 pmap_pinit(pmap_t pmap) 2182 { 2183 pt1_entry_t *pte1p; 2184 pt2_entry_t *pte2p; 2185 vm_paddr_t pa, pt2tab_pa; 2186 u_int i; 2187 2188 PDEBUG(6, printf("%s: pmap = %p, pm_pt1 = %p\n", __func__, pmap, 2189 pmap->pm_pt1)); 2190 2191 /* 2192 * No need to allocate L2 page table space yet but we do need 2193 * a valid L1 page table and PT2TAB table. 2194 * 2195 * Install shared kernel mappings to these tables. It's a little 2196 * tricky as some parts of KVA are reserved for vectors, devices, 2197 * and whatever else. These parts are supposed to be above 2198 * vm_max_kernel_address. Thus two regions should be installed: 2199 * 2200 * (1) <KERNBASE, kernel_vm_end), 2201 * (2) <vm_max_kernel_address, 0xFFFFFFFF>. 2202 * 2203 * QQQ: The second region should be stable enough to be installed 2204 * only once in time when the tables are allocated. 2205 * QQQ: Maybe copy of both regions at once could be faster ... 2206 * QQQ: Maybe the other TTBR is an option. 2207 * 2208 * Finally, install own PT2TAB table to these tables. 2209 */ 2210 2211 if (pmap->pm_pt1 == NULL) { 2212 pmap->pm_pt1 = kmem_alloc_contig(NB_IN_PT1, 2213 M_NOWAIT | M_ZERO, 0, -1UL, NB_IN_PT1, 0, pt_memattr); 2214 if (pmap->pm_pt1 == NULL) 2215 return (0); 2216 } 2217 if (pmap->pm_pt2tab == NULL) { 2218 /* 2219 * QQQ: (1) PT2TAB must be contiguous. If PT2TAB is one page 2220 * only, what should be the only size for 32 bit systems, 2221 * then we could allocate it with vm_page_alloc() and all 2222 * the stuff needed as other L2 page table pages. 2223 * (2) Note that a process PT2TAB is special L2 page table 2224 * page. Its mapping in kernel_arena is permanent and can 2225 * be used no matter which process is current. Its mapping 2226 * in PT2MAP can be used only for current process. 2227 */ 2228 pmap->pm_pt2tab = kmem_alloc_attr(NB_IN_PT2TAB, 2229 M_NOWAIT | M_ZERO, 0, -1UL, pt_memattr); 2230 if (pmap->pm_pt2tab == NULL) { 2231 /* 2232 * QQQ: As struct pmap is allocated from UMA with 2233 * UMA_ZONE_NOFREE flag, it's important to leave 2234 * no allocation in pmap if initialization failed. 2235 */ 2236 kmem_free(pmap->pm_pt1, NB_IN_PT1); 2237 pmap->pm_pt1 = NULL; 2238 return (0); 2239 } 2240 /* 2241 * QQQ: Each L2 page table page vm_page_t has pindex set to 2242 * pte1 index of virtual address mapped by this page. 2243 * It's not valid for non kernel PT2TABs themselves. 2244 * The pindex of these pages can not be altered because 2245 * of the way how they are allocated now. However, it 2246 * should not be a problem. 2247 */ 2248 } 2249 2250 mtx_lock_spin(&allpmaps_lock); 2251 /* 2252 * To avoid race with pmap_kenter_pte1() and pmap_kenter_pt2tab(), 2253 * kernel_vm_end_new is used here instead of kernel_vm_end. 2254 */ 2255 pte1_copy_nosync(kern_pt1, pmap->pm_pt1, KERNBASE, 2256 kernel_vm_end_new - 1); 2257 pte1_copy_nosync(kern_pt1, pmap->pm_pt1, vm_max_kernel_address, 2258 0xFFFFFFFF); 2259 pt2tab_copy_nosync(kern_pt2tab, pmap->pm_pt2tab, KERNBASE, 2260 kernel_vm_end_new - 1); 2261 pt2tab_copy_nosync(kern_pt2tab, pmap->pm_pt2tab, vm_max_kernel_address, 2262 0xFFFFFFFF); 2263 LIST_INSERT_HEAD(&allpmaps, pmap, pm_list); 2264 mtx_unlock_spin(&allpmaps_lock); 2265 2266 /* 2267 * Store PT2MAP PT2 pages (a.k.a. PT2TAB) in PT2TAB itself. 2268 * I.e. self reference mapping. The PT2TAB is private, however mapped 2269 * into shared PT2MAP space, so the mapping should be not global. 
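	 * (Non-global here, i.e. PTE2_KPT_NG, should make the resulting
	 * TLB entries ASID-tagged, so one pmap's view of PT2MAP cannot be
	 * reached through stale TLB entries while another pmap is active.)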
2270 */ 2271 pt2tab_pa = vtophys(pmap->pm_pt2tab); 2272 pte2p = pmap_pt2tab_entry(pmap, (vm_offset_t)PT2MAP); 2273 for (pa = pt2tab_pa, i = 0; i < NPG_IN_PT2TAB; i++, pa += PTE2_SIZE) { 2274 pt2tab_store(pte2p++, PTE2_KPT_NG(pa)); 2275 } 2276 2277 /* Insert PT2MAP PT2s into pmap PT1. */ 2278 pte1p = pmap_pte1(pmap, (vm_offset_t)PT2MAP); 2279 for (pa = pt2tab_pa, i = 0; i < NPT2_IN_PT2TAB; i++, pa += NB_IN_PT2) { 2280 pte1_store(pte1p++, PTE1_LINK(pa)); 2281 } 2282 2283 /* 2284 * Now synchronize new mapping which was made above. 2285 */ 2286 pte1_sync_range(pmap->pm_pt1, NB_IN_PT1); 2287 pte2_sync_range(pmap->pm_pt2tab, NB_IN_PT2TAB); 2288 2289 CPU_ZERO(&pmap->pm_active); 2290 TAILQ_INIT(&pmap->pm_pvchunk); 2291 bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 2292 2293 return (1); 2294 } 2295 2296 #ifdef INVARIANTS 2297 static boolean_t 2298 pt2tab_user_is_empty(pt2_entry_t *tab) 2299 { 2300 u_int i, end; 2301 2302 end = pt2tab_index(VM_MAXUSER_ADDRESS); 2303 for (i = 0; i < end; i++) 2304 if (tab[i] != 0) return (FALSE); 2305 return (TRUE); 2306 } 2307 #endif 2308 /* 2309 * Release any resources held by the given physical map. 2310 * Called when a pmap initialized by pmap_pinit is being released. 2311 * Should only be called if the map contains no valid mappings. 2312 */ 2313 void 2314 pmap_release(pmap_t pmap) 2315 { 2316 #ifdef INVARIANTS 2317 vm_offset_t start, end; 2318 #endif 2319 KASSERT(pmap->pm_stats.resident_count == 0, 2320 ("%s: pmap resident count %ld != 0", __func__, 2321 pmap->pm_stats.resident_count)); 2322 KASSERT(pt2tab_user_is_empty(pmap->pm_pt2tab), 2323 ("%s: has allocated user PT2(s)", __func__)); 2324 KASSERT(CPU_EMPTY(&pmap->pm_active), 2325 ("%s: pmap %p is active on some CPU(s)", __func__, pmap)); 2326 2327 mtx_lock_spin(&allpmaps_lock); 2328 LIST_REMOVE(pmap, pm_list); 2329 mtx_unlock_spin(&allpmaps_lock); 2330 2331 #ifdef INVARIANTS 2332 start = pte1_index(KERNBASE) * sizeof(pt1_entry_t); 2333 end = (pte1_index(0xFFFFFFFF) + 1) * sizeof(pt1_entry_t); 2334 bzero((char *)pmap->pm_pt1 + start, end - start); 2335 2336 start = pt2tab_index(KERNBASE) * sizeof(pt2_entry_t); 2337 end = (pt2tab_index(0xFFFFFFFF) + 1) * sizeof(pt2_entry_t); 2338 bzero((char *)pmap->pm_pt2tab + start, end - start); 2339 #endif 2340 /* 2341 * We are leaving PT1 and PT2TAB allocated on released pmap, 2342 * so hopefully UMA vmspace_zone will always be inited with 2343 * UMA_ZONE_NOFREE flag. 2344 */ 2345 } 2346 2347 /********************************************************* 2348 * 2349 * L2 table pages and their pages management routines. 2350 * 2351 *********************************************************/ 2352 2353 /* 2354 * Virtual interface for L2 page table wire counting. 2355 * 2356 * Each L2 page table in a page has own counter which counts a number of 2357 * valid mappings in a table. Global page counter counts mappings in all 2358 * tables in a page plus a single itself mapping in PT2TAB. 2359 * 2360 * During a promotion we leave the associated L2 page table counter 2361 * untouched, so the table (strictly speaking a page which holds it) 2362 * is never freed if promoted. 2363 * 2364 * If a page m->ref_count == 1 then no valid mappings exist in any L2 page 2365 * table in the page and the page itself is only mapped in PT2TAB. 2366 */ 2367 2368 static __inline void 2369 pt2_wirecount_init(vm_page_t m) 2370 { 2371 u_int i; 2372 2373 /* 2374 * Note: A page m is allocated with VM_ALLOC_WIRED flag and 2375 * m->ref_count should be already set correctly. 
2376 * So, there is no need to set it again herein. 2377 */ 2378 for (i = 0; i < NPT2_IN_PG; i++) 2379 m->md.pt2_wirecount[i] = 0; 2380 } 2381 2382 static __inline void 2383 pt2_wirecount_inc(vm_page_t m, uint32_t pte1_idx) 2384 { 2385 2386 /* 2387 * Note: A just modificated pte2 (i.e. already allocated) 2388 * is acquiring one extra reference which must be 2389 * explicitly cleared. It influences the KASSERTs herein. 2390 * All L2 page tables in a page always belong to the same 2391 * pmap, so we allow only one extra reference for the page. 2392 */ 2393 KASSERT(m->md.pt2_wirecount[pte1_idx & PT2PG_MASK] < (NPTE2_IN_PT2 + 1), 2394 ("%s: PT2 is overflowing ...", __func__)); 2395 KASSERT(m->ref_count <= (NPTE2_IN_PG + 1), 2396 ("%s: PT2PG is overflowing ...", __func__)); 2397 2398 m->ref_count++; 2399 m->md.pt2_wirecount[pte1_idx & PT2PG_MASK]++; 2400 } 2401 2402 static __inline void 2403 pt2_wirecount_dec(vm_page_t m, uint32_t pte1_idx) 2404 { 2405 2406 KASSERT(m->md.pt2_wirecount[pte1_idx & PT2PG_MASK] != 0, 2407 ("%s: PT2 is underflowing ...", __func__)); 2408 KASSERT(m->ref_count > 1, 2409 ("%s: PT2PG is underflowing ...", __func__)); 2410 2411 m->ref_count--; 2412 m->md.pt2_wirecount[pte1_idx & PT2PG_MASK]--; 2413 } 2414 2415 static __inline void 2416 pt2_wirecount_set(vm_page_t m, uint32_t pte1_idx, uint16_t count) 2417 { 2418 2419 KASSERT(count <= NPTE2_IN_PT2, 2420 ("%s: invalid count %u", __func__, count)); 2421 KASSERT(m->ref_count > m->md.pt2_wirecount[pte1_idx & PT2PG_MASK], 2422 ("%s: PT2PG corrupting (%u, %u) ...", __func__, m->ref_count, 2423 m->md.pt2_wirecount[pte1_idx & PT2PG_MASK])); 2424 2425 m->ref_count -= m->md.pt2_wirecount[pte1_idx & PT2PG_MASK]; 2426 m->ref_count += count; 2427 m->md.pt2_wirecount[pte1_idx & PT2PG_MASK] = count; 2428 2429 KASSERT(m->ref_count <= (NPTE2_IN_PG + 1), 2430 ("%s: PT2PG is overflowed (%u) ...", __func__, m->ref_count)); 2431 } 2432 2433 static __inline uint32_t 2434 pt2_wirecount_get(vm_page_t m, uint32_t pte1_idx) 2435 { 2436 2437 return (m->md.pt2_wirecount[pte1_idx & PT2PG_MASK]); 2438 } 2439 2440 static __inline boolean_t 2441 pt2_is_empty(vm_page_t m, vm_offset_t va) 2442 { 2443 2444 return (m->md.pt2_wirecount[pte1_index(va) & PT2PG_MASK] == 0); 2445 } 2446 2447 static __inline boolean_t 2448 pt2_is_full(vm_page_t m, vm_offset_t va) 2449 { 2450 2451 return (m->md.pt2_wirecount[pte1_index(va) & PT2PG_MASK] == 2452 NPTE2_IN_PT2); 2453 } 2454 2455 static __inline boolean_t 2456 pt2pg_is_empty(vm_page_t m) 2457 { 2458 2459 return (m->ref_count == 1); 2460 } 2461 2462 /* 2463 * This routine is called if the L2 page table 2464 * is not mapped correctly. 2465 */ 2466 static vm_page_t 2467 _pmap_allocpte2(pmap_t pmap, vm_offset_t va, u_int flags) 2468 { 2469 uint32_t pte1_idx; 2470 pt1_entry_t *pte1p; 2471 pt2_entry_t pte2; 2472 vm_page_t m; 2473 vm_paddr_t pt2pg_pa, pt2_pa; 2474 2475 pte1_idx = pte1_index(va); 2476 pte1p = pmap->pm_pt1 + pte1_idx; 2477 2478 KASSERT(pte1_load(pte1p) == 0, 2479 ("%s: pm_pt1[%#x] is not zero: %#x", __func__, pte1_idx, 2480 pte1_load(pte1p))); 2481 2482 pte2 = pt2tab_load(pmap_pt2tab_entry(pmap, va)); 2483 if (!pte2_is_valid(pte2)) { 2484 /* 2485 * Install new PT2s page into pmap PT2TAB. 2486 */ 2487 m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO); 2488 if (m == NULL) { 2489 if ((flags & PMAP_ENTER_NOSLEEP) == 0) { 2490 PMAP_UNLOCK(pmap); 2491 rw_wunlock(&pvh_global_lock); 2492 vm_wait(NULL); 2493 rw_wlock(&pvh_global_lock); 2494 PMAP_LOCK(pmap); 2495 } 2496 2497 /* 2498 * Indicate the need to retry. 
While waiting, 2499 * the L2 page table page may have been allocated. 2500 */ 2501 return (NULL); 2502 } 2503 m->pindex = pte1_idx & ~PT2PG_MASK; 2504 pmap->pm_stats.resident_count++; 2505 pt2pg_pa = pmap_pt2pg_init(pmap, va, m); 2506 } else { 2507 pt2pg_pa = pte2_pa(pte2); 2508 m = PHYS_TO_VM_PAGE(pt2pg_pa); 2509 } 2510 2511 pt2_wirecount_inc(m, pte1_idx); 2512 pt2_pa = page_pt2pa(pt2pg_pa, pte1_idx); 2513 pte1_store(pte1p, PTE1_LINK(pt2_pa)); 2514 2515 return (m); 2516 } 2517 2518 static vm_page_t 2519 pmap_allocpte2(pmap_t pmap, vm_offset_t va, u_int flags) 2520 { 2521 u_int pte1_idx; 2522 pt1_entry_t *pte1p, pte1; 2523 vm_page_t m; 2524 2525 pte1_idx = pte1_index(va); 2526 retry: 2527 pte1p = pmap->pm_pt1 + pte1_idx; 2528 pte1 = pte1_load(pte1p); 2529 2530 /* 2531 * This supports switching from a 1MB page to a 2532 * normal 4K page. 2533 */ 2534 if (pte1_is_section(pte1)) { 2535 (void)pmap_demote_pte1(pmap, pte1p, va); 2536 /* 2537 * Reload pte1 after demotion. 2538 * 2539 * Note: Demotion can even fail as either PT2 is not find for 2540 * the virtual address or PT2PG can not be allocated. 2541 */ 2542 pte1 = pte1_load(pte1p); 2543 } 2544 2545 /* 2546 * If the L2 page table page is mapped, we just increment the 2547 * hold count, and activate it. 2548 */ 2549 if (pte1_is_link(pte1)) { 2550 m = PHYS_TO_VM_PAGE(pte1_link_pa(pte1)); 2551 pt2_wirecount_inc(m, pte1_idx); 2552 } else { 2553 /* 2554 * Here if the PT2 isn't mapped, or if it has 2555 * been deallocated. 2556 */ 2557 m = _pmap_allocpte2(pmap, va, flags); 2558 if (m == NULL && (flags & PMAP_ENTER_NOSLEEP) == 0) 2559 goto retry; 2560 } 2561 2562 return (m); 2563 } 2564 2565 /* 2566 * Schedule the specified unused L2 page table page to be freed. Specifically, 2567 * add the page to the specified list of pages that will be released to the 2568 * physical memory manager after the TLB has been updated. 2569 */ 2570 static __inline void 2571 pmap_add_delayed_free_list(vm_page_t m, struct spglist *free) 2572 { 2573 2574 /* 2575 * Put page on a list so that it is released after 2576 * *ALL* TLB shootdown is done 2577 */ 2578 #ifdef PMAP_DEBUG 2579 pmap_zero_page_check(m); 2580 #endif 2581 m->flags |= PG_ZERO; 2582 SLIST_INSERT_HEAD(free, m, plinks.s.ss); 2583 } 2584 2585 /* 2586 * Unwire L2 page tables page. 2587 */ 2588 static void 2589 pmap_unwire_pt2pg(pmap_t pmap, vm_offset_t va, vm_page_t m) 2590 { 2591 pt1_entry_t *pte1p, opte1 __unused; 2592 pt2_entry_t *pte2p; 2593 uint32_t i; 2594 2595 KASSERT(pt2pg_is_empty(m), 2596 ("%s: pmap %p PT2PG %p wired", __func__, pmap, m)); 2597 2598 /* 2599 * Unmap all L2 page tables in the page from L1 page table. 2600 * 2601 * QQQ: Individual L2 page tables (except the last one) can be unmapped 2602 * earlier. However, we are doing that this way. 2603 */ 2604 KASSERT(m->pindex == (pte1_index(va) & ~PT2PG_MASK), 2605 ("%s: pmap %p va %#x PT2PG %p bad index", __func__, pmap, va, m)); 2606 pte1p = pmap->pm_pt1 + m->pindex; 2607 for (i = 0; i < NPT2_IN_PG; i++, pte1p++) { 2608 KASSERT(m->md.pt2_wirecount[i] == 0, 2609 ("%s: pmap %p PT2 %u (PG %p) wired", __func__, pmap, i, m)); 2610 opte1 = pte1_load(pte1p); 2611 if (pte1_is_link(opte1)) { 2612 pte1_clear(pte1p); 2613 /* 2614 * Flush intermediate TLB cache. 
2615 */ 2616 pmap_tlb_flush(pmap, (m->pindex + i) << PTE1_SHIFT); 2617 } 2618 #ifdef INVARIANTS 2619 else 2620 KASSERT((opte1 == 0) || pte1_is_section(opte1), 2621 ("%s: pmap %p va %#x bad pte1 %x at %u", __func__, 2622 pmap, va, opte1, i)); 2623 #endif 2624 } 2625 2626 /* 2627 * Unmap the page from PT2TAB. 2628 */ 2629 pte2p = pmap_pt2tab_entry(pmap, va); 2630 (void)pt2tab_load_clear(pte2p); 2631 pmap_tlb_flush(pmap, pt2map_pt2pg(va)); 2632 2633 m->ref_count = 0; 2634 pmap->pm_stats.resident_count--; 2635 2636 /* 2637 * This barrier is so that the ordinary store unmapping 2638 * the L2 page table page is globally performed before TLB shoot- 2639 * down is begun. 2640 */ 2641 wmb(); 2642 vm_wire_sub(1); 2643 } 2644 2645 /* 2646 * Decrements a L2 page table page's wire count, which is used to record the 2647 * number of valid page table entries within the page. If the wire count 2648 * drops to zero, then the page table page is unmapped. Returns TRUE if the 2649 * page table page was unmapped and FALSE otherwise. 2650 */ 2651 static __inline boolean_t 2652 pmap_unwire_pt2(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free) 2653 { 2654 pt2_wirecount_dec(m, pte1_index(va)); 2655 if (pt2pg_is_empty(m)) { 2656 /* 2657 * QQQ: Wire count is zero, so whole page should be zero and 2658 * we can set PG_ZERO flag to it. 2659 * Note that when promotion is enabled, it takes some 2660 * more efforts. See pmap_unwire_pt2_all() below. 2661 */ 2662 pmap_unwire_pt2pg(pmap, va, m); 2663 pmap_add_delayed_free_list(m, free); 2664 return (TRUE); 2665 } else 2666 return (FALSE); 2667 } 2668 2669 /* 2670 * Drop a L2 page table page's wire count at once, which is used to record 2671 * the number of valid L2 page table entries within the page. If the wire 2672 * count drops to zero, then the L2 page table page is unmapped. 2673 */ 2674 static __inline void 2675 pmap_unwire_pt2_all(pmap_t pmap, vm_offset_t va, vm_page_t m, 2676 struct spglist *free) 2677 { 2678 u_int pte1_idx = pte1_index(va); 2679 2680 KASSERT(m->pindex == (pte1_idx & ~PT2PG_MASK), 2681 ("%s: PT2 page's pindex is wrong", __func__)); 2682 KASSERT(m->ref_count > pt2_wirecount_get(m, pte1_idx), 2683 ("%s: bad pt2 wire count %u > %u", __func__, m->ref_count, 2684 pt2_wirecount_get(m, pte1_idx))); 2685 2686 /* 2687 * It's possible that the L2 page table was never used. 2688 * It happened in case that a section was created without promotion. 2689 */ 2690 if (pt2_is_full(m, va)) { 2691 pt2_wirecount_set(m, pte1_idx, 0); 2692 2693 /* 2694 * QQQ: We clear L2 page table now, so when L2 page table page 2695 * is going to be freed, we can set it PG_ZERO flag ... 2696 * This function is called only on section mappings, so 2697 * hopefully it's not to big overload. 2698 * 2699 * XXX: If pmap is current, existing PT2MAP mapping could be 2700 * used for zeroing. 2701 */ 2702 pmap_zero_page_area(m, page_pt2off(pte1_idx), NB_IN_PT2); 2703 } 2704 #ifdef INVARIANTS 2705 else 2706 KASSERT(pt2_is_empty(m, va), ("%s: PT2 is not empty (%u)", 2707 __func__, pt2_wirecount_get(m, pte1_idx))); 2708 #endif 2709 if (pt2pg_is_empty(m)) { 2710 pmap_unwire_pt2pg(pmap, va, m); 2711 pmap_add_delayed_free_list(m, free); 2712 } 2713 } 2714 2715 /* 2716 * After removing a L2 page table entry, this routine is used to 2717 * conditionally free the page, and manage the hold/wire counts. 
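 *
 * The page itself is queued to "free" only when its ref_count falls
 * back to the single PT2TAB self-mapping; see pmap_unwire_pt2() and
 * pmap_unwire_pt2pg().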
2718 */ 2719 static boolean_t 2720 pmap_unuse_pt2(pmap_t pmap, vm_offset_t va, struct spglist *free) 2721 { 2722 pt1_entry_t pte1; 2723 vm_page_t mpte; 2724 2725 if (va >= VM_MAXUSER_ADDRESS) 2726 return (FALSE); 2727 pte1 = pte1_load(pmap_pte1(pmap, va)); 2728 mpte = PHYS_TO_VM_PAGE(pte1_link_pa(pte1)); 2729 return (pmap_unwire_pt2(pmap, va, mpte, free)); 2730 } 2731 2732 /************************************* 2733 * 2734 * Page management routines. 2735 * 2736 *************************************/ 2737 2738 static const uint32_t pc_freemask[_NPCM] = { 2739 [0 ... _NPCM - 2] = PC_FREEN, 2740 [_NPCM - 1] = PC_FREEL 2741 }; 2742 2743 SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0, 2744 "Current number of pv entries"); 2745 2746 #ifdef PV_STATS 2747 static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail; 2748 2749 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0, 2750 "Current number of pv entry chunks"); 2751 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0, 2752 "Current number of pv entry chunks allocated"); 2753 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0, 2754 "Current number of pv entry chunks frees"); 2755 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 2756 0, "Number of times tried to get a chunk page but failed."); 2757 2758 static long pv_entry_frees, pv_entry_allocs; 2759 static int pv_entry_spare; 2760 2761 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0, 2762 "Current number of pv entry frees"); 2763 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 2764 0, "Current number of pv entry allocs"); 2765 SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0, 2766 "Current number of spare pv entries"); 2767 #endif 2768 2769 /* 2770 * Is given page managed? 2771 */ 2772 static __inline bool 2773 is_managed(vm_paddr_t pa) 2774 { 2775 vm_page_t m; 2776 2777 m = PHYS_TO_VM_PAGE(pa); 2778 if (m == NULL) 2779 return (false); 2780 return ((m->oflags & VPO_UNMANAGED) == 0); 2781 } 2782 2783 static __inline bool 2784 pte1_is_managed(pt1_entry_t pte1) 2785 { 2786 2787 return (is_managed(pte1_pa(pte1))); 2788 } 2789 2790 static __inline bool 2791 pte2_is_managed(pt2_entry_t pte2) 2792 { 2793 2794 return (is_managed(pte2_pa(pte2))); 2795 } 2796 2797 /* 2798 * We are in a serious low memory condition. Resort to 2799 * drastic measures to free some pages so we can allocate 2800 * another pv entry chunk. 2801 */ 2802 static vm_page_t 2803 pmap_pv_reclaim(pmap_t locked_pmap) 2804 { 2805 struct pch newtail; 2806 struct pv_chunk *pc; 2807 struct md_page *pvh; 2808 pt1_entry_t *pte1p; 2809 pmap_t pmap; 2810 pt2_entry_t *pte2p, tpte2; 2811 pv_entry_t pv; 2812 vm_offset_t va; 2813 vm_page_t m, m_pc; 2814 struct spglist free; 2815 uint32_t inuse; 2816 int bit, field, freed; 2817 2818 PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED); 2819 pmap = NULL; 2820 m_pc = NULL; 2821 SLIST_INIT(&free); 2822 TAILQ_INIT(&newtail); 2823 while ((pc = TAILQ_FIRST(&pv_chunks)) != NULL && (pv_vafree == 0 || 2824 SLIST_EMPTY(&free))) { 2825 TAILQ_REMOVE(&pv_chunks, pc, pc_lru); 2826 if (pmap != pc->pc_pmap) { 2827 if (pmap != NULL) { 2828 if (pmap != locked_pmap) 2829 PMAP_UNLOCK(pmap); 2830 } 2831 pmap = pc->pc_pmap; 2832 /* Avoid deadlock and lock recursion. 
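			 * Locks are taken in ascending pmap address order:
			 * a pmap above the locked one may be locked
			 * unconditionally, otherwise only a trylock is
			 * attempted and the chunk is skipped on failure.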
*/ 2833 if (pmap > locked_pmap) 2834 PMAP_LOCK(pmap); 2835 else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap)) { 2836 pmap = NULL; 2837 TAILQ_INSERT_TAIL(&newtail, pc, pc_lru); 2838 continue; 2839 } 2840 } 2841 2842 /* 2843 * Destroy every non-wired, 4 KB page mapping in the chunk. 2844 */ 2845 freed = 0; 2846 for (field = 0; field < _NPCM; field++) { 2847 for (inuse = ~pc->pc_map[field] & pc_freemask[field]; 2848 inuse != 0; inuse &= ~(1UL << bit)) { 2849 bit = ffs(inuse) - 1; 2850 pv = &pc->pc_pventry[field * 32 + bit]; 2851 va = pv->pv_va; 2852 pte1p = pmap_pte1(pmap, va); 2853 if (pte1_is_section(pte1_load(pte1p))) 2854 continue; 2855 pte2p = pmap_pte2(pmap, va); 2856 tpte2 = pte2_load(pte2p); 2857 if ((tpte2 & PTE2_W) == 0) 2858 tpte2 = pte2_load_clear(pte2p); 2859 pmap_pte2_release(pte2p); 2860 if ((tpte2 & PTE2_W) != 0) 2861 continue; 2862 KASSERT(tpte2 != 0, 2863 ("pmap_pv_reclaim: pmap %p va %#x zero pte", 2864 pmap, va)); 2865 pmap_tlb_flush(pmap, va); 2866 m = PHYS_TO_VM_PAGE(pte2_pa(tpte2)); 2867 if (pte2_is_dirty(tpte2)) 2868 vm_page_dirty(m); 2869 if ((tpte2 & PTE2_A) != 0) 2870 vm_page_aflag_set(m, PGA_REFERENCED); 2871 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); 2872 if (TAILQ_EMPTY(&m->md.pv_list) && 2873 (m->flags & PG_FICTITIOUS) == 0) { 2874 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 2875 if (TAILQ_EMPTY(&pvh->pv_list)) { 2876 vm_page_aflag_clear(m, 2877 PGA_WRITEABLE); 2878 } 2879 } 2880 pc->pc_map[field] |= 1UL << bit; 2881 pmap_unuse_pt2(pmap, va, &free); 2882 freed++; 2883 } 2884 } 2885 if (freed == 0) { 2886 TAILQ_INSERT_TAIL(&newtail, pc, pc_lru); 2887 continue; 2888 } 2889 /* Every freed mapping is for a 4 KB page. */ 2890 pmap->pm_stats.resident_count -= freed; 2891 PV_STAT(pv_entry_frees += freed); 2892 PV_STAT(pv_entry_spare += freed); 2893 pv_entry_count -= freed; 2894 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2895 for (field = 0; field < _NPCM; field++) 2896 if (pc->pc_map[field] != pc_freemask[field]) { 2897 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, 2898 pc_list); 2899 TAILQ_INSERT_TAIL(&newtail, pc, pc_lru); 2900 2901 /* 2902 * One freed pv entry in locked_pmap is 2903 * sufficient. 2904 */ 2905 if (pmap == locked_pmap) 2906 goto out; 2907 break; 2908 } 2909 if (field == _NPCM) { 2910 PV_STAT(pv_entry_spare -= _NPCPV); 2911 PV_STAT(pc_chunk_count--); 2912 PV_STAT(pc_chunk_frees++); 2913 /* Entire chunk is free; return it. */ 2914 m_pc = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc)); 2915 pmap_qremove((vm_offset_t)pc, 1); 2916 pmap_pte2list_free(&pv_vafree, (vm_offset_t)pc); 2917 break; 2918 } 2919 } 2920 out: 2921 TAILQ_CONCAT(&pv_chunks, &newtail, pc_lru); 2922 if (pmap != NULL) { 2923 if (pmap != locked_pmap) 2924 PMAP_UNLOCK(pmap); 2925 } 2926 if (m_pc == NULL && pv_vafree != 0 && SLIST_EMPTY(&free)) { 2927 m_pc = SLIST_FIRST(&free); 2928 SLIST_REMOVE_HEAD(&free, plinks.s.ss); 2929 /* Recycle a freed page table page. 
*/ 2930 m_pc->ref_count = 1; 2931 vm_wire_add(1); 2932 } 2933 vm_page_free_pages_toq(&free, false); 2934 return (m_pc); 2935 } 2936 2937 static void 2938 free_pv_chunk(struct pv_chunk *pc) 2939 { 2940 vm_page_t m; 2941 2942 TAILQ_REMOVE(&pv_chunks, pc, pc_lru); 2943 PV_STAT(pv_entry_spare -= _NPCPV); 2944 PV_STAT(pc_chunk_count--); 2945 PV_STAT(pc_chunk_frees++); 2946 /* entire chunk is free, return it */ 2947 m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc)); 2948 pmap_qremove((vm_offset_t)pc, 1); 2949 vm_page_unwire_noq(m); 2950 vm_page_free(m); 2951 pmap_pte2list_free(&pv_vafree, (vm_offset_t)pc); 2952 } 2953 2954 /* 2955 * Free the pv_entry back to the free list. 2956 */ 2957 static void 2958 free_pv_entry(pmap_t pmap, pv_entry_t pv) 2959 { 2960 struct pv_chunk *pc; 2961 int idx, field, bit; 2962 2963 rw_assert(&pvh_global_lock, RA_WLOCKED); 2964 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2965 PV_STAT(pv_entry_frees++); 2966 PV_STAT(pv_entry_spare++); 2967 pv_entry_count--; 2968 pc = pv_to_chunk(pv); 2969 idx = pv - &pc->pc_pventry[0]; 2970 field = idx / 32; 2971 bit = idx % 32; 2972 pc->pc_map[field] |= 1ul << bit; 2973 for (idx = 0; idx < _NPCM; idx++) 2974 if (pc->pc_map[idx] != pc_freemask[idx]) { 2975 /* 2976 * 98% of the time, pc is already at the head of the 2977 * list. If it isn't already, move it to the head. 2978 */ 2979 if (__predict_false(TAILQ_FIRST(&pmap->pm_pvchunk) != 2980 pc)) { 2981 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2982 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, 2983 pc_list); 2984 } 2985 return; 2986 } 2987 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2988 free_pv_chunk(pc); 2989 } 2990 2991 /* 2992 * Get a new pv_entry, allocating a block from the system 2993 * when needed. 2994 */ 2995 static pv_entry_t 2996 get_pv_entry(pmap_t pmap, boolean_t try) 2997 { 2998 static const struct timeval printinterval = { 60, 0 }; 2999 static struct timeval lastprint; 3000 int bit, field; 3001 pv_entry_t pv; 3002 struct pv_chunk *pc; 3003 vm_page_t m; 3004 3005 rw_assert(&pvh_global_lock, RA_WLOCKED); 3006 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3007 PV_STAT(pv_entry_allocs++); 3008 pv_entry_count++; 3009 if (pv_entry_count > pv_entry_high_water) 3010 if (ratecheck(&lastprint, &printinterval)) 3011 printf("Approaching the limit on PV entries, consider " 3012 "increasing either the vm.pmap.shpgperproc or the " 3013 "vm.pmap.pv_entry_max tunable.\n"); 3014 retry: 3015 pc = TAILQ_FIRST(&pmap->pm_pvchunk); 3016 if (pc != NULL) { 3017 for (field = 0; field < _NPCM; field++) { 3018 if (pc->pc_map[field]) { 3019 bit = ffs(pc->pc_map[field]) - 1; 3020 break; 3021 } 3022 } 3023 if (field < _NPCM) { 3024 pv = &pc->pc_pventry[field * 32 + bit]; 3025 pc->pc_map[field] &= ~(1ul << bit); 3026 /* If this was the last item, move it to tail */ 3027 for (field = 0; field < _NPCM; field++) 3028 if (pc->pc_map[field] != 0) { 3029 PV_STAT(pv_entry_spare--); 3030 return (pv); /* not full, return */ 3031 } 3032 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 3033 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list); 3034 PV_STAT(pv_entry_spare--); 3035 return (pv); 3036 } 3037 } 3038 /* 3039 * Access to the pte2list "pv_vafree" is synchronized by the pvh 3040 * global lock. If "pv_vafree" is currently non-empty, it will 3041 * remain non-empty until pmap_pte2list_alloc() completes. 
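	 *
	 * Each chunk provides _NPCPV pv entries and pc_map[] is a bitmap
	 * of the free ones. Bit 0 of the new chunk is cleared below
	 * because that entry is handed straight back to the caller.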
3042 */ 3043 if (pv_vafree == 0 || 3044 (m = vm_page_alloc_noobj(VM_ALLOC_WIRED)) == NULL) { 3045 if (try) { 3046 pv_entry_count--; 3047 PV_STAT(pc_chunk_tryfail++); 3048 return (NULL); 3049 } 3050 m = pmap_pv_reclaim(pmap); 3051 if (m == NULL) 3052 goto retry; 3053 } 3054 PV_STAT(pc_chunk_count++); 3055 PV_STAT(pc_chunk_allocs++); 3056 pc = (struct pv_chunk *)pmap_pte2list_alloc(&pv_vafree); 3057 pmap_qenter((vm_offset_t)pc, &m, 1); 3058 pc->pc_pmap = pmap; 3059 pc->pc_map[0] = pc_freemask[0] & ~1ul; /* preallocated bit 0 */ 3060 for (field = 1; field < _NPCM; field++) 3061 pc->pc_map[field] = pc_freemask[field]; 3062 TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru); 3063 pv = &pc->pc_pventry[0]; 3064 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); 3065 PV_STAT(pv_entry_spare += _NPCPV - 1); 3066 return (pv); 3067 } 3068 3069 /* 3070 * Create a pv entry for page at pa for 3071 * (pmap, va). 3072 */ 3073 static void 3074 pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m) 3075 { 3076 pv_entry_t pv; 3077 3078 rw_assert(&pvh_global_lock, RA_WLOCKED); 3079 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3080 pv = get_pv_entry(pmap, FALSE); 3081 pv->pv_va = va; 3082 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); 3083 } 3084 3085 static __inline pv_entry_t 3086 pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va) 3087 { 3088 pv_entry_t pv; 3089 3090 rw_assert(&pvh_global_lock, RA_WLOCKED); 3091 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { 3092 if (pmap == PV_PMAP(pv) && va == pv->pv_va) { 3093 TAILQ_REMOVE(&pvh->pv_list, pv, pv_next); 3094 break; 3095 } 3096 } 3097 return (pv); 3098 } 3099 3100 static void 3101 pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va) 3102 { 3103 pv_entry_t pv; 3104 3105 pv = pmap_pvh_remove(pvh, pmap, va); 3106 KASSERT(pv != NULL, ("pmap_pvh_free: pv not found")); 3107 free_pv_entry(pmap, pv); 3108 } 3109 3110 static void 3111 pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va) 3112 { 3113 struct md_page *pvh; 3114 3115 rw_assert(&pvh_global_lock, RA_WLOCKED); 3116 pmap_pvh_free(&m->md, pmap, va); 3117 if (TAILQ_EMPTY(&m->md.pv_list) && (m->flags & PG_FICTITIOUS) == 0) { 3118 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 3119 if (TAILQ_EMPTY(&pvh->pv_list)) 3120 vm_page_aflag_clear(m, PGA_WRITEABLE); 3121 } 3122 } 3123 3124 static void 3125 pmap_pv_demote_pte1(pmap_t pmap, vm_offset_t va, vm_paddr_t pa) 3126 { 3127 struct md_page *pvh; 3128 pv_entry_t pv; 3129 vm_offset_t va_last; 3130 vm_page_t m; 3131 3132 rw_assert(&pvh_global_lock, RA_WLOCKED); 3133 KASSERT((pa & PTE1_OFFSET) == 0, 3134 ("pmap_pv_demote_pte1: pa is not 1mpage aligned")); 3135 3136 /* 3137 * Transfer the 1mpage's pv entry for this mapping to the first 3138 * page's pv list. 3139 */ 3140 pvh = pa_to_pvh(pa); 3141 va = pte1_trunc(va); 3142 pv = pmap_pvh_remove(pvh, pmap, va); 3143 KASSERT(pv != NULL, ("pmap_pv_demote_pte1: pv not found")); 3144 m = PHYS_TO_VM_PAGE(pa); 3145 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); 3146 /* Instantiate the remaining NPTE2_IN_PT2 - 1 pv entries. 
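	 * That is, one pv entry for every remaining 4KB page of the 1MB
	 * section; the first page keeps the entry transferred above.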
*/ 3147 va_last = va + PTE1_SIZE - PAGE_SIZE; 3148 do { 3149 m++; 3150 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3151 ("pmap_pv_demote_pte1: page %p is not managed", m)); 3152 va += PAGE_SIZE; 3153 pmap_insert_entry(pmap, va, m); 3154 } while (va < va_last); 3155 } 3156 3157 #if VM_NRESERVLEVEL > 0 3158 static void 3159 pmap_pv_promote_pte1(pmap_t pmap, vm_offset_t va, vm_paddr_t pa) 3160 { 3161 struct md_page *pvh; 3162 pv_entry_t pv; 3163 vm_offset_t va_last; 3164 vm_page_t m; 3165 3166 rw_assert(&pvh_global_lock, RA_WLOCKED); 3167 KASSERT((pa & PTE1_OFFSET) == 0, 3168 ("pmap_pv_promote_pte1: pa is not 1mpage aligned")); 3169 3170 /* 3171 * Transfer the first page's pv entry for this mapping to the 3172 * 1mpage's pv list. Aside from avoiding the cost of a call 3173 * to get_pv_entry(), a transfer avoids the possibility that 3174 * get_pv_entry() calls pmap_pv_reclaim() and that pmap_pv_reclaim() 3175 * removes one of the mappings that is being promoted. 3176 */ 3177 m = PHYS_TO_VM_PAGE(pa); 3178 va = pte1_trunc(va); 3179 pv = pmap_pvh_remove(&m->md, pmap, va); 3180 KASSERT(pv != NULL, ("pmap_pv_promote_pte1: pv not found")); 3181 pvh = pa_to_pvh(pa); 3182 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next); 3183 /* Free the remaining NPTE2_IN_PT2 - 1 pv entries. */ 3184 va_last = va + PTE1_SIZE - PAGE_SIZE; 3185 do { 3186 m++; 3187 va += PAGE_SIZE; 3188 pmap_pvh_free(&m->md, pmap, va); 3189 } while (va < va_last); 3190 } 3191 #endif 3192 3193 /* 3194 * Conditionally create a pv entry. 3195 */ 3196 static boolean_t 3197 pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m) 3198 { 3199 pv_entry_t pv; 3200 3201 rw_assert(&pvh_global_lock, RA_WLOCKED); 3202 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3203 if (pv_entry_count < pv_entry_high_water && 3204 (pv = get_pv_entry(pmap, TRUE)) != NULL) { 3205 pv->pv_va = va; 3206 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); 3207 return (TRUE); 3208 } else 3209 return (FALSE); 3210 } 3211 3212 /* 3213 * Create the pv entries for each of the pages within a section. 3214 */ 3215 static bool 3216 pmap_pv_insert_pte1(pmap_t pmap, vm_offset_t va, pt1_entry_t pte1, u_int flags) 3217 { 3218 struct md_page *pvh; 3219 pv_entry_t pv; 3220 bool noreclaim; 3221 3222 rw_assert(&pvh_global_lock, RA_WLOCKED); 3223 noreclaim = (flags & PMAP_ENTER_NORECLAIM) != 0; 3224 if ((noreclaim && pv_entry_count >= pv_entry_high_water) || 3225 (pv = get_pv_entry(pmap, noreclaim)) == NULL) 3226 return (false); 3227 pv->pv_va = va; 3228 pvh = pa_to_pvh(pte1_pa(pte1)); 3229 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next); 3230 return (true); 3231 } 3232 3233 static inline void 3234 pmap_tlb_flush_pte1(pmap_t pmap, vm_offset_t va, pt1_entry_t npte1) 3235 { 3236 3237 /* Kill all the small mappings or the big one only. */ 3238 if (pte1_is_section(npte1)) 3239 pmap_tlb_flush_range(pmap, pte1_trunc(va), PTE1_SIZE); 3240 else 3241 pmap_tlb_flush(pmap, pte1_trunc(va)); 3242 } 3243 3244 /* 3245 * Update kernel pte1 on all pmaps. 3246 * 3247 * The following function is called only on one cpu with disabled interrupts. 3248 * In SMP case, smp_rendezvous_cpus() is used to stop other cpus. This way 3249 * nobody can invoke explicit hardware table walk during the update of pte1. 3250 * Unsolicited hardware table walk can still happen, invoked by speculative 3251 * data or instruction prefetch or even by speculative hardware table walk. 3252 * 3253 * The break-before-make approach should be implemented here. 
However, it's 3254 * not so easy to do that for kernel mappings as it would be unhappy to unmap 3255 * itself unexpectedly but voluntarily. 3256 */ 3257 static void 3258 pmap_update_pte1_kernel(vm_offset_t va, pt1_entry_t npte1) 3259 { 3260 pmap_t pmap; 3261 pt1_entry_t *pte1p; 3262 3263 /* 3264 * Get current pmap. Interrupts should be disabled here 3265 * so PCPU_GET() is done atomically. 3266 */ 3267 pmap = PCPU_GET(curpmap); 3268 if (pmap == NULL) 3269 pmap = kernel_pmap; 3270 3271 /* 3272 * (1) Change pte1 on current pmap. 3273 * (2) Flush all obsolete TLB entries on current CPU. 3274 * (3) Change pte1 on all pmaps. 3275 * (4) Flush all obsolete TLB entries on all CPUs in SMP case. 3276 */ 3277 3278 pte1p = pmap_pte1(pmap, va); 3279 pte1_store(pte1p, npte1); 3280 3281 /* Kill all the small mappings or the big one only. */ 3282 if (pte1_is_section(npte1)) { 3283 pmap_pte1_kern_promotions++; 3284 tlb_flush_range_local(pte1_trunc(va), PTE1_SIZE); 3285 } else { 3286 pmap_pte1_kern_demotions++; 3287 tlb_flush_local(pte1_trunc(va)); 3288 } 3289 3290 /* 3291 * In SMP case, this function is called when all cpus are at smp 3292 * rendezvous, so there is no need to use 'allpmaps_lock' lock here. 3293 * In UP case, the function is called with this lock locked. 3294 */ 3295 LIST_FOREACH(pmap, &allpmaps, pm_list) { 3296 pte1p = pmap_pte1(pmap, va); 3297 pte1_store(pte1p, npte1); 3298 } 3299 3300 #ifdef SMP 3301 /* Kill all the small mappings or the big one only. */ 3302 if (pte1_is_section(npte1)) 3303 tlb_flush_range(pte1_trunc(va), PTE1_SIZE); 3304 else 3305 tlb_flush(pte1_trunc(va)); 3306 #endif 3307 } 3308 3309 #ifdef SMP 3310 struct pte1_action { 3311 vm_offset_t va; 3312 pt1_entry_t npte1; 3313 u_int update; /* CPU that updates the PTE1 */ 3314 }; 3315 3316 static void 3317 pmap_update_pte1_action(void *arg) 3318 { 3319 struct pte1_action *act = arg; 3320 3321 if (act->update == PCPU_GET(cpuid)) 3322 pmap_update_pte1_kernel(act->va, act->npte1); 3323 } 3324 3325 /* 3326 * Change pte1 on current pmap. 3327 * Note that kernel pte1 must be changed on all pmaps. 3328 * 3329 * According to the architecture reference manual published by ARM, 3330 * the behaviour is UNPREDICTABLE when two or more TLB entries map the same VA. 3331 * According to this manual, UNPREDICTABLE behaviours must never happen in 3332 * a viable system. In contrast, on x86 processors, it is not specified which 3333 * TLB entry mapping the virtual address will be used, but the MMU doesn't 3334 * generate a bogus translation the way it does on Cortex-A8 rev 2 (Beaglebone 3335 * Black). 3336 * 3337 * It's a problem when either promotion or demotion is being done. The pte1 3338 * update and appropriate TLB flush must be done atomically in general. 3339 */ 3340 static void 3341 pmap_change_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t va, 3342 pt1_entry_t npte1) 3343 { 3344 3345 if (pmap == kernel_pmap) { 3346 struct pte1_action act; 3347 3348 sched_pin(); 3349 act.va = va; 3350 act.npte1 = npte1; 3351 act.update = PCPU_GET(cpuid); 3352 smp_rendezvous_cpus(all_cpus, smp_no_rendezvous_barrier, 3353 pmap_update_pte1_action, NULL, &act); 3354 sched_unpin(); 3355 } else { 3356 register_t cspr; 3357 3358 /* 3359 * Use break-before-make approach for changing userland 3360 * mappings. It can cause L1 translation aborts on other 3361 * cores in SMP case. So, special treatment is implemented 3362 * in pmap_fault(). 
To reduce the likelihood that another core 3363 * will be affected by the broken mapping, disable interrupts 3364 * until the mapping change is completed. 3365 */ 3366 cspr = disable_interrupts(PSR_I | PSR_F); 3367 pte1_clear(pte1p); 3368 pmap_tlb_flush_pte1(pmap, va, npte1); 3369 pte1_store(pte1p, npte1); 3370 restore_interrupts(cspr); 3371 } 3372 } 3373 #else 3374 static void 3375 pmap_change_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t va, 3376 pt1_entry_t npte1) 3377 { 3378 3379 if (pmap == kernel_pmap) { 3380 mtx_lock_spin(&allpmaps_lock); 3381 pmap_update_pte1_kernel(va, npte1); 3382 mtx_unlock_spin(&allpmaps_lock); 3383 } else { 3384 register_t cspr; 3385 3386 /* 3387 * Use break-before-make approach for changing userland 3388 * mappings. It's absolutely safe in UP case when interrupts 3389 * are disabled. 3390 */ 3391 cspr = disable_interrupts(PSR_I | PSR_F); 3392 pte1_clear(pte1p); 3393 pmap_tlb_flush_pte1(pmap, va, npte1); 3394 pte1_store(pte1p, npte1); 3395 restore_interrupts(cspr); 3396 } 3397 } 3398 #endif 3399 3400 #if VM_NRESERVLEVEL > 0 3401 /* 3402 * Tries to promote the NPTE2_IN_PT2, contiguous 4KB page mappings that are 3403 * within a single page table page (PT2) to a single 1MB page mapping. 3404 * For promotion to occur, two conditions must be met: (1) the 4KB page 3405 * mappings must map aligned, contiguous physical memory and (2) the 4KB page 3406 * mappings must have identical characteristics. 3407 * 3408 * Managed (PG_MANAGED) mappings within the kernel address space are not 3409 * promoted. The reason is that kernel PTE1s are replicated in each pmap but 3410 * pmap_remove_write(), pmap_clear_modify(), and pmap_clear_reference() only 3411 * read the PTE1 from the kernel pmap. 3412 */ 3413 static void 3414 pmap_promote_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t va) 3415 { 3416 pt1_entry_t npte1; 3417 pt2_entry_t *fpte2p, fpte2, fpte2_fav; 3418 pt2_entry_t *pte2p, pte2; 3419 vm_offset_t pteva __unused; 3420 vm_page_t m __unused; 3421 3422 PDEBUG(6, printf("%s(%p): try for va %#x pte1 %#x at %p\n", __func__, 3423 pmap, va, pte1_load(pte1p), pte1p)); 3424 3425 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3426 3427 /* 3428 * Examine the first PTE2 in the specified PT2. Abort if this PTE2 is 3429 * either invalid, unused, or does not map the first 4KB physical page 3430 * within a 1MB page. 3431 */ 3432 fpte2p = pmap_pte2_quick(pmap, pte1_trunc(va)); 3433 fpte2 = pte2_load(fpte2p); 3434 if ((fpte2 & ((PTE2_FRAME & PTE1_OFFSET) | PTE2_A | PTE2_V)) != 3435 (PTE2_A | PTE2_V)) { 3436 pmap_pte1_p_failures++; 3437 CTR3(KTR_PMAP, "%s: failure(1) for va %#x in pmap %p", 3438 __func__, va, pmap); 3439 return; 3440 } 3441 if (pte2_is_managed(fpte2) && pmap == kernel_pmap) { 3442 pmap_pte1_p_failures++; 3443 CTR3(KTR_PMAP, "%s: failure(2) for va %#x in pmap %p", 3444 __func__, va, pmap); 3445 return; 3446 } 3447 if ((fpte2 & (PTE2_NM | PTE2_RO)) == PTE2_NM) { 3448 /* 3449 * When page is not modified, PTE2_RO can be set without 3450 * a TLB invalidation. 3451 */ 3452 fpte2 |= PTE2_RO; 3453 pte2_store(fpte2p, fpte2); 3454 } 3455 3456 /* 3457 * Examine each of the other PTE2s in the specified PT2. Abort if this 3458 * PTE2 maps an unexpected 4KB physical page or does not have identical 3459 * characteristics to the first PTE2. 
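	 * The scan runs backwards from the last PTE2 toward the first;
	 * fpte2_fav holds the physical address and attributes expected at
	 * the current position and is decremented by PTE2_SIZE each step.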
3460 */ 3461 fpte2_fav = (fpte2 & (PTE2_FRAME | PTE2_A | PTE2_V)); 3462 fpte2_fav += PTE1_SIZE - PTE2_SIZE; /* examine from the end */ 3463 for (pte2p = fpte2p + NPTE2_IN_PT2 - 1; pte2p > fpte2p; pte2p--) { 3464 pte2 = pte2_load(pte2p); 3465 if ((pte2 & (PTE2_FRAME | PTE2_A | PTE2_V)) != fpte2_fav) { 3466 pmap_pte1_p_failures++; 3467 CTR3(KTR_PMAP, "%s: failure(3) for va %#x in pmap %p", 3468 __func__, va, pmap); 3469 return; 3470 } 3471 if ((pte2 & (PTE2_NM | PTE2_RO)) == PTE2_NM) { 3472 /* 3473 * When page is not modified, PTE2_RO can be set 3474 * without a TLB invalidation. See note above. 3475 */ 3476 pte2 |= PTE2_RO; 3477 pte2_store(pte2p, pte2); 3478 pteva = pte1_trunc(va) | (pte2 & PTE1_OFFSET & 3479 PTE2_FRAME); 3480 CTR3(KTR_PMAP, "%s: protect for va %#x in pmap %p", 3481 __func__, pteva, pmap); 3482 } 3483 if ((pte2 & PTE2_PROMOTE) != (fpte2 & PTE2_PROMOTE)) { 3484 pmap_pte1_p_failures++; 3485 CTR3(KTR_PMAP, "%s: failure(4) for va %#x in pmap %p", 3486 __func__, va, pmap); 3487 return; 3488 } 3489 3490 fpte2_fav -= PTE2_SIZE; 3491 } 3492 /* 3493 * The page table page in its current state will stay in PT2TAB 3494 * until the PTE1 mapping the section is demoted by pmap_demote_pte1() 3495 * or destroyed by pmap_remove_pte1(). 3496 * 3497 * Note that L2 page table size is not equal to PAGE_SIZE. 3498 */ 3499 m = PHYS_TO_VM_PAGE(trunc_page(pte1_link_pa(pte1_load(pte1p)))); 3500 KASSERT(m >= vm_page_array && m < &vm_page_array[vm_page_array_size], 3501 ("%s: PT2 page is out of range", __func__)); 3502 KASSERT(m->pindex == (pte1_index(va) & ~PT2PG_MASK), 3503 ("%s: PT2 page's pindex is wrong", __func__)); 3504 3505 /* 3506 * Get pte1 from pte2 format. 3507 */ 3508 npte1 = (fpte2 & PTE1_FRAME) | ATTR_TO_L1(fpte2) | PTE1_V; 3509 3510 /* 3511 * Promote the pv entries. 3512 */ 3513 if (pte2_is_managed(fpte2)) 3514 pmap_pv_promote_pte1(pmap, va, pte1_pa(npte1)); 3515 3516 /* 3517 * Promote the mappings. 3518 */ 3519 pmap_change_pte1(pmap, pte1p, va, npte1); 3520 3521 pmap_pte1_promotions++; 3522 CTR3(KTR_PMAP, "%s: success for va %#x in pmap %p", 3523 __func__, va, pmap); 3524 3525 PDEBUG(6, printf("%s(%p): success for va %#x pte1 %#x(%#x) at %p\n", 3526 __func__, pmap, va, npte1, pte1_load(pte1p), pte1p)); 3527 } 3528 #endif /* VM_NRESERVLEVEL > 0 */ 3529 3530 /* 3531 * Zero L2 page table page. 3532 */ 3533 static __inline void 3534 pmap_clear_pt2(pt2_entry_t *fpte2p) 3535 { 3536 pt2_entry_t *pte2p; 3537 3538 for (pte2p = fpte2p; pte2p < fpte2p + NPTE2_IN_PT2; pte2p++) 3539 pte2_clear(pte2p); 3540 3541 } 3542 3543 /* 3544 * Removes a 1MB page mapping from the kernel pmap. 3545 */ 3546 static void 3547 pmap_remove_kernel_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t va) 3548 { 3549 vm_page_t m; 3550 uint32_t pte1_idx; 3551 pt2_entry_t *fpte2p; 3552 vm_paddr_t pt2_pa; 3553 3554 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3555 m = pmap_pt2_page(pmap, va); 3556 if (m == NULL) 3557 /* 3558 * QQQ: Is this function called only on promoted pte1? 3559 * We certainly do section mappings directly 3560 * (without promotion) in kernel !!! 3561 */ 3562 panic("%s: missing pt2 page", __func__); 3563 3564 pte1_idx = pte1_index(va); 3565 3566 /* 3567 * Initialize the L2 page table. 3568 */ 3569 fpte2p = page_pt2(pt2map_pt2pg(va), pte1_idx); 3570 pmap_clear_pt2(fpte2p); 3571 3572 /* 3573 * Remove the mapping. 
3574 */ 3575 pt2_pa = page_pt2pa(VM_PAGE_TO_PHYS(m), pte1_idx); 3576 pmap_kenter_pte1(va, PTE1_LINK(pt2_pa)); 3577 3578 /* 3579 * QQQ: We do not need to invalidate PT2MAP mapping 3580 * as we did not change it. I.e. the L2 page table page 3581 * was and still is mapped the same way. 3582 */ 3583 } 3584 3585 /* 3586 * Do the things to unmap a section in a process 3587 */ 3588 static void 3589 pmap_remove_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t sva, 3590 struct spglist *free) 3591 { 3592 pt1_entry_t opte1; 3593 struct md_page *pvh; 3594 vm_offset_t eva, va; 3595 vm_page_t m; 3596 3597 PDEBUG(6, printf("%s(%p): va %#x pte1 %#x at %p\n", __func__, pmap, sva, 3598 pte1_load(pte1p), pte1p)); 3599 3600 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3601 KASSERT((sva & PTE1_OFFSET) == 0, 3602 ("%s: sva is not 1mpage aligned", __func__)); 3603 3604 /* 3605 * Clear and invalidate the mapping. It should occupy one and only TLB 3606 * entry. So, pmap_tlb_flush() called with aligned address should be 3607 * sufficient. 3608 */ 3609 opte1 = pte1_load_clear(pte1p); 3610 pmap_tlb_flush(pmap, sva); 3611 3612 if (pte1_is_wired(opte1)) 3613 pmap->pm_stats.wired_count -= PTE1_SIZE / PAGE_SIZE; 3614 pmap->pm_stats.resident_count -= PTE1_SIZE / PAGE_SIZE; 3615 if (pte1_is_managed(opte1)) { 3616 pvh = pa_to_pvh(pte1_pa(opte1)); 3617 pmap_pvh_free(pvh, pmap, sva); 3618 eva = sva + PTE1_SIZE; 3619 for (va = sva, m = PHYS_TO_VM_PAGE(pte1_pa(opte1)); 3620 va < eva; va += PAGE_SIZE, m++) { 3621 if (pte1_is_dirty(opte1)) 3622 vm_page_dirty(m); 3623 if (opte1 & PTE1_A) 3624 vm_page_aflag_set(m, PGA_REFERENCED); 3625 if (TAILQ_EMPTY(&m->md.pv_list) && 3626 TAILQ_EMPTY(&pvh->pv_list)) 3627 vm_page_aflag_clear(m, PGA_WRITEABLE); 3628 } 3629 } 3630 if (pmap == kernel_pmap) { 3631 /* 3632 * L2 page table(s) can't be removed from kernel map as 3633 * kernel counts on it (stuff around pmap_growkernel()). 3634 */ 3635 pmap_remove_kernel_pte1(pmap, pte1p, sva); 3636 } else { 3637 /* 3638 * Get associated L2 page table page. 3639 * It's possible that the page was never allocated. 3640 */ 3641 m = pmap_pt2_page(pmap, sva); 3642 if (m != NULL) 3643 pmap_unwire_pt2_all(pmap, sva, m, free); 3644 } 3645 } 3646 3647 /* 3648 * Fills L2 page table page with mappings to consecutive physical pages. 3649 */ 3650 static __inline void 3651 pmap_fill_pt2(pt2_entry_t *fpte2p, pt2_entry_t npte2) 3652 { 3653 pt2_entry_t *pte2p; 3654 3655 for (pte2p = fpte2p; pte2p < fpte2p + NPTE2_IN_PT2; pte2p++) { 3656 pte2_store(pte2p, npte2); 3657 npte2 += PTE2_SIZE; 3658 } 3659 } 3660 3661 /* 3662 * Tries to demote a 1MB page mapping. If demotion fails, the 3663 * 1MB page mapping is invalidated. 
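 *
 * On success, the section mapping is replaced by a PTE1_LINK to an L2
 * page table holding equivalent 4KB mappings (filled by pmap_fill_pt2()
 * when needed), and TRUE is returned.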
3664 */ 3665 static boolean_t 3666 pmap_demote_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t va) 3667 { 3668 pt1_entry_t opte1, npte1; 3669 pt2_entry_t *fpte2p, npte2; 3670 vm_paddr_t pt2pg_pa, pt2_pa; 3671 vm_page_t m; 3672 struct spglist free; 3673 uint32_t pte1_idx, isnew = 0; 3674 3675 PDEBUG(6, printf("%s(%p): try for va %#x pte1 %#x at %p\n", __func__, 3676 pmap, va, pte1_load(pte1p), pte1p)); 3677 3678 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3679 3680 opte1 = pte1_load(pte1p); 3681 KASSERT(pte1_is_section(opte1), ("%s: opte1 not a section", __func__)); 3682 3683 if ((opte1 & PTE1_A) == 0 || (m = pmap_pt2_page(pmap, va)) == NULL) { 3684 KASSERT(!pte1_is_wired(opte1), 3685 ("%s: PT2 page for a wired mapping is missing", __func__)); 3686 3687 /* 3688 * Invalidate the 1MB page mapping and return 3689 * "failure" if the mapping was never accessed or the 3690 * allocation of the new page table page fails. 3691 */ 3692 if ((opte1 & PTE1_A) == 0 || 3693 (m = vm_page_alloc_noobj(VM_ALLOC_WIRED)) == NULL) { 3694 SLIST_INIT(&free); 3695 pmap_remove_pte1(pmap, pte1p, pte1_trunc(va), &free); 3696 vm_page_free_pages_toq(&free, false); 3697 CTR3(KTR_PMAP, "%s: failure for va %#x in pmap %p", 3698 __func__, va, pmap); 3699 return (FALSE); 3700 } 3701 m->pindex = pte1_index(va) & ~PT2PG_MASK; 3702 if (va < VM_MAXUSER_ADDRESS) 3703 pmap->pm_stats.resident_count++; 3704 3705 isnew = 1; 3706 3707 /* 3708 * We init all L2 page tables in the page even if 3709 * we are going to change everything for one L2 page 3710 * table in a while. 3711 */ 3712 pt2pg_pa = pmap_pt2pg_init(pmap, va, m); 3713 } else { 3714 if (va < VM_MAXUSER_ADDRESS) { 3715 if (pt2_is_empty(m, va)) 3716 isnew = 1; /* Demoting section w/o promotion. */ 3717 #ifdef INVARIANTS 3718 else 3719 KASSERT(pt2_is_full(m, va), ("%s: bad PT2 wire" 3720 " count %u", __func__, 3721 pt2_wirecount_get(m, pte1_index(va)))); 3722 #endif 3723 } 3724 } 3725 3726 pt2pg_pa = VM_PAGE_TO_PHYS(m); 3727 pte1_idx = pte1_index(va); 3728 /* 3729 * If the pmap is current, then the PT2MAP can provide access to 3730 * the page table page (promoted L2 page tables are not unmapped). 3731 * Otherwise, temporarily map the L2 page table page (m) into 3732 * the kernel's address space at either PADDR1 or PADDR2. 3733 * 3734 * Note that L2 page table size is not equal to PAGE_SIZE. 3735 */ 3736 if (pmap_is_current(pmap)) 3737 fpte2p = page_pt2(pt2map_pt2pg(va), pte1_idx); 3738 else if (curthread->td_pinned > 0 && rw_wowned(&pvh_global_lock)) { 3739 if (pte2_pa(pte2_load(PMAP1)) != pt2pg_pa) { 3740 pte2_store(PMAP1, PTE2_KPT(pt2pg_pa)); 3741 #ifdef SMP 3742 PMAP1cpu = PCPU_GET(cpuid); 3743 #endif 3744 tlb_flush_local((vm_offset_t)PADDR1); 3745 PMAP1changed++; 3746 } else 3747 #ifdef SMP 3748 if (PMAP1cpu != PCPU_GET(cpuid)) { 3749 PMAP1cpu = PCPU_GET(cpuid); 3750 tlb_flush_local((vm_offset_t)PADDR1); 3751 PMAP1changedcpu++; 3752 } else 3753 #endif 3754 PMAP1unchanged++; 3755 fpte2p = page_pt2((vm_offset_t)PADDR1, pte1_idx); 3756 } else { 3757 mtx_lock(&PMAP2mutex); 3758 if (pte2_pa(pte2_load(PMAP2)) != pt2pg_pa) { 3759 pte2_store(PMAP2, PTE2_KPT(pt2pg_pa)); 3760 tlb_flush((vm_offset_t)PADDR2); 3761 } 3762 fpte2p = page_pt2((vm_offset_t)PADDR2, pte1_idx); 3763 } 3764 pt2_pa = page_pt2pa(pt2pg_pa, pte1_idx); 3765 npte1 = PTE1_LINK(pt2_pa); 3766 3767 KASSERT((opte1 & PTE1_A) != 0, 3768 ("%s: opte1 is missing PTE1_A", __func__)); 3769 KASSERT((opte1 & (PTE1_NM | PTE1_RO)) != PTE1_NM, 3770 ("%s: opte1 has PTE1_NM", __func__)); 3771 3772 /* 3773 * Get pte2 from pte1 format. 
3774 */ 3775 npte2 = pte1_pa(opte1) | ATTR_TO_L2(opte1) | PTE2_V; 3776 3777 /* 3778 * If the L2 page table page is new, initialize it. If the mapping 3779 * has changed attributes, update the page table entries. 3780 */ 3781 if (isnew != 0) { 3782 pt2_wirecount_set(m, pte1_idx, NPTE2_IN_PT2); 3783 pmap_fill_pt2(fpte2p, npte2); 3784 } else if ((pte2_load(fpte2p) & PTE2_PROMOTE) != 3785 (npte2 & PTE2_PROMOTE)) 3786 pmap_fill_pt2(fpte2p, npte2); 3787 3788 KASSERT(pte2_pa(pte2_load(fpte2p)) == pte2_pa(npte2), 3789 ("%s: fpte2p and npte2 map different physical addresses", 3790 __func__)); 3791 3792 if (fpte2p == PADDR2) 3793 mtx_unlock(&PMAP2mutex); 3794 3795 /* 3796 * Demote the mapping. This pmap is locked. The old PTE1 has 3797 * PTE1_A set. If the old PTE1 has not PTE1_RO set, it also 3798 * has not PTE1_NM set. Thus, there is no danger of a race with 3799 * another processor changing the setting of PTE1_A and/or PTE1_NM 3800 * between the read above and the store below. 3801 */ 3802 pmap_change_pte1(pmap, pte1p, va, npte1); 3803 3804 /* 3805 * Demote the pv entry. This depends on the earlier demotion 3806 * of the mapping. Specifically, the (re)creation of a per- 3807 * page pv entry might trigger the execution of pmap_pv_reclaim(), 3808 * which might reclaim a newly (re)created per-page pv entry 3809 * and destroy the associated mapping. In order to destroy 3810 * the mapping, the PTE1 must have already changed from mapping 3811 * the 1mpage to referencing the page table page. 3812 */ 3813 if (pte1_is_managed(opte1)) 3814 pmap_pv_demote_pte1(pmap, va, pte1_pa(opte1)); 3815 3816 pmap_pte1_demotions++; 3817 CTR3(KTR_PMAP, "%s: success for va %#x in pmap %p", 3818 __func__, va, pmap); 3819 3820 PDEBUG(6, printf("%s(%p): success for va %#x pte1 %#x(%#x) at %p\n", 3821 __func__, pmap, va, npte1, pte1_load(pte1p), pte1p)); 3822 return (TRUE); 3823 } 3824 3825 /* 3826 * Insert the given physical page (p) at 3827 * the specified virtual address (v) in the 3828 * target physical map with the protection requested. 3829 * 3830 * If specified, the page will be wired down, meaning 3831 * that the related pte can not be reclaimed. 3832 * 3833 * NB: This is the only routine which MAY NOT lazy-evaluate 3834 * or lose information. That is, this routine must actually 3835 * insert this page into the given map NOW. 
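 *
 * Illustrative call sketch (the real callers live in the machine-
 * independent VM code, e.g. the fault handler; the local variable names
 * here are made up for the example):
 *
 *	rv = pmap_enter(pmap, va, m, prot,
 *	    prot | (wired ? PMAP_ENTER_WIRED : 0), psind);
 *
 * The fifth argument (flags) carries the access type in its VM_PROT_*
 * bits plus any PMAP_ENTER_* modifiers, and psind requests either a
 * 4KB (0) or a 1MB (1) mapping.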
3836 */ 3837 int 3838 pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, 3839 u_int flags, int8_t psind) 3840 { 3841 pt1_entry_t *pte1p; 3842 pt2_entry_t *pte2p; 3843 pt2_entry_t npte2, opte2; 3844 pv_entry_t pv; 3845 vm_paddr_t opa, pa; 3846 vm_page_t mpte2, om; 3847 int rv; 3848 3849 va = trunc_page(va); 3850 KASSERT(va <= vm_max_kernel_address, ("%s: toobig", __func__)); 3851 KASSERT(va < UPT2V_MIN_ADDRESS || va >= UPT2V_MAX_ADDRESS, 3852 ("%s: invalid to pmap_enter page table pages (va: 0x%x)", __func__, 3853 va)); 3854 KASSERT((m->oflags & VPO_UNMANAGED) != 0 || !VA_IS_CLEANMAP(va), 3855 ("%s: managed mapping within the clean submap", __func__)); 3856 if ((m->oflags & VPO_UNMANAGED) == 0) 3857 VM_PAGE_OBJECT_BUSY_ASSERT(m); 3858 KASSERT((flags & PMAP_ENTER_RESERVED) == 0, 3859 ("%s: flags %u has reserved bits set", __func__, flags)); 3860 pa = VM_PAGE_TO_PHYS(m); 3861 npte2 = PTE2(pa, PTE2_A, vm_page_pte2_attr(m)); 3862 if ((flags & VM_PROT_WRITE) == 0) 3863 npte2 |= PTE2_NM; 3864 if ((prot & VM_PROT_WRITE) == 0) 3865 npte2 |= PTE2_RO; 3866 KASSERT((npte2 & (PTE2_NM | PTE2_RO)) != PTE2_RO, 3867 ("%s: flags includes VM_PROT_WRITE but prot doesn't", __func__)); 3868 if ((prot & VM_PROT_EXECUTE) == 0) 3869 npte2 |= PTE2_NX; 3870 if ((flags & PMAP_ENTER_WIRED) != 0) 3871 npte2 |= PTE2_W; 3872 if (va < VM_MAXUSER_ADDRESS) 3873 npte2 |= PTE2_U; 3874 if (pmap != kernel_pmap) 3875 npte2 |= PTE2_NG; 3876 3877 rw_wlock(&pvh_global_lock); 3878 PMAP_LOCK(pmap); 3879 sched_pin(); 3880 if (psind == 1) { 3881 /* Assert the required virtual and physical alignment. */ 3882 KASSERT((va & PTE1_OFFSET) == 0, 3883 ("%s: va unaligned", __func__)); 3884 KASSERT(m->psind > 0, ("%s: m->psind < psind", __func__)); 3885 rv = pmap_enter_pte1(pmap, va, PTE1_PA(pa) | ATTR_TO_L1(npte2) | 3886 PTE1_V, flags, m); 3887 goto out; 3888 } 3889 3890 /* 3891 * In the case that a page table page is not 3892 * resident, we are creating it here. 3893 */ 3894 if (va < VM_MAXUSER_ADDRESS) { 3895 mpte2 = pmap_allocpte2(pmap, va, flags); 3896 if (mpte2 == NULL) { 3897 KASSERT((flags & PMAP_ENTER_NOSLEEP) != 0, 3898 ("pmap_allocpte2 failed with sleep allowed")); 3899 rv = KERN_RESOURCE_SHORTAGE; 3900 goto out; 3901 } 3902 } else 3903 mpte2 = NULL; 3904 pte1p = pmap_pte1(pmap, va); 3905 if (pte1_is_section(pte1_load(pte1p))) 3906 panic("%s: attempted on 1MB page", __func__); 3907 pte2p = pmap_pte2_quick(pmap, va); 3908 if (pte2p == NULL) 3909 panic("%s: invalid L1 page table entry va=%#x", __func__, va); 3910 3911 om = NULL; 3912 opte2 = pte2_load(pte2p); 3913 opa = pte2_pa(opte2); 3914 /* 3915 * Mapping has not changed, must be protection or wiring change. 3916 */ 3917 if (pte2_is_valid(opte2) && (opa == pa)) { 3918 /* 3919 * Wiring change, just update stats. We don't worry about 3920 * wiring PT2 pages as they remain resident as long as there 3921 * are valid mappings in them. Hence, if a user page is wired, 3922 * the PT2 page will be also. 3923 */ 3924 if (pte2_is_wired(npte2) && !pte2_is_wired(opte2)) 3925 pmap->pm_stats.wired_count++; 3926 else if (!pte2_is_wired(npte2) && pte2_is_wired(opte2)) 3927 pmap->pm_stats.wired_count--; 3928 3929 /* 3930 * Remove extra pte2 reference 3931 */ 3932 if (mpte2) 3933 pt2_wirecount_dec(mpte2, pte1_index(va)); 3934 if ((m->oflags & VPO_UNMANAGED) == 0) 3935 om = m; 3936 goto validate; 3937 } 3938 3939 /* 3940 * QQQ: We think that changing physical address on writeable mapping 3941 * is not safe. 
Well, maybe on kernel address space with correct 3942 * locking, it can make a sense. However, we have no idea why 3943 * anyone should do that on user address space. Are we wrong? 3944 */ 3945 KASSERT((opa == 0) || (opa == pa) || 3946 !pte2_is_valid(opte2) || ((opte2 & PTE2_RO) != 0), 3947 ("%s: pmap %p va %#x(%#x) opa %#x pa %#x - gotcha %#x %#x!", 3948 __func__, pmap, va, opte2, opa, pa, flags, prot)); 3949 3950 pv = NULL; 3951 3952 /* 3953 * Mapping has changed, invalidate old range and fall through to 3954 * handle validating new mapping. 3955 */ 3956 if (opa) { 3957 if (pte2_is_wired(opte2)) 3958 pmap->pm_stats.wired_count--; 3959 om = PHYS_TO_VM_PAGE(opa); 3960 if (om != NULL && (om->oflags & VPO_UNMANAGED) != 0) 3961 om = NULL; 3962 if (om != NULL) 3963 pv = pmap_pvh_remove(&om->md, pmap, va); 3964 3965 /* 3966 * Remove extra pte2 reference 3967 */ 3968 if (mpte2 != NULL) 3969 pt2_wirecount_dec(mpte2, va >> PTE1_SHIFT); 3970 } else 3971 pmap->pm_stats.resident_count++; 3972 3973 /* 3974 * Enter on the PV list if part of our managed memory. 3975 */ 3976 if ((m->oflags & VPO_UNMANAGED) == 0) { 3977 if (pv == NULL) { 3978 pv = get_pv_entry(pmap, FALSE); 3979 pv->pv_va = va; 3980 } 3981 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); 3982 } else if (pv != NULL) 3983 free_pv_entry(pmap, pv); 3984 3985 /* 3986 * Increment counters 3987 */ 3988 if (pte2_is_wired(npte2)) 3989 pmap->pm_stats.wired_count++; 3990 3991 validate: 3992 /* 3993 * Now validate mapping with desired protection/wiring. 3994 */ 3995 if (prot & VM_PROT_WRITE) { 3996 if ((m->oflags & VPO_UNMANAGED) == 0) 3997 vm_page_aflag_set(m, PGA_WRITEABLE); 3998 } 3999 4000 /* 4001 * If the mapping or permission bits are different, we need 4002 * to update the pte2. 4003 * 4004 * QQQ: Think again and again what to do 4005 * if the mapping is going to be changed! 4006 */ 4007 if ((opte2 & ~(PTE2_NM | PTE2_A)) != (npte2 & ~(PTE2_NM | PTE2_A))) { 4008 /* 4009 * Sync icache if exec permission and attribute VM_MEMATTR_WB_WA 4010 * is set. Do it now, before the mapping is stored and made 4011 * valid for hardware table walk. If done later, there is a race 4012 * for other threads of current process in lazy loading case. 4013 * Don't do it for kernel memory which is mapped with exec 4014 * permission even if the memory isn't going to hold executable 4015 * code. The only time when icache sync is needed is after 4016 * kernel module is loaded and the relocation info is processed. 4017 * And it's done in elf_cpu_load_file(). 4018 * 4019 * QQQ: (1) Does it exist any better way where 4020 * or how to sync icache? 4021 * (2) Now, we do it on a page basis. 4022 */ 4023 if ((prot & VM_PROT_EXECUTE) && pmap != kernel_pmap && 4024 m->md.pat_mode == VM_MEMATTR_WB_WA && 4025 (opa != pa || (opte2 & PTE2_NX))) 4026 cache_icache_sync_fresh(va, pa, PAGE_SIZE); 4027 4028 if (opte2 & PTE2_V) { 4029 /* Change mapping with break-before-make approach. 
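			 * That is, clear the old pte2 and flush the TLB entry
			 * for va before storing the new pte2, so the MMU never
			 * sees two different valid mappings of va at once.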
*/ 4030 opte2 = pte2_load_clear(pte2p); 4031 pmap_tlb_flush(pmap, va); 4032 pte2_store(pte2p, npte2); 4033 if (om != NULL) { 4034 KASSERT((om->oflags & VPO_UNMANAGED) == 0, 4035 ("%s: om %p unmanaged", __func__, om)); 4036 if ((opte2 & PTE2_A) != 0) 4037 vm_page_aflag_set(om, PGA_REFERENCED); 4038 if (pte2_is_dirty(opte2)) 4039 vm_page_dirty(om); 4040 if (TAILQ_EMPTY(&om->md.pv_list) && 4041 ((om->flags & PG_FICTITIOUS) != 0 || 4042 TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list))) 4043 vm_page_aflag_clear(om, PGA_WRITEABLE); 4044 } 4045 } else 4046 pte2_store(pte2p, npte2); 4047 } 4048 #if 0 4049 else { 4050 /* 4051 * QQQ: In time when both access and not mofified bits are 4052 * emulated by software, this should not happen. Some 4053 * analysis is need, if this really happen. Missing 4054 * tlb flush somewhere could be the reason. 4055 */ 4056 panic("%s: pmap %p va %#x opte2 %x npte2 %x !!", __func__, pmap, 4057 va, opte2, npte2); 4058 } 4059 #endif 4060 4061 #if VM_NRESERVLEVEL > 0 4062 /* 4063 * If both the L2 page table page and the reservation are fully 4064 * populated, then attempt promotion. 4065 */ 4066 if ((mpte2 == NULL || pt2_is_full(mpte2, va)) && 4067 sp_enabled && (m->flags & PG_FICTITIOUS) == 0 && 4068 vm_reserv_level_iffullpop(m) == 0) 4069 pmap_promote_pte1(pmap, pte1p, va); 4070 #endif 4071 4072 rv = KERN_SUCCESS; 4073 out: 4074 sched_unpin(); 4075 rw_wunlock(&pvh_global_lock); 4076 PMAP_UNLOCK(pmap); 4077 return (rv); 4078 } 4079 4080 /* 4081 * Do the things to unmap a page in a process. 4082 */ 4083 static int 4084 pmap_remove_pte2(pmap_t pmap, pt2_entry_t *pte2p, vm_offset_t va, 4085 struct spglist *free) 4086 { 4087 pt2_entry_t opte2; 4088 vm_page_t m; 4089 4090 rw_assert(&pvh_global_lock, RA_WLOCKED); 4091 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 4092 4093 /* Clear and invalidate the mapping. */ 4094 opte2 = pte2_load_clear(pte2p); 4095 pmap_tlb_flush(pmap, va); 4096 4097 KASSERT(pte2_is_valid(opte2), ("%s: pmap %p va %#x not link pte2 %#x", 4098 __func__, pmap, va, opte2)); 4099 4100 if (opte2 & PTE2_W) 4101 pmap->pm_stats.wired_count -= 1; 4102 pmap->pm_stats.resident_count -= 1; 4103 if (pte2_is_managed(opte2)) { 4104 m = PHYS_TO_VM_PAGE(pte2_pa(opte2)); 4105 if (pte2_is_dirty(opte2)) 4106 vm_page_dirty(m); 4107 if (opte2 & PTE2_A) 4108 vm_page_aflag_set(m, PGA_REFERENCED); 4109 pmap_remove_entry(pmap, m, va); 4110 } 4111 return (pmap_unuse_pt2(pmap, va, free)); 4112 } 4113 4114 /* 4115 * Remove a single page from a process address space. 4116 */ 4117 static void 4118 pmap_remove_page(pmap_t pmap, vm_offset_t va, struct spglist *free) 4119 { 4120 pt2_entry_t *pte2p; 4121 4122 rw_assert(&pvh_global_lock, RA_WLOCKED); 4123 KASSERT(curthread->td_pinned > 0, 4124 ("%s: curthread not pinned", __func__)); 4125 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 4126 if ((pte2p = pmap_pte2_quick(pmap, va)) == NULL || 4127 !pte2_is_valid(pte2_load(pte2p))) 4128 return; 4129 pmap_remove_pte2(pmap, pte2p, va, free); 4130 } 4131 4132 /* 4133 * Remove the given range of addresses from the specified map. 4134 * 4135 * It is assumed that the start and end are properly 4136 * rounded to the page size. 4137 */ 4138 void 4139 pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 4140 { 4141 vm_offset_t nextva; 4142 pt1_entry_t *pte1p, pte1; 4143 pt2_entry_t *pte2p, pte2; 4144 struct spglist free; 4145 4146 /* 4147 * Perform an unsynchronized read. This is, however, safe. 
4148 */ 4149 if (pmap->pm_stats.resident_count == 0) 4150 return; 4151 4152 SLIST_INIT(&free); 4153 4154 rw_wlock(&pvh_global_lock); 4155 sched_pin(); 4156 PMAP_LOCK(pmap); 4157 4158 /* 4159 * Special handling of removing one page. A very common 4160 * operation and easy to short circuit some code. 4161 */ 4162 if (sva + PAGE_SIZE == eva) { 4163 pte1 = pte1_load(pmap_pte1(pmap, sva)); 4164 if (pte1_is_link(pte1)) { 4165 pmap_remove_page(pmap, sva, &free); 4166 goto out; 4167 } 4168 } 4169 4170 for (; sva < eva; sva = nextva) { 4171 /* 4172 * Calculate address for next L2 page table. 4173 */ 4174 nextva = pte1_trunc(sva + PTE1_SIZE); 4175 if (nextva < sva) 4176 nextva = eva; 4177 if (pmap->pm_stats.resident_count == 0) 4178 break; 4179 4180 pte1p = pmap_pte1(pmap, sva); 4181 pte1 = pte1_load(pte1p); 4182 4183 /* 4184 * Weed out invalid mappings. Note: we assume that the L1 page 4185 * table is always allocated, and in kernel virtual. 4186 */ 4187 if (pte1 == 0) 4188 continue; 4189 4190 if (pte1_is_section(pte1)) { 4191 /* 4192 * Are we removing the entire large page? If not, 4193 * demote the mapping and fall through. 4194 */ 4195 if (sva + PTE1_SIZE == nextva && eva >= nextva) { 4196 pmap_remove_pte1(pmap, pte1p, sva, &free); 4197 continue; 4198 } else if (!pmap_demote_pte1(pmap, pte1p, sva)) { 4199 /* The large page mapping was destroyed. */ 4200 continue; 4201 } 4202 #ifdef INVARIANTS 4203 else { 4204 /* Update pte1 after demotion. */ 4205 pte1 = pte1_load(pte1p); 4206 } 4207 #endif 4208 } 4209 4210 KASSERT(pte1_is_link(pte1), ("%s: pmap %p va %#x pte1 %#x at %p" 4211 " is not link", __func__, pmap, sva, pte1, pte1p)); 4212 4213 /* 4214 * Limit our scan to either the end of the va represented 4215 * by the current L2 page table page, or to the end of the 4216 * range being removed. 4217 */ 4218 if (nextva > eva) 4219 nextva = eva; 4220 4221 for (pte2p = pmap_pte2_quick(pmap, sva); sva != nextva; 4222 pte2p++, sva += PAGE_SIZE) { 4223 pte2 = pte2_load(pte2p); 4224 if (!pte2_is_valid(pte2)) 4225 continue; 4226 if (pmap_remove_pte2(pmap, pte2p, sva, &free)) 4227 break; 4228 } 4229 } 4230 out: 4231 sched_unpin(); 4232 rw_wunlock(&pvh_global_lock); 4233 PMAP_UNLOCK(pmap); 4234 vm_page_free_pages_toq(&free, false); 4235 } 4236 4237 /* 4238 * Routine: pmap_remove_all 4239 * Function: 4240 * Removes this physical page from 4241 * all physical maps in which it resides. 4242 * Reflects back modify bits to the pager. 4243 * 4244 * Notes: 4245 * Original versions of this routine were very 4246 * inefficient because they iteratively called 4247 * pmap_remove (slow...) 
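 *
 *		This implementation instead walks the page's pv lists
 *		directly: any 1MB mappings of the containing superpage
 *		are demoted first, and the remaining 4KB mappings of the
 *		page are then removed one by one.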
4248 */ 4249 4250 void 4251 pmap_remove_all(vm_page_t m) 4252 { 4253 struct md_page *pvh; 4254 pv_entry_t pv; 4255 pmap_t pmap; 4256 pt2_entry_t *pte2p, opte2; 4257 pt1_entry_t *pte1p; 4258 vm_offset_t va; 4259 struct spglist free; 4260 4261 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 4262 ("%s: page %p is not managed", __func__, m)); 4263 SLIST_INIT(&free); 4264 rw_wlock(&pvh_global_lock); 4265 sched_pin(); 4266 if ((m->flags & PG_FICTITIOUS) != 0) 4267 goto small_mappings; 4268 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 4269 while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) { 4270 va = pv->pv_va; 4271 pmap = PV_PMAP(pv); 4272 PMAP_LOCK(pmap); 4273 pte1p = pmap_pte1(pmap, va); 4274 (void)pmap_demote_pte1(pmap, pte1p, va); 4275 PMAP_UNLOCK(pmap); 4276 } 4277 small_mappings: 4278 while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { 4279 pmap = PV_PMAP(pv); 4280 PMAP_LOCK(pmap); 4281 pmap->pm_stats.resident_count--; 4282 pte1p = pmap_pte1(pmap, pv->pv_va); 4283 KASSERT(!pte1_is_section(pte1_load(pte1p)), ("%s: found " 4284 "a 1mpage in page %p's pv list", __func__, m)); 4285 pte2p = pmap_pte2_quick(pmap, pv->pv_va); 4286 opte2 = pte2_load_clear(pte2p); 4287 pmap_tlb_flush(pmap, pv->pv_va); 4288 KASSERT(pte2_is_valid(opte2), ("%s: pmap %p va %x zero pte2", 4289 __func__, pmap, pv->pv_va)); 4290 if (pte2_is_wired(opte2)) 4291 pmap->pm_stats.wired_count--; 4292 if (opte2 & PTE2_A) 4293 vm_page_aflag_set(m, PGA_REFERENCED); 4294 4295 /* 4296 * Update the vm_page_t clean and reference bits. 4297 */ 4298 if (pte2_is_dirty(opte2)) 4299 vm_page_dirty(m); 4300 pmap_unuse_pt2(pmap, pv->pv_va, &free); 4301 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); 4302 free_pv_entry(pmap, pv); 4303 PMAP_UNLOCK(pmap); 4304 } 4305 vm_page_aflag_clear(m, PGA_WRITEABLE); 4306 sched_unpin(); 4307 rw_wunlock(&pvh_global_lock); 4308 vm_page_free_pages_toq(&free, false); 4309 } 4310 4311 /* 4312 * Just subroutine for pmap_remove_pages() to reasonably satisfy 4313 * good coding style, a.k.a. 80 character line width limit hell. 4314 */ 4315 static __inline void 4316 pmap_remove_pte1_quick(pmap_t pmap, pt1_entry_t pte1, pv_entry_t pv, 4317 struct spglist *free) 4318 { 4319 vm_paddr_t pa; 4320 vm_page_t m, mt, mpt2pg; 4321 struct md_page *pvh; 4322 4323 pa = pte1_pa(pte1); 4324 m = PHYS_TO_VM_PAGE(pa); 4325 4326 KASSERT(m->phys_addr == pa, ("%s: vm_page_t %p addr mismatch %#x %#x", 4327 __func__, m, m->phys_addr, pa)); 4328 KASSERT((m->flags & PG_FICTITIOUS) != 0 || 4329 m < &vm_page_array[vm_page_array_size], 4330 ("%s: bad pte1 %#x", __func__, pte1)); 4331 4332 if (pte1_is_dirty(pte1)) { 4333 for (mt = m; mt < &m[PTE1_SIZE / PAGE_SIZE]; mt++) 4334 vm_page_dirty(mt); 4335 } 4336 4337 pmap->pm_stats.resident_count -= PTE1_SIZE / PAGE_SIZE; 4338 pvh = pa_to_pvh(pa); 4339 TAILQ_REMOVE(&pvh->pv_list, pv, pv_next); 4340 if (TAILQ_EMPTY(&pvh->pv_list)) { 4341 for (mt = m; mt < &m[PTE1_SIZE / PAGE_SIZE]; mt++) 4342 if (TAILQ_EMPTY(&mt->md.pv_list)) 4343 vm_page_aflag_clear(mt, PGA_WRITEABLE); 4344 } 4345 mpt2pg = pmap_pt2_page(pmap, pv->pv_va); 4346 if (mpt2pg != NULL) 4347 pmap_unwire_pt2_all(pmap, pv->pv_va, mpt2pg, free); 4348 } 4349 4350 /* 4351 * Just subroutine for pmap_remove_pages() to reasonably satisfy 4352 * good coding style, a.k.a. 80 character line width limit hell. 
4353 */ 4354 static __inline void 4355 pmap_remove_pte2_quick(pmap_t pmap, pt2_entry_t pte2, pv_entry_t pv, 4356 struct spglist *free) 4357 { 4358 vm_paddr_t pa; 4359 vm_page_t m; 4360 struct md_page *pvh; 4361 4362 pa = pte2_pa(pte2); 4363 m = PHYS_TO_VM_PAGE(pa); 4364 4365 KASSERT(m->phys_addr == pa, ("%s: vm_page_t %p addr mismatch %#x %#x", 4366 __func__, m, m->phys_addr, pa)); 4367 KASSERT((m->flags & PG_FICTITIOUS) != 0 || 4368 m < &vm_page_array[vm_page_array_size], 4369 ("%s: bad pte2 %#x", __func__, pte2)); 4370 4371 if (pte2_is_dirty(pte2)) 4372 vm_page_dirty(m); 4373 4374 pmap->pm_stats.resident_count--; 4375 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); 4376 if (TAILQ_EMPTY(&m->md.pv_list) && (m->flags & PG_FICTITIOUS) == 0) { 4377 pvh = pa_to_pvh(pa); 4378 if (TAILQ_EMPTY(&pvh->pv_list)) 4379 vm_page_aflag_clear(m, PGA_WRITEABLE); 4380 } 4381 pmap_unuse_pt2(pmap, pv->pv_va, free); 4382 } 4383 4384 /* 4385 * Remove all pages from specified address space this aids process 4386 * exit speeds. Also, this code is special cased for current process 4387 * only, but can have the more generic (and slightly slower) mode enabled. 4388 * This is much faster than pmap_remove in the case of running down 4389 * an entire address space. 4390 */ 4391 void 4392 pmap_remove_pages(pmap_t pmap) 4393 { 4394 pt1_entry_t *pte1p, pte1; 4395 pt2_entry_t *pte2p, pte2; 4396 pv_entry_t pv; 4397 struct pv_chunk *pc, *npc; 4398 struct spglist free; 4399 int field, idx; 4400 int32_t bit; 4401 uint32_t inuse, bitmask; 4402 boolean_t allfree; 4403 4404 /* 4405 * Assert that the given pmap is only active on the current 4406 * CPU. Unfortunately, we cannot block another CPU from 4407 * activating the pmap while this function is executing. 4408 */ 4409 KASSERT(pmap == vmspace_pmap(curthread->td_proc->p_vmspace), 4410 ("%s: non-current pmap %p", __func__, pmap)); 4411 #if defined(SMP) && defined(INVARIANTS) 4412 { 4413 cpuset_t other_cpus; 4414 4415 sched_pin(); 4416 other_cpus = pmap->pm_active; 4417 CPU_CLR(PCPU_GET(cpuid), &other_cpus); 4418 sched_unpin(); 4419 KASSERT(CPU_EMPTY(&other_cpus), 4420 ("%s: pmap %p active on other cpus", __func__, pmap)); 4421 } 4422 #endif 4423 SLIST_INIT(&free); 4424 rw_wlock(&pvh_global_lock); 4425 PMAP_LOCK(pmap); 4426 sched_pin(); 4427 TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) { 4428 KASSERT(pc->pc_pmap == pmap, ("%s: wrong pmap %p %p", 4429 __func__, pmap, pc->pc_pmap)); 4430 allfree = TRUE; 4431 for (field = 0; field < _NPCM; field++) { 4432 inuse = (~(pc->pc_map[field])) & pc_freemask[field]; 4433 while (inuse != 0) { 4434 bit = ffs(inuse) - 1; 4435 bitmask = 1UL << bit; 4436 idx = field * 32 + bit; 4437 pv = &pc->pc_pventry[idx]; 4438 inuse &= ~bitmask; 4439 4440 /* 4441 * Note that we cannot remove wired pages 4442 * from a process' mapping at this time 4443 */ 4444 pte1p = pmap_pte1(pmap, pv->pv_va); 4445 pte1 = pte1_load(pte1p); 4446 if (pte1_is_section(pte1)) { 4447 if (pte1_is_wired(pte1)) { 4448 allfree = FALSE; 4449 continue; 4450 } 4451 pte1_clear(pte1p); 4452 pmap_remove_pte1_quick(pmap, pte1, pv, 4453 &free); 4454 } 4455 else if (pte1_is_link(pte1)) { 4456 pte2p = pt2map_entry(pv->pv_va); 4457 pte2 = pte2_load(pte2p); 4458 4459 if (!pte2_is_valid(pte2)) { 4460 printf("%s: pmap %p va %#x " 4461 "pte2 %#x\n", __func__, 4462 pmap, pv->pv_va, pte2); 4463 panic("bad pte2"); 4464 } 4465 4466 if (pte2_is_wired(pte2)) { 4467 allfree = FALSE; 4468 continue; 4469 } 4470 pte2_clear(pte2p); 4471 pmap_remove_pte2_quick(pmap, pte2, pv, 4472 &free); 4473 } else { 
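					/*
					 * Neither a section nor a link: the L1
					 * entry referenced by this pv entry is
					 * corrupted.
					 */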
4474 printf("%s: pmap %p va %#x pte1 %#x\n", 4475 __func__, pmap, pv->pv_va, pte1); 4476 panic("bad pte1"); 4477 } 4478 4479 /* Mark free */ 4480 PV_STAT(pv_entry_frees++); 4481 PV_STAT(pv_entry_spare++); 4482 pv_entry_count--; 4483 pc->pc_map[field] |= bitmask; 4484 } 4485 } 4486 if (allfree) { 4487 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 4488 free_pv_chunk(pc); 4489 } 4490 } 4491 tlb_flush_all_ng_local(); 4492 sched_unpin(); 4493 rw_wunlock(&pvh_global_lock); 4494 PMAP_UNLOCK(pmap); 4495 vm_page_free_pages_toq(&free, false); 4496 } 4497 4498 /* 4499 * This code makes some *MAJOR* assumptions: 4500 * 1. Current pmap & pmap exists. 4501 * 2. Not wired. 4502 * 3. Read access. 4503 * 4. No L2 page table pages. 4504 * but is *MUCH* faster than pmap_enter... 4505 */ 4506 static vm_page_t 4507 pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, 4508 vm_prot_t prot, vm_page_t mpt2pg) 4509 { 4510 pt2_entry_t *pte2p, pte2; 4511 vm_paddr_t pa; 4512 struct spglist free; 4513 uint32_t l2prot; 4514 4515 KASSERT(!VA_IS_CLEANMAP(va) || 4516 (m->oflags & VPO_UNMANAGED) != 0, 4517 ("%s: managed mapping within the clean submap", __func__)); 4518 rw_assert(&pvh_global_lock, RA_WLOCKED); 4519 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 4520 4521 /* 4522 * In the case that a L2 page table page is not 4523 * resident, we are creating it here. 4524 */ 4525 if (va < VM_MAXUSER_ADDRESS) { 4526 u_int pte1_idx; 4527 pt1_entry_t pte1, *pte1p; 4528 vm_paddr_t pt2_pa; 4529 4530 /* 4531 * Get L1 page table things. 4532 */ 4533 pte1_idx = pte1_index(va); 4534 pte1p = pmap_pte1(pmap, va); 4535 pte1 = pte1_load(pte1p); 4536 4537 if (mpt2pg && (mpt2pg->pindex == (pte1_idx & ~PT2PG_MASK))) { 4538 /* 4539 * Each of NPT2_IN_PG L2 page tables on the page can 4540 * come here. Make sure that associated L1 page table 4541 * link is established. 4542 * 4543 * QQQ: It comes that we don't establish all links to 4544 * L2 page tables for newly allocated L2 page 4545 * tables page. 4546 */ 4547 KASSERT(!pte1_is_section(pte1), 4548 ("%s: pte1 %#x is section", __func__, pte1)); 4549 if (!pte1_is_link(pte1)) { 4550 pt2_pa = page_pt2pa(VM_PAGE_TO_PHYS(mpt2pg), 4551 pte1_idx); 4552 pte1_store(pte1p, PTE1_LINK(pt2_pa)); 4553 } 4554 pt2_wirecount_inc(mpt2pg, pte1_idx); 4555 } else { 4556 /* 4557 * If the L2 page table page is mapped, we just 4558 * increment the hold count, and activate it. 4559 */ 4560 if (pte1_is_section(pte1)) { 4561 return (NULL); 4562 } else if (pte1_is_link(pte1)) { 4563 mpt2pg = PHYS_TO_VM_PAGE(pte1_link_pa(pte1)); 4564 pt2_wirecount_inc(mpt2pg, pte1_idx); 4565 } else { 4566 mpt2pg = _pmap_allocpte2(pmap, va, 4567 PMAP_ENTER_NOSLEEP); 4568 if (mpt2pg == NULL) 4569 return (NULL); 4570 } 4571 } 4572 } else { 4573 mpt2pg = NULL; 4574 } 4575 4576 /* 4577 * This call to pt2map_entry() makes the assumption that we are 4578 * entering the page into the current pmap. In order to support 4579 * quick entry into any pmap, one would likely use pmap_pte2_quick(). 4580 * But that isn't as quick as pt2map_entry(). 4581 */ 4582 pte2p = pt2map_entry(va); 4583 pte2 = pte2_load(pte2p); 4584 if (pte2_is_valid(pte2)) { 4585 if (mpt2pg != NULL) { 4586 /* 4587 * Remove extra pte2 reference 4588 */ 4589 pt2_wirecount_dec(mpt2pg, pte1_index(va)); 4590 mpt2pg = NULL; 4591 } 4592 return (NULL); 4593 } 4594 4595 /* 4596 * Enter on the PV list if part of our managed memory. 
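	 * If the pv entry cannot be obtained here, the mapping is abandoned:
	 * the reference taken on the L2 page table page is dropped (freeing
	 * the page if that was its last reference) and NULL is returned.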
4597 */ 4598 if ((m->oflags & VPO_UNMANAGED) == 0 && 4599 !pmap_try_insert_pv_entry(pmap, va, m)) { 4600 if (mpt2pg != NULL) { 4601 SLIST_INIT(&free); 4602 if (pmap_unwire_pt2(pmap, va, mpt2pg, &free)) { 4603 pmap_tlb_flush(pmap, va); 4604 vm_page_free_pages_toq(&free, false); 4605 } 4606 4607 mpt2pg = NULL; 4608 } 4609 return (NULL); 4610 } 4611 4612 /* 4613 * Increment counters 4614 */ 4615 pmap->pm_stats.resident_count++; 4616 4617 /* 4618 * Now validate mapping with RO protection 4619 */ 4620 pa = VM_PAGE_TO_PHYS(m); 4621 l2prot = PTE2_RO | PTE2_NM; 4622 if (va < VM_MAXUSER_ADDRESS) 4623 l2prot |= PTE2_U | PTE2_NG; 4624 if ((prot & VM_PROT_EXECUTE) == 0) 4625 l2prot |= PTE2_NX; 4626 else if (m->md.pat_mode == VM_MEMATTR_WB_WA && pmap != kernel_pmap) { 4627 /* 4628 * Sync icache if exec permission and attribute VM_MEMATTR_WB_WA 4629 * is set. QQQ: For more info, see comments in pmap_enter(). 4630 */ 4631 cache_icache_sync_fresh(va, pa, PAGE_SIZE); 4632 } 4633 pte2_store(pte2p, PTE2(pa, l2prot, vm_page_pte2_attr(m))); 4634 4635 return (mpt2pg); 4636 } 4637 4638 void 4639 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot) 4640 { 4641 4642 rw_wlock(&pvh_global_lock); 4643 PMAP_LOCK(pmap); 4644 (void)pmap_enter_quick_locked(pmap, va, m, prot, NULL); 4645 rw_wunlock(&pvh_global_lock); 4646 PMAP_UNLOCK(pmap); 4647 } 4648 4649 /* 4650 * Tries to create a read- and/or execute-only 1 MB page mapping. Returns 4651 * true if successful. Returns false if (1) a mapping already exists at the 4652 * specified virtual address or (2) a PV entry cannot be allocated without 4653 * reclaiming another PV entry. 4654 */ 4655 static bool 4656 pmap_enter_1mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot) 4657 { 4658 pt1_entry_t pte1; 4659 vm_paddr_t pa; 4660 4661 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 4662 pa = VM_PAGE_TO_PHYS(m); 4663 pte1 = PTE1(pa, PTE1_NM | PTE1_RO, ATTR_TO_L1(vm_page_pte2_attr(m))); 4664 if ((prot & VM_PROT_EXECUTE) == 0) 4665 pte1 |= PTE1_NX; 4666 if (va < VM_MAXUSER_ADDRESS) 4667 pte1 |= PTE1_U; 4668 if (pmap != kernel_pmap) 4669 pte1 |= PTE1_NG; 4670 return (pmap_enter_pte1(pmap, va, pte1, PMAP_ENTER_NOSLEEP | 4671 PMAP_ENTER_NOREPLACE | PMAP_ENTER_NORECLAIM, m) == KERN_SUCCESS); 4672 } 4673 4674 /* 4675 * Tries to create the specified 1 MB page mapping. Returns KERN_SUCCESS if 4676 * the mapping was created, and either KERN_FAILURE or KERN_RESOURCE_SHORTAGE 4677 * otherwise. Returns KERN_FAILURE if PMAP_ENTER_NOREPLACE was specified and 4678 * a mapping already exists at the specified virtual address. Returns 4679 * KERN_RESOURCE_SHORTAGE if PMAP_ENTER_NORECLAIM was specified and PV entry 4680 * allocation failed. 4681 */ 4682 static int 4683 pmap_enter_pte1(pmap_t pmap, vm_offset_t va, pt1_entry_t pte1, u_int flags, 4684 vm_page_t m) 4685 { 4686 struct spglist free; 4687 pt1_entry_t opte1, *pte1p; 4688 pt2_entry_t pte2, *pte2p; 4689 vm_offset_t cur, end; 4690 vm_page_t mt; 4691 4692 rw_assert(&pvh_global_lock, RA_WLOCKED); 4693 KASSERT((pte1 & (PTE1_NM | PTE1_RO)) == 0 || 4694 (pte1 & (PTE1_NM | PTE1_RO)) == (PTE1_NM | PTE1_RO), 4695 ("%s: pte1 has inconsistent NM and RO attributes", __func__)); 4696 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 4697 pte1p = pmap_pte1(pmap, va); 4698 opte1 = pte1_load(pte1p); 4699 if (pte1_is_valid(opte1)) { 4700 if ((flags & PMAP_ENTER_NOREPLACE) != 0) { 4701 CTR3(KTR_PMAP, "%s: failure for va %#lx in pmap %p", 4702 __func__, va, pmap); 4703 return (KERN_FAILURE); 4704 } 4705 /* Break the existing mapping(s). 
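		 * Either the whole existing 1MB section is removed at once,
		 * or, if the old pte1 is only a link, each valid 4KB mapping
		 * underneath it is removed individually.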
*/ 4706 SLIST_INIT(&free); 4707 if (pte1_is_section(opte1)) { 4708 /* 4709 * If the section resulted from a promotion, then a 4710 * reserved PT page could be freed. 4711 */ 4712 pmap_remove_pte1(pmap, pte1p, va, &free); 4713 } else { 4714 sched_pin(); 4715 end = va + PTE1_SIZE; 4716 for (cur = va, pte2p = pmap_pte2_quick(pmap, va); 4717 cur != end; cur += PAGE_SIZE, pte2p++) { 4718 pte2 = pte2_load(pte2p); 4719 if (!pte2_is_valid(pte2)) 4720 continue; 4721 if (pmap_remove_pte2(pmap, pte2p, cur, &free)) 4722 break; 4723 } 4724 sched_unpin(); 4725 } 4726 vm_page_free_pages_toq(&free, false); 4727 } 4728 if ((m->oflags & VPO_UNMANAGED) == 0) { 4729 /* 4730 * Abort this mapping if its PV entry could not be created. 4731 */ 4732 if (!pmap_pv_insert_pte1(pmap, va, pte1, flags)) { 4733 CTR3(KTR_PMAP, "%s: failure for va %#lx in pmap %p", 4734 __func__, va, pmap); 4735 return (KERN_RESOURCE_SHORTAGE); 4736 } 4737 if ((pte1 & PTE1_RO) == 0) { 4738 for (mt = m; mt < &m[PTE1_SIZE / PAGE_SIZE]; mt++) 4739 vm_page_aflag_set(mt, PGA_WRITEABLE); 4740 } 4741 } 4742 4743 /* 4744 * Increment counters. 4745 */ 4746 if (pte1_is_wired(pte1)) 4747 pmap->pm_stats.wired_count += PTE1_SIZE / PAGE_SIZE; 4748 pmap->pm_stats.resident_count += PTE1_SIZE / PAGE_SIZE; 4749 4750 /* 4751 * Sync icache if exec permission and attribute VM_MEMATTR_WB_WA 4752 * is set. QQQ: For more info, see comments in pmap_enter(). 4753 */ 4754 if ((pte1 & PTE1_NX) == 0 && m->md.pat_mode == VM_MEMATTR_WB_WA && 4755 pmap != kernel_pmap && (!pte1_is_section(opte1) || 4756 pte1_pa(opte1) != VM_PAGE_TO_PHYS(m) || (opte1 & PTE2_NX) != 0)) 4757 cache_icache_sync_fresh(va, VM_PAGE_TO_PHYS(m), PTE1_SIZE); 4758 4759 /* 4760 * Map the section. 4761 */ 4762 pte1_store(pte1p, pte1); 4763 4764 pmap_pte1_mappings++; 4765 CTR3(KTR_PMAP, "%s: success for va %#lx in pmap %p", __func__, va, 4766 pmap); 4767 return (KERN_SUCCESS); 4768 } 4769 4770 /* 4771 * Maps a sequence of resident pages belonging to the same object. 4772 * The sequence begins with the given page m_start. This page is 4773 * mapped at the given virtual address start. Each subsequent page is 4774 * mapped at a virtual address that is offset from start by the same 4775 * amount as the page is offset from m_start within the object. The 4776 * last page in the sequence is the page with the largest offset from 4777 * m_start that can be mapped at a virtual address less than the given 4778 * virtual address end. Not every virtual page between start and end 4779 * is mapped; only those for which a resident page exists with the 4780 * corresponding offset from m_start are mapped. 
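 *
 * For example (illustrative numbers only): if m_start has pindex 8 and is
 * mapped at start, then a resident page of the same object with pindex 11
 * is mapped at start + 3 * PAGE_SIZE, provided that address is still below
 * end.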
4781 */ 4782 void 4783 pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end, 4784 vm_page_t m_start, vm_prot_t prot) 4785 { 4786 vm_offset_t va; 4787 vm_page_t m, mpt2pg; 4788 vm_pindex_t diff, psize; 4789 4790 PDEBUG(6, printf("%s: pmap %p start %#x end %#x m %p prot %#x\n", 4791 __func__, pmap, start, end, m_start, prot)); 4792 4793 VM_OBJECT_ASSERT_LOCKED(m_start->object); 4794 psize = atop(end - start); 4795 mpt2pg = NULL; 4796 m = m_start; 4797 rw_wlock(&pvh_global_lock); 4798 PMAP_LOCK(pmap); 4799 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 4800 va = start + ptoa(diff); 4801 if ((va & PTE1_OFFSET) == 0 && va + PTE1_SIZE <= end && 4802 m->psind == 1 && sp_enabled && 4803 pmap_enter_1mpage(pmap, va, m, prot)) 4804 m = &m[PTE1_SIZE / PAGE_SIZE - 1]; 4805 else 4806 mpt2pg = pmap_enter_quick_locked(pmap, va, m, prot, 4807 mpt2pg); 4808 m = TAILQ_NEXT(m, listq); 4809 } 4810 rw_wunlock(&pvh_global_lock); 4811 PMAP_UNLOCK(pmap); 4812 } 4813 4814 /* 4815 * This code maps large physical mmap regions into the 4816 * processor address space. Note that some shortcuts 4817 * are taken, but the code works. 4818 */ 4819 void 4820 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object, 4821 vm_pindex_t pindex, vm_size_t size) 4822 { 4823 pt1_entry_t *pte1p; 4824 vm_paddr_t pa, pte2_pa; 4825 vm_page_t p; 4826 vm_memattr_t pat_mode; 4827 u_int l1attr, l1prot; 4828 4829 VM_OBJECT_ASSERT_WLOCKED(object); 4830 KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG, 4831 ("%s: non-device object", __func__)); 4832 if ((addr & PTE1_OFFSET) == 0 && (size & PTE1_OFFSET) == 0) { 4833 if (!vm_object_populate(object, pindex, pindex + atop(size))) 4834 return; 4835 p = vm_page_lookup(object, pindex); 4836 KASSERT(p->valid == VM_PAGE_BITS_ALL, 4837 ("%s: invalid page %p", __func__, p)); 4838 pat_mode = p->md.pat_mode; 4839 4840 /* 4841 * Abort the mapping if the first page is not physically 4842 * aligned to a 1MB page boundary. 4843 */ 4844 pte2_pa = VM_PAGE_TO_PHYS(p); 4845 if (pte2_pa & PTE1_OFFSET) 4846 return; 4847 4848 /* 4849 * Skip the first page. Abort the mapping if the rest of 4850 * the pages are not physically contiguous or have differing 4851 * memory attributes. 4852 */ 4853 p = TAILQ_NEXT(p, listq); 4854 for (pa = pte2_pa + PAGE_SIZE; pa < pte2_pa + size; 4855 pa += PAGE_SIZE) { 4856 KASSERT(p->valid == VM_PAGE_BITS_ALL, 4857 ("%s: invalid page %p", __func__, p)); 4858 if (pa != VM_PAGE_TO_PHYS(p) || 4859 pat_mode != p->md.pat_mode) 4860 return; 4861 p = TAILQ_NEXT(p, listq); 4862 } 4863 4864 /* 4865 * Map using 1MB pages. 4866 * 4867 * QQQ: Well, we are mapping a section, so same condition must 4868 * be hold like during promotion. It looks that only RW mapping 4869 * is done here, so readonly mapping must be done elsewhere. 4870 */ 4871 l1prot = PTE1_U | PTE1_NG | PTE1_RW | PTE1_M | PTE1_A; 4872 l1attr = ATTR_TO_L1(vm_memattr_to_pte2(pat_mode)); 4873 PMAP_LOCK(pmap); 4874 for (pa = pte2_pa; pa < pte2_pa + size; pa += PTE1_SIZE) { 4875 pte1p = pmap_pte1(pmap, addr); 4876 if (!pte1_is_valid(pte1_load(pte1p))) { 4877 pte1_store(pte1p, PTE1(pa, l1prot, l1attr)); 4878 pmap->pm_stats.resident_count += PTE1_SIZE / 4879 PAGE_SIZE; 4880 pmap_pte1_mappings++; 4881 } 4882 /* Else continue on if the PTE1 is already valid. */ 4883 addr += PTE1_SIZE; 4884 } 4885 PMAP_UNLOCK(pmap); 4886 } 4887 } 4888 4889 /* 4890 * Do the things to protect a 1mpage in a process. 
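 *
 * Only permission reductions are applied: write access is revoked by
 * setting PTE1_RO and PTE1_NM (after the dirty state has been transferred
 * to the underlying vm_pages) and execute access by setting PTE1_NX.
 * Permissions are never added here, so no icache synchronization is needed.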
4891 */ 4892 static void 4893 pmap_protect_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t sva, 4894 vm_prot_t prot) 4895 { 4896 pt1_entry_t npte1, opte1; 4897 vm_offset_t eva, va; 4898 vm_page_t m; 4899 4900 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 4901 KASSERT((sva & PTE1_OFFSET) == 0, 4902 ("%s: sva is not 1mpage aligned", __func__)); 4903 4904 opte1 = npte1 = pte1_load(pte1p); 4905 if (pte1_is_managed(opte1) && pte1_is_dirty(opte1)) { 4906 eva = sva + PTE1_SIZE; 4907 for (va = sva, m = PHYS_TO_VM_PAGE(pte1_pa(opte1)); 4908 va < eva; va += PAGE_SIZE, m++) 4909 vm_page_dirty(m); 4910 } 4911 if ((prot & VM_PROT_WRITE) == 0) 4912 npte1 |= PTE1_RO | PTE1_NM; 4913 if ((prot & VM_PROT_EXECUTE) == 0) 4914 npte1 |= PTE1_NX; 4915 4916 /* 4917 * QQQ: Herein, execute permission is never set. 4918 * It only can be cleared. So, no icache 4919 * syncing is needed. 4920 */ 4921 4922 if (npte1 != opte1) { 4923 pte1_store(pte1p, npte1); 4924 pmap_tlb_flush(pmap, sva); 4925 } 4926 } 4927 4928 /* 4929 * Set the physical protection on the 4930 * specified range of this map as requested. 4931 */ 4932 void 4933 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) 4934 { 4935 boolean_t pv_lists_locked; 4936 vm_offset_t nextva; 4937 pt1_entry_t *pte1p, pte1; 4938 pt2_entry_t *pte2p, opte2, npte2; 4939 4940 KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot)); 4941 if (prot == VM_PROT_NONE) { 4942 pmap_remove(pmap, sva, eva); 4943 return; 4944 } 4945 4946 if ((prot & (VM_PROT_WRITE | VM_PROT_EXECUTE)) == 4947 (VM_PROT_WRITE | VM_PROT_EXECUTE)) 4948 return; 4949 4950 if (pmap_is_current(pmap)) 4951 pv_lists_locked = FALSE; 4952 else { 4953 pv_lists_locked = TRUE; 4954 resume: 4955 rw_wlock(&pvh_global_lock); 4956 sched_pin(); 4957 } 4958 4959 PMAP_LOCK(pmap); 4960 for (; sva < eva; sva = nextva) { 4961 /* 4962 * Calculate address for next L2 page table. 4963 */ 4964 nextva = pte1_trunc(sva + PTE1_SIZE); 4965 if (nextva < sva) 4966 nextva = eva; 4967 4968 pte1p = pmap_pte1(pmap, sva); 4969 pte1 = pte1_load(pte1p); 4970 4971 /* 4972 * Weed out invalid mappings. Note: we assume that L1 page 4973 * page table is always allocated, and in kernel virtual. 4974 */ 4975 if (pte1 == 0) 4976 continue; 4977 4978 if (pte1_is_section(pte1)) { 4979 /* 4980 * Are we protecting the entire large page? If not, 4981 * demote the mapping and fall through. 4982 */ 4983 if (sva + PTE1_SIZE == nextva && eva >= nextva) { 4984 pmap_protect_pte1(pmap, pte1p, sva, prot); 4985 continue; 4986 } else { 4987 if (!pv_lists_locked) { 4988 pv_lists_locked = TRUE; 4989 if (!rw_try_wlock(&pvh_global_lock)) { 4990 PMAP_UNLOCK(pmap); 4991 goto resume; 4992 } 4993 sched_pin(); 4994 } 4995 if (!pmap_demote_pte1(pmap, pte1p, sva)) { 4996 /* 4997 * The large page mapping 4998 * was destroyed. 4999 */ 5000 continue; 5001 } 5002 #ifdef INVARIANTS 5003 else { 5004 /* Update pte1 after demotion */ 5005 pte1 = pte1_load(pte1p); 5006 } 5007 #endif 5008 } 5009 } 5010 5011 KASSERT(pte1_is_link(pte1), ("%s: pmap %p va %#x pte1 %#x at %p" 5012 " is not link", __func__, pmap, sva, pte1, pte1p)); 5013 5014 /* 5015 * Limit our scan to either the end of the va represented 5016 * by the current L2 page table page, or to the end of the 5017 * range being protected. 
5018 */ 5019 if (nextva > eva) 5020 nextva = eva; 5021 5022 for (pte2p = pmap_pte2_quick(pmap, sva); sva != nextva; pte2p++, 5023 sva += PAGE_SIZE) { 5024 vm_page_t m; 5025 5026 opte2 = npte2 = pte2_load(pte2p); 5027 if (!pte2_is_valid(opte2)) 5028 continue; 5029 5030 if ((prot & VM_PROT_WRITE) == 0) { 5031 if (pte2_is_managed(opte2) && 5032 pte2_is_dirty(opte2)) { 5033 m = PHYS_TO_VM_PAGE(pte2_pa(opte2)); 5034 vm_page_dirty(m); 5035 } 5036 npte2 |= PTE2_RO | PTE2_NM; 5037 } 5038 5039 if ((prot & VM_PROT_EXECUTE) == 0) 5040 npte2 |= PTE2_NX; 5041 5042 /* 5043 * QQQ: Herein, execute permission is never set. 5044 * It only can be cleared. So, no icache 5045 * syncing is needed. 5046 */ 5047 5048 if (npte2 != opte2) { 5049 pte2_store(pte2p, npte2); 5050 pmap_tlb_flush(pmap, sva); 5051 } 5052 } 5053 } 5054 if (pv_lists_locked) { 5055 sched_unpin(); 5056 rw_wunlock(&pvh_global_lock); 5057 } 5058 PMAP_UNLOCK(pmap); 5059 } 5060 5061 /* 5062 * pmap_pvh_wired_mappings: 5063 * 5064 * Return the updated number "count" of managed mappings that are wired. 5065 */ 5066 static int 5067 pmap_pvh_wired_mappings(struct md_page *pvh, int count) 5068 { 5069 pmap_t pmap; 5070 pt1_entry_t pte1; 5071 pt2_entry_t pte2; 5072 pv_entry_t pv; 5073 5074 rw_assert(&pvh_global_lock, RA_WLOCKED); 5075 sched_pin(); 5076 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { 5077 pmap = PV_PMAP(pv); 5078 PMAP_LOCK(pmap); 5079 pte1 = pte1_load(pmap_pte1(pmap, pv->pv_va)); 5080 if (pte1_is_section(pte1)) { 5081 if (pte1_is_wired(pte1)) 5082 count++; 5083 } else { 5084 KASSERT(pte1_is_link(pte1), 5085 ("%s: pte1 %#x is not link", __func__, pte1)); 5086 pte2 = pte2_load(pmap_pte2_quick(pmap, pv->pv_va)); 5087 if (pte2_is_wired(pte2)) 5088 count++; 5089 } 5090 PMAP_UNLOCK(pmap); 5091 } 5092 sched_unpin(); 5093 return (count); 5094 } 5095 5096 /* 5097 * pmap_page_wired_mappings: 5098 * 5099 * Return the number of managed mappings to the given physical page 5100 * that are wired. 5101 */ 5102 int 5103 pmap_page_wired_mappings(vm_page_t m) 5104 { 5105 int count; 5106 5107 count = 0; 5108 if ((m->oflags & VPO_UNMANAGED) != 0) 5109 return (count); 5110 rw_wlock(&pvh_global_lock); 5111 count = pmap_pvh_wired_mappings(&m->md, count); 5112 if ((m->flags & PG_FICTITIOUS) == 0) { 5113 count = pmap_pvh_wired_mappings(pa_to_pvh(VM_PAGE_TO_PHYS(m)), 5114 count); 5115 } 5116 rw_wunlock(&pvh_global_lock); 5117 return (count); 5118 } 5119 5120 /* 5121 * Returns TRUE if any of the given mappings were used to modify 5122 * physical memory. Otherwise, returns FALSE. Both page and 1mpage 5123 * mappings are supported. 5124 */ 5125 static boolean_t 5126 pmap_is_modified_pvh(struct md_page *pvh) 5127 { 5128 pv_entry_t pv; 5129 pt1_entry_t pte1; 5130 pt2_entry_t pte2; 5131 pmap_t pmap; 5132 boolean_t rv; 5133 5134 rw_assert(&pvh_global_lock, RA_WLOCKED); 5135 rv = FALSE; 5136 sched_pin(); 5137 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { 5138 pmap = PV_PMAP(pv); 5139 PMAP_LOCK(pmap); 5140 pte1 = pte1_load(pmap_pte1(pmap, pv->pv_va)); 5141 if (pte1_is_section(pte1)) { 5142 rv = pte1_is_dirty(pte1); 5143 } else { 5144 KASSERT(pte1_is_link(pte1), 5145 ("%s: pte1 %#x is not link", __func__, pte1)); 5146 pte2 = pte2_load(pmap_pte2_quick(pmap, pv->pv_va)); 5147 rv = pte2_is_dirty(pte2); 5148 } 5149 PMAP_UNLOCK(pmap); 5150 if (rv) 5151 break; 5152 } 5153 sched_unpin(); 5154 return (rv); 5155 } 5156 5157 /* 5158 * pmap_is_modified: 5159 * 5160 * Return whether or not the specified physical page was modified 5161 * in any physical maps. 
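 *
 *	Both 4KB mappings of the page and any 1MB mappings of its
 *	containing superpage are taken into account.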
5162 */ 5163 boolean_t 5164 pmap_is_modified(vm_page_t m) 5165 { 5166 boolean_t rv; 5167 5168 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 5169 ("%s: page %p is not managed", __func__, m)); 5170 5171 /* 5172 * If the page is not busied then this check is racy. 5173 */ 5174 if (!pmap_page_is_write_mapped(m)) 5175 return (FALSE); 5176 rw_wlock(&pvh_global_lock); 5177 rv = pmap_is_modified_pvh(&m->md) || 5178 ((m->flags & PG_FICTITIOUS) == 0 && 5179 pmap_is_modified_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m)))); 5180 rw_wunlock(&pvh_global_lock); 5181 return (rv); 5182 } 5183 5184 /* 5185 * pmap_is_prefaultable: 5186 * 5187 * Return whether or not the specified virtual address is eligible 5188 * for prefault. 5189 */ 5190 boolean_t 5191 pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr) 5192 { 5193 pt1_entry_t pte1; 5194 pt2_entry_t pte2; 5195 boolean_t rv; 5196 5197 rv = FALSE; 5198 PMAP_LOCK(pmap); 5199 pte1 = pte1_load(pmap_pte1(pmap, addr)); 5200 if (pte1_is_link(pte1)) { 5201 pte2 = pte2_load(pt2map_entry(addr)); 5202 rv = !pte2_is_valid(pte2) ; 5203 } 5204 PMAP_UNLOCK(pmap); 5205 return (rv); 5206 } 5207 5208 /* 5209 * Returns TRUE if any of the given mappings were referenced and FALSE 5210 * otherwise. Both page and 1mpage mappings are supported. 5211 */ 5212 static boolean_t 5213 pmap_is_referenced_pvh(struct md_page *pvh) 5214 { 5215 5216 pv_entry_t pv; 5217 pt1_entry_t pte1; 5218 pt2_entry_t pte2; 5219 pmap_t pmap; 5220 boolean_t rv; 5221 5222 rw_assert(&pvh_global_lock, RA_WLOCKED); 5223 rv = FALSE; 5224 sched_pin(); 5225 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { 5226 pmap = PV_PMAP(pv); 5227 PMAP_LOCK(pmap); 5228 pte1 = pte1_load(pmap_pte1(pmap, pv->pv_va)); 5229 if (pte1_is_section(pte1)) { 5230 rv = (pte1 & (PTE1_A | PTE1_V)) == (PTE1_A | PTE1_V); 5231 } else { 5232 pte2 = pte2_load(pmap_pte2_quick(pmap, pv->pv_va)); 5233 rv = (pte2 & (PTE2_A | PTE2_V)) == (PTE2_A | PTE2_V); 5234 } 5235 PMAP_UNLOCK(pmap); 5236 if (rv) 5237 break; 5238 } 5239 sched_unpin(); 5240 return (rv); 5241 } 5242 5243 /* 5244 * pmap_is_referenced: 5245 * 5246 * Return whether or not the specified physical page was referenced 5247 * in any physical maps. 5248 */ 5249 boolean_t 5250 pmap_is_referenced(vm_page_t m) 5251 { 5252 boolean_t rv; 5253 5254 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 5255 ("%s: page %p is not managed", __func__, m)); 5256 rw_wlock(&pvh_global_lock); 5257 rv = pmap_is_referenced_pvh(&m->md) || 5258 ((m->flags & PG_FICTITIOUS) == 0 && 5259 pmap_is_referenced_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m)))); 5260 rw_wunlock(&pvh_global_lock); 5261 return (rv); 5262 } 5263 5264 /* 5265 * pmap_ts_referenced: 5266 * 5267 * Return a count of reference bits for a page, clearing those bits. 5268 * It is not necessary for every reference bit to be cleared, but it 5269 * is necessary that 0 only be returned when there are truly no 5270 * reference bits set. 5271 * 5272 * As an optimization, update the page's dirty field if a modified bit is 5273 * found while counting reference bits. This opportunistic update can be 5274 * performed at low cost and can eliminate the need for some future calls 5275 * to pmap_is_modified(). However, since this function stops after 5276 * finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some 5277 * dirty pages. Those dirty pages will only be detected by a future call 5278 * to pmap_is_modified(). 
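 *
 * The pv lists are rotated as they are scanned so that successive calls
 * do not keep examining the same mappings first.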
5279 */ 5280 int 5281 pmap_ts_referenced(vm_page_t m) 5282 { 5283 struct md_page *pvh; 5284 pv_entry_t pv, pvf; 5285 pmap_t pmap; 5286 pt1_entry_t *pte1p, opte1; 5287 pt2_entry_t *pte2p, opte2; 5288 vm_paddr_t pa; 5289 int rtval = 0; 5290 5291 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 5292 ("%s: page %p is not managed", __func__, m)); 5293 pa = VM_PAGE_TO_PHYS(m); 5294 pvh = pa_to_pvh(pa); 5295 rw_wlock(&pvh_global_lock); 5296 sched_pin(); 5297 if ((m->flags & PG_FICTITIOUS) != 0 || 5298 (pvf = TAILQ_FIRST(&pvh->pv_list)) == NULL) 5299 goto small_mappings; 5300 pv = pvf; 5301 do { 5302 pmap = PV_PMAP(pv); 5303 PMAP_LOCK(pmap); 5304 pte1p = pmap_pte1(pmap, pv->pv_va); 5305 opte1 = pte1_load(pte1p); 5306 if (pte1_is_dirty(opte1)) { 5307 /* 5308 * Although "opte1" is mapping a 1MB page, because 5309 * this function is called at a 4KB page granularity, 5310 * we only update the 4KB page under test. 5311 */ 5312 vm_page_dirty(m); 5313 } 5314 if ((opte1 & PTE1_A) != 0) { 5315 /* 5316 * Since this reference bit is shared by 256 4KB pages, 5317 * it should not be cleared every time it is tested. 5318 * Apply a simple "hash" function on the physical page 5319 * number, the virtual section number, and the pmap 5320 * address to select one 4KB page out of the 256 5321 * on which testing the reference bit will result 5322 * in clearing that bit. This function is designed 5323 * to avoid the selection of the same 4KB page 5324 * for every 1MB page mapping. 5325 * 5326 * On demotion, a mapping that hasn't been referenced 5327 * is simply destroyed. To avoid the possibility of a 5328 * subsequent page fault on a demoted wired mapping, 5329 * always leave its reference bit set. Moreover, 5330 * since the section is wired, the current state of 5331 * its reference bit won't affect page replacement. 5332 */ 5333 if ((((pa >> PAGE_SHIFT) ^ (pv->pv_va >> PTE1_SHIFT) ^ 5334 (uintptr_t)pmap) & (NPTE2_IN_PG - 1)) == 0 && 5335 !pte1_is_wired(opte1)) { 5336 pte1_clear_bit(pte1p, PTE1_A); 5337 pmap_tlb_flush(pmap, pv->pv_va); 5338 } 5339 rtval++; 5340 } 5341 PMAP_UNLOCK(pmap); 5342 /* Rotate the PV list if it has more than one entry. */ 5343 if (TAILQ_NEXT(pv, pv_next) != NULL) { 5344 TAILQ_REMOVE(&pvh->pv_list, pv, pv_next); 5345 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next); 5346 } 5347 if (rtval >= PMAP_TS_REFERENCED_MAX) 5348 goto out; 5349 } while ((pv = TAILQ_FIRST(&pvh->pv_list)) != pvf); 5350 small_mappings: 5351 if ((pvf = TAILQ_FIRST(&m->md.pv_list)) == NULL) 5352 goto out; 5353 pv = pvf; 5354 do { 5355 pmap = PV_PMAP(pv); 5356 PMAP_LOCK(pmap); 5357 pte1p = pmap_pte1(pmap, pv->pv_va); 5358 KASSERT(pte1_is_link(pte1_load(pte1p)), 5359 ("%s: not found a link in page %p's pv list", __func__, m)); 5360 5361 pte2p = pmap_pte2_quick(pmap, pv->pv_va); 5362 opte2 = pte2_load(pte2p); 5363 if (pte2_is_dirty(opte2)) 5364 vm_page_dirty(m); 5365 if ((opte2 & PTE2_A) != 0) { 5366 pte2_clear_bit(pte2p, PTE2_A); 5367 pmap_tlb_flush(pmap, pv->pv_va); 5368 rtval++; 5369 } 5370 PMAP_UNLOCK(pmap); 5371 /* Rotate the PV list if it has more than one entry. */ 5372 if (TAILQ_NEXT(pv, pv_next) != NULL) { 5373 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); 5374 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); 5375 } 5376 } while ((pv = TAILQ_FIRST(&m->md.pv_list)) != pvf && rtval < 5377 PMAP_TS_REFERENCED_MAX); 5378 out: 5379 sched_unpin(); 5380 rw_wunlock(&pvh_global_lock); 5381 return (rtval); 5382 } 5383 5384 /* 5385 * Clear the wired attribute from the mappings for the specified range of 5386 * addresses in the given pmap. 
Every valid mapping within that range 5387 * must have the wired attribute set. In contrast, invalid mappings 5388 * cannot have the wired attribute set, so they are ignored. 5389 * 5390 * The wired attribute of the page table entry is not a hardware feature, 5391 * so there is no need to invalidate any TLB entries. 5392 */ 5393 void 5394 pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 5395 { 5396 vm_offset_t nextva; 5397 pt1_entry_t *pte1p, pte1; 5398 pt2_entry_t *pte2p, pte2; 5399 boolean_t pv_lists_locked; 5400 5401 if (pmap_is_current(pmap)) 5402 pv_lists_locked = FALSE; 5403 else { 5404 pv_lists_locked = TRUE; 5405 resume: 5406 rw_wlock(&pvh_global_lock); 5407 sched_pin(); 5408 } 5409 PMAP_LOCK(pmap); 5410 for (; sva < eva; sva = nextva) { 5411 nextva = pte1_trunc(sva + PTE1_SIZE); 5412 if (nextva < sva) 5413 nextva = eva; 5414 5415 pte1p = pmap_pte1(pmap, sva); 5416 pte1 = pte1_load(pte1p); 5417 5418 /* 5419 * Weed out invalid mappings. Note: we assume that L1 page 5420 * page table is always allocated, and in kernel virtual. 5421 */ 5422 if (pte1 == 0) 5423 continue; 5424 5425 if (pte1_is_section(pte1)) { 5426 if (!pte1_is_wired(pte1)) 5427 panic("%s: pte1 %#x not wired", __func__, pte1); 5428 5429 /* 5430 * Are we unwiring the entire large page? If not, 5431 * demote the mapping and fall through. 5432 */ 5433 if (sva + PTE1_SIZE == nextva && eva >= nextva) { 5434 pte1_clear_bit(pte1p, PTE1_W); 5435 pmap->pm_stats.wired_count -= PTE1_SIZE / 5436 PAGE_SIZE; 5437 continue; 5438 } else { 5439 if (!pv_lists_locked) { 5440 pv_lists_locked = TRUE; 5441 if (!rw_try_wlock(&pvh_global_lock)) { 5442 PMAP_UNLOCK(pmap); 5443 /* Repeat sva. */ 5444 goto resume; 5445 } 5446 sched_pin(); 5447 } 5448 if (!pmap_demote_pte1(pmap, pte1p, sva)) 5449 panic("%s: demotion failed", __func__); 5450 #ifdef INVARIANTS 5451 else { 5452 /* Update pte1 after demotion */ 5453 pte1 = pte1_load(pte1p); 5454 } 5455 #endif 5456 } 5457 } 5458 5459 KASSERT(pte1_is_link(pte1), ("%s: pmap %p va %#x pte1 %#x at %p" 5460 " is not link", __func__, pmap, sva, pte1, pte1p)); 5461 5462 /* 5463 * Limit our scan to either the end of the va represented 5464 * by the current L2 page table page, or to the end of the 5465 * range being protected. 5466 */ 5467 if (nextva > eva) 5468 nextva = eva; 5469 5470 for (pte2p = pmap_pte2_quick(pmap, sva); sva != nextva; pte2p++, 5471 sva += PAGE_SIZE) { 5472 pte2 = pte2_load(pte2p); 5473 if (!pte2_is_valid(pte2)) 5474 continue; 5475 if (!pte2_is_wired(pte2)) 5476 panic("%s: pte2 %#x is missing PTE2_W", 5477 __func__, pte2); 5478 5479 /* 5480 * PTE2_W must be cleared atomically. Although the pmap 5481 * lock synchronizes access to PTE2_W, another processor 5482 * could be changing PTE2_NM and/or PTE2_A concurrently. 5483 */ 5484 pte2_clear_bit(pte2p, PTE2_W); 5485 pmap->pm_stats.wired_count--; 5486 } 5487 } 5488 if (pv_lists_locked) { 5489 sched_unpin(); 5490 rw_wunlock(&pvh_global_lock); 5491 } 5492 PMAP_UNLOCK(pmap); 5493 } 5494 5495 /* 5496 * Clear the write and modified bits in each of the given page's mappings. 
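 *
 * Any writeable 1MB mapping of the containing superpage is demoted first;
 * each remaining writeable 4KB mapping is then made read-only
 * (PTE2_RO | PTE2_NM), with a set dirty state transferred to the vm_page
 * before write permission is taken away.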
5497 */ 5498 void 5499 pmap_remove_write(vm_page_t m) 5500 { 5501 struct md_page *pvh; 5502 pv_entry_t next_pv, pv; 5503 pmap_t pmap; 5504 pt1_entry_t *pte1p; 5505 pt2_entry_t *pte2p, opte2; 5506 vm_offset_t va; 5507 5508 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 5509 ("%s: page %p is not managed", __func__, m)); 5510 vm_page_assert_busied(m); 5511 5512 if (!pmap_page_is_write_mapped(m)) 5513 return; 5514 rw_wlock(&pvh_global_lock); 5515 sched_pin(); 5516 if ((m->flags & PG_FICTITIOUS) != 0) 5517 goto small_mappings; 5518 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 5519 TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) { 5520 va = pv->pv_va; 5521 pmap = PV_PMAP(pv); 5522 PMAP_LOCK(pmap); 5523 pte1p = pmap_pte1(pmap, va); 5524 if (!(pte1_load(pte1p) & PTE1_RO)) 5525 (void)pmap_demote_pte1(pmap, pte1p, va); 5526 PMAP_UNLOCK(pmap); 5527 } 5528 small_mappings: 5529 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { 5530 pmap = PV_PMAP(pv); 5531 PMAP_LOCK(pmap); 5532 pte1p = pmap_pte1(pmap, pv->pv_va); 5533 KASSERT(!pte1_is_section(pte1_load(pte1p)), ("%s: found" 5534 " a section in page %p's pv list", __func__, m)); 5535 pte2p = pmap_pte2_quick(pmap, pv->pv_va); 5536 opte2 = pte2_load(pte2p); 5537 if (!(opte2 & PTE2_RO)) { 5538 pte2_store(pte2p, opte2 | PTE2_RO | PTE2_NM); 5539 if (pte2_is_dirty(opte2)) 5540 vm_page_dirty(m); 5541 pmap_tlb_flush(pmap, pv->pv_va); 5542 } 5543 PMAP_UNLOCK(pmap); 5544 } 5545 vm_page_aflag_clear(m, PGA_WRITEABLE); 5546 sched_unpin(); 5547 rw_wunlock(&pvh_global_lock); 5548 } 5549 5550 /* 5551 * Apply the given advice to the specified range of addresses within the 5552 * given pmap. Depending on the advice, clear the referenced and/or 5553 * modified flags in each mapping and set the mapped page's dirty field. 5554 */ 5555 void 5556 pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice) 5557 { 5558 pt1_entry_t *pte1p, opte1; 5559 pt2_entry_t *pte2p, pte2; 5560 vm_offset_t pdnxt; 5561 vm_page_t m; 5562 boolean_t pv_lists_locked; 5563 5564 if (advice != MADV_DONTNEED && advice != MADV_FREE) 5565 return; 5566 if (pmap_is_current(pmap)) 5567 pv_lists_locked = FALSE; 5568 else { 5569 pv_lists_locked = TRUE; 5570 resume: 5571 rw_wlock(&pvh_global_lock); 5572 sched_pin(); 5573 } 5574 PMAP_LOCK(pmap); 5575 for (; sva < eva; sva = pdnxt) { 5576 pdnxt = pte1_trunc(sva + PTE1_SIZE); 5577 if (pdnxt < sva) 5578 pdnxt = eva; 5579 pte1p = pmap_pte1(pmap, sva); 5580 opte1 = pte1_load(pte1p); 5581 if (!pte1_is_valid(opte1)) /* XXX */ 5582 continue; 5583 else if (pte1_is_section(opte1)) { 5584 if (!pte1_is_managed(opte1)) 5585 continue; 5586 if (!pv_lists_locked) { 5587 pv_lists_locked = TRUE; 5588 if (!rw_try_wlock(&pvh_global_lock)) { 5589 PMAP_UNLOCK(pmap); 5590 goto resume; 5591 } 5592 sched_pin(); 5593 } 5594 if (!pmap_demote_pte1(pmap, pte1p, sva)) { 5595 /* 5596 * The large page mapping was destroyed. 5597 */ 5598 continue; 5599 } 5600 5601 /* 5602 * Unless the page mappings are wired, remove the 5603 * mapping to a single page so that a subsequent 5604 * access may repromote. Since the underlying L2 page 5605 * table is fully populated, this removal never 5606 * frees a L2 page table page. 
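			 * That is also why pmap_remove_pte2() can safely be
			 * called with a NULL free list below.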
5607 */ 5608 if (!pte1_is_wired(opte1)) { 5609 pte2p = pmap_pte2_quick(pmap, sva); 5610 KASSERT(pte2_is_valid(pte2_load(pte2p)), 5611 ("%s: invalid PTE2", __func__)); 5612 pmap_remove_pte2(pmap, pte2p, sva, NULL); 5613 } 5614 } 5615 if (pdnxt > eva) 5616 pdnxt = eva; 5617 for (pte2p = pmap_pte2_quick(pmap, sva); sva != pdnxt; pte2p++, 5618 sva += PAGE_SIZE) { 5619 pte2 = pte2_load(pte2p); 5620 if (!pte2_is_valid(pte2) || !pte2_is_managed(pte2)) 5621 continue; 5622 else if (pte2_is_dirty(pte2)) { 5623 if (advice == MADV_DONTNEED) { 5624 /* 5625 * Future calls to pmap_is_modified() 5626 * can be avoided by making the page 5627 * dirty now. 5628 */ 5629 m = PHYS_TO_VM_PAGE(pte2_pa(pte2)); 5630 vm_page_dirty(m); 5631 } 5632 pte2_set_bit(pte2p, PTE2_NM); 5633 pte2_clear_bit(pte2p, PTE2_A); 5634 } else if ((pte2 & PTE2_A) != 0) 5635 pte2_clear_bit(pte2p, PTE2_A); 5636 else 5637 continue; 5638 pmap_tlb_flush(pmap, sva); 5639 } 5640 } 5641 if (pv_lists_locked) { 5642 sched_unpin(); 5643 rw_wunlock(&pvh_global_lock); 5644 } 5645 PMAP_UNLOCK(pmap); 5646 } 5647 5648 /* 5649 * Clear the modify bits on the specified physical page. 5650 */ 5651 void 5652 pmap_clear_modify(vm_page_t m) 5653 { 5654 struct md_page *pvh; 5655 pv_entry_t next_pv, pv; 5656 pmap_t pmap; 5657 pt1_entry_t *pte1p, opte1; 5658 pt2_entry_t *pte2p, opte2; 5659 vm_offset_t va; 5660 5661 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 5662 ("%s: page %p is not managed", __func__, m)); 5663 vm_page_assert_busied(m); 5664 5665 if (!pmap_page_is_write_mapped(m)) 5666 return; 5667 rw_wlock(&pvh_global_lock); 5668 sched_pin(); 5669 if ((m->flags & PG_FICTITIOUS) != 0) 5670 goto small_mappings; 5671 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 5672 TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) { 5673 va = pv->pv_va; 5674 pmap = PV_PMAP(pv); 5675 PMAP_LOCK(pmap); 5676 pte1p = pmap_pte1(pmap, va); 5677 opte1 = pte1_load(pte1p); 5678 if (!(opte1 & PTE1_RO)) { 5679 if (pmap_demote_pte1(pmap, pte1p, va) && 5680 !pte1_is_wired(opte1)) { 5681 /* 5682 * Write protect the mapping to a 5683 * single page so that a subsequent 5684 * write access may repromote. 5685 */ 5686 va += VM_PAGE_TO_PHYS(m) - pte1_pa(opte1); 5687 pte2p = pmap_pte2_quick(pmap, va); 5688 opte2 = pte2_load(pte2p); 5689 if ((opte2 & PTE2_V)) { 5690 pte2_set_bit(pte2p, PTE2_NM | PTE2_RO); 5691 vm_page_dirty(m); 5692 pmap_tlb_flush(pmap, va); 5693 } 5694 } 5695 } 5696 PMAP_UNLOCK(pmap); 5697 } 5698 small_mappings: 5699 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { 5700 pmap = PV_PMAP(pv); 5701 PMAP_LOCK(pmap); 5702 pte1p = pmap_pte1(pmap, pv->pv_va); 5703 KASSERT(!pte1_is_section(pte1_load(pte1p)), ("%s: found" 5704 " a section in page %p's pv list", __func__, m)); 5705 pte2p = pmap_pte2_quick(pmap, pv->pv_va); 5706 if (pte2_is_dirty(pte2_load(pte2p))) { 5707 pte2_set_bit(pte2p, PTE2_NM); 5708 pmap_tlb_flush(pmap, pv->pv_va); 5709 } 5710 PMAP_UNLOCK(pmap); 5711 } 5712 sched_unpin(); 5713 rw_wunlock(&pvh_global_lock); 5714 } 5715 5716 /* 5717 * Sets the memory attribute for the specified page. 5718 */ 5719 void 5720 pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma) 5721 { 5722 pt2_entry_t *cmap2_pte2p; 5723 vm_memattr_t oma; 5724 vm_paddr_t pa; 5725 struct pcpu *pc; 5726 5727 oma = m->md.pat_mode; 5728 m->md.pat_mode = ma; 5729 5730 CTR5(KTR_PMAP, "%s: page %p - 0x%08X oma: %d, ma: %d", __func__, m, 5731 VM_PAGE_TO_PHYS(m), oma, ma); 5732 if ((m->flags & PG_FICTITIOUS) != 0) 5733 return; 5734 #if 0 5735 /* 5736 * If "m" is a normal page, flush it from the cache. 
5737 * 5738 * First, try to find an existing mapping of the page by sf 5739 * buffer. sf_buf_invalidate_cache() modifies mapping and 5740 * flushes the cache. 5741 */ 5742 if (sf_buf_invalidate_cache(m, oma)) 5743 return; 5744 #endif 5745 /* 5746 * If page is not mapped by sf buffer, map the page 5747 * transient and do invalidation. 5748 */ 5749 if (ma != oma) { 5750 pa = VM_PAGE_TO_PHYS(m); 5751 sched_pin(); 5752 pc = get_pcpu(); 5753 cmap2_pte2p = pc->pc_cmap2_pte2p; 5754 mtx_lock(&pc->pc_cmap_lock); 5755 if (pte2_load(cmap2_pte2p) != 0) 5756 panic("%s: CMAP2 busy", __func__); 5757 pte2_store(cmap2_pte2p, PTE2_KERN_NG(pa, PTE2_AP_KRW, 5758 vm_memattr_to_pte2(ma))); 5759 dcache_wbinv_poc((vm_offset_t)pc->pc_cmap2_addr, pa, PAGE_SIZE); 5760 pte2_clear(cmap2_pte2p); 5761 tlb_flush((vm_offset_t)pc->pc_cmap2_addr); 5762 sched_unpin(); 5763 mtx_unlock(&pc->pc_cmap_lock); 5764 } 5765 } 5766 5767 /* 5768 * Miscellaneous support routines follow 5769 */ 5770 5771 /* 5772 * Returns TRUE if the given page is mapped individually or as part of 5773 * a 1mpage. Otherwise, returns FALSE. 5774 */ 5775 boolean_t 5776 pmap_page_is_mapped(vm_page_t m) 5777 { 5778 boolean_t rv; 5779 5780 if ((m->oflags & VPO_UNMANAGED) != 0) 5781 return (FALSE); 5782 rw_wlock(&pvh_global_lock); 5783 rv = !TAILQ_EMPTY(&m->md.pv_list) || 5784 ((m->flags & PG_FICTITIOUS) == 0 && 5785 !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list)); 5786 rw_wunlock(&pvh_global_lock); 5787 return (rv); 5788 } 5789 5790 /* 5791 * Returns true if the pmap's pv is one of the first 5792 * 16 pvs linked to from this page. This count may 5793 * be changed upwards or downwards in the future; it 5794 * is only necessary that true be returned for a small 5795 * subset of pmaps for proper page aging. 5796 */ 5797 boolean_t 5798 pmap_page_exists_quick(pmap_t pmap, vm_page_t m) 5799 { 5800 struct md_page *pvh; 5801 pv_entry_t pv; 5802 int loops = 0; 5803 boolean_t rv; 5804 5805 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 5806 ("%s: page %p is not managed", __func__, m)); 5807 rv = FALSE; 5808 rw_wlock(&pvh_global_lock); 5809 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { 5810 if (PV_PMAP(pv) == pmap) { 5811 rv = TRUE; 5812 break; 5813 } 5814 loops++; 5815 if (loops >= 16) 5816 break; 5817 } 5818 if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) { 5819 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 5820 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { 5821 if (PV_PMAP(pv) == pmap) { 5822 rv = TRUE; 5823 break; 5824 } 5825 loops++; 5826 if (loops >= 16) 5827 break; 5828 } 5829 } 5830 rw_wunlock(&pvh_global_lock); 5831 return (rv); 5832 } 5833 5834 /* 5835 * pmap_zero_page zeros the specified hardware page by mapping 5836 * the page into KVM and using bzero to clear its contents. 5837 */ 5838 void 5839 pmap_zero_page(vm_page_t m) 5840 { 5841 pt2_entry_t *cmap2_pte2p; 5842 struct pcpu *pc; 5843 5844 sched_pin(); 5845 pc = get_pcpu(); 5846 cmap2_pte2p = pc->pc_cmap2_pte2p; 5847 mtx_lock(&pc->pc_cmap_lock); 5848 if (pte2_load(cmap2_pte2p) != 0) 5849 panic("%s: CMAP2 busy", __func__); 5850 pte2_store(cmap2_pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(m), PTE2_AP_KRW, 5851 vm_page_pte2_attr(m))); 5852 pagezero(pc->pc_cmap2_addr); 5853 pte2_clear(cmap2_pte2p); 5854 tlb_flush((vm_offset_t)pc->pc_cmap2_addr); 5855 sched_unpin(); 5856 mtx_unlock(&pc->pc_cmap_lock); 5857 } 5858 5859 /* 5860 * pmap_zero_page_area zeros the specified hardware page by mapping 5861 * the page into KVM and using bzero to clear its contents. 
5862 * 5863 * off and size may not cover an area beyond a single hardware page. 5864 */ 5865 void 5866 pmap_zero_page_area(vm_page_t m, int off, int size) 5867 { 5868 pt2_entry_t *cmap2_pte2p; 5869 struct pcpu *pc; 5870 5871 sched_pin(); 5872 pc = get_pcpu(); 5873 cmap2_pte2p = pc->pc_cmap2_pte2p; 5874 mtx_lock(&pc->pc_cmap_lock); 5875 if (pte2_load(cmap2_pte2p) != 0) 5876 panic("%s: CMAP2 busy", __func__); 5877 pte2_store(cmap2_pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(m), PTE2_AP_KRW, 5878 vm_page_pte2_attr(m))); 5879 if (off == 0 && size == PAGE_SIZE) 5880 pagezero(pc->pc_cmap2_addr); 5881 else 5882 bzero(pc->pc_cmap2_addr + off, size); 5883 pte2_clear(cmap2_pte2p); 5884 tlb_flush((vm_offset_t)pc->pc_cmap2_addr); 5885 sched_unpin(); 5886 mtx_unlock(&pc->pc_cmap_lock); 5887 } 5888 5889 /* 5890 * pmap_copy_page copies the specified (machine independent) 5891 * page by mapping the page into virtual memory and using 5892 * bcopy to copy the page, one machine dependent page at a 5893 * time. 5894 */ 5895 void 5896 pmap_copy_page(vm_page_t src, vm_page_t dst) 5897 { 5898 pt2_entry_t *cmap1_pte2p, *cmap2_pte2p; 5899 struct pcpu *pc; 5900 5901 sched_pin(); 5902 pc = get_pcpu(); 5903 cmap1_pte2p = pc->pc_cmap1_pte2p; 5904 cmap2_pte2p = pc->pc_cmap2_pte2p; 5905 mtx_lock(&pc->pc_cmap_lock); 5906 if (pte2_load(cmap1_pte2p) != 0) 5907 panic("%s: CMAP1 busy", __func__); 5908 if (pte2_load(cmap2_pte2p) != 0) 5909 panic("%s: CMAP2 busy", __func__); 5910 pte2_store(cmap1_pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(src), 5911 PTE2_AP_KR | PTE2_NM, vm_page_pte2_attr(src))); 5912 pte2_store(cmap2_pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(dst), 5913 PTE2_AP_KRW, vm_page_pte2_attr(dst))); 5914 bcopy(pc->pc_cmap1_addr, pc->pc_cmap2_addr, PAGE_SIZE); 5915 pte2_clear(cmap1_pte2p); 5916 tlb_flush((vm_offset_t)pc->pc_cmap1_addr); 5917 pte2_clear(cmap2_pte2p); 5918 tlb_flush((vm_offset_t)pc->pc_cmap2_addr); 5919 sched_unpin(); 5920 mtx_unlock(&pc->pc_cmap_lock); 5921 } 5922 5923 int unmapped_buf_allowed = 1; 5924 5925 void 5926 pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[], 5927 vm_offset_t b_offset, int xfersize) 5928 { 5929 pt2_entry_t *cmap1_pte2p, *cmap2_pte2p; 5930 vm_page_t a_pg, b_pg; 5931 char *a_cp, *b_cp; 5932 vm_offset_t a_pg_offset, b_pg_offset; 5933 struct pcpu *pc; 5934 int cnt; 5935 5936 sched_pin(); 5937 pc = get_pcpu(); 5938 cmap1_pte2p = pc->pc_cmap1_pte2p; 5939 cmap2_pte2p = pc->pc_cmap2_pte2p; 5940 mtx_lock(&pc->pc_cmap_lock); 5941 if (pte2_load(cmap1_pte2p) != 0) 5942 panic("pmap_copy_pages: CMAP1 busy"); 5943 if (pte2_load(cmap2_pte2p) != 0) 5944 panic("pmap_copy_pages: CMAP2 busy"); 5945 while (xfersize > 0) { 5946 a_pg = ma[a_offset >> PAGE_SHIFT]; 5947 a_pg_offset = a_offset & PAGE_MASK; 5948 cnt = min(xfersize, PAGE_SIZE - a_pg_offset); 5949 b_pg = mb[b_offset >> PAGE_SHIFT]; 5950 b_pg_offset = b_offset & PAGE_MASK; 5951 cnt = min(cnt, PAGE_SIZE - b_pg_offset); 5952 pte2_store(cmap1_pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(a_pg), 5953 PTE2_AP_KR | PTE2_NM, vm_page_pte2_attr(a_pg))); 5954 tlb_flush_local((vm_offset_t)pc->pc_cmap1_addr); 5955 pte2_store(cmap2_pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(b_pg), 5956 PTE2_AP_KRW, vm_page_pte2_attr(b_pg))); 5957 tlb_flush_local((vm_offset_t)pc->pc_cmap2_addr); 5958 a_cp = pc->pc_cmap1_addr + a_pg_offset; 5959 b_cp = pc->pc_cmap2_addr + b_pg_offset; 5960 bcopy(a_cp, b_cp, cnt); 5961 a_offset += cnt; 5962 b_offset += cnt; 5963 xfersize -= cnt; 5964 } 5965 pte2_clear(cmap1_pte2p); 5966 tlb_flush((vm_offset_t)pc->pc_cmap1_addr); 5967 pte2_clear(cmap2_pte2p); 
5968 tlb_flush((vm_offset_t)pc->pc_cmap2_addr); 5969 sched_unpin(); 5970 mtx_unlock(&pc->pc_cmap_lock); 5971 } 5972 5973 vm_offset_t 5974 pmap_quick_enter_page(vm_page_t m) 5975 { 5976 struct pcpu *pc; 5977 pt2_entry_t *pte2p; 5978 5979 critical_enter(); 5980 pc = get_pcpu(); 5981 pte2p = pc->pc_qmap_pte2p; 5982 5983 KASSERT(pte2_load(pte2p) == 0, ("%s: PTE2 busy", __func__)); 5984 5985 pte2_store(pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(m), PTE2_AP_KRW, 5986 vm_page_pte2_attr(m))); 5987 return (pc->pc_qmap_addr); 5988 } 5989 5990 void 5991 pmap_quick_remove_page(vm_offset_t addr) 5992 { 5993 struct pcpu *pc; 5994 pt2_entry_t *pte2p; 5995 5996 pc = get_pcpu(); 5997 pte2p = pc->pc_qmap_pte2p; 5998 5999 KASSERT(addr == pc->pc_qmap_addr, ("%s: invalid address", __func__)); 6000 KASSERT(pte2_load(pte2p) != 0, ("%s: PTE2 not in use", __func__)); 6001 6002 pte2_clear(pte2p); 6003 tlb_flush(pc->pc_qmap_addr); 6004 critical_exit(); 6005 } 6006 6007 /* 6008 * Copy the range specified by src_addr/len 6009 * from the source map to the range dst_addr/len 6010 * in the destination map. 6011 * 6012 * This routine is only advisory and need not do anything. 6013 */ 6014 void 6015 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len, 6016 vm_offset_t src_addr) 6017 { 6018 struct spglist free; 6019 vm_offset_t addr; 6020 vm_offset_t end_addr = src_addr + len; 6021 vm_offset_t nextva; 6022 6023 if (dst_addr != src_addr) 6024 return; 6025 6026 if (!pmap_is_current(src_pmap)) 6027 return; 6028 6029 rw_wlock(&pvh_global_lock); 6030 if (dst_pmap < src_pmap) { 6031 PMAP_LOCK(dst_pmap); 6032 PMAP_LOCK(src_pmap); 6033 } else { 6034 PMAP_LOCK(src_pmap); 6035 PMAP_LOCK(dst_pmap); 6036 } 6037 sched_pin(); 6038 for (addr = src_addr; addr < end_addr; addr = nextva) { 6039 pt2_entry_t *src_pte2p, *dst_pte2p; 6040 vm_page_t dst_mpt2pg, src_mpt2pg; 6041 pt1_entry_t src_pte1; 6042 u_int pte1_idx; 6043 6044 KASSERT(addr < VM_MAXUSER_ADDRESS, 6045 ("%s: invalid to pmap_copy page tables", __func__)); 6046 6047 nextva = pte1_trunc(addr + PTE1_SIZE); 6048 if (nextva < addr) 6049 nextva = end_addr; 6050 6051 pte1_idx = pte1_index(addr); 6052 src_pte1 = src_pmap->pm_pt1[pte1_idx]; 6053 if (pte1_is_section(src_pte1)) { 6054 if ((addr & PTE1_OFFSET) != 0 || 6055 (addr + PTE1_SIZE) > end_addr) 6056 continue; 6057 if (dst_pmap->pm_pt1[pte1_idx] == 0 && 6058 (!pte1_is_managed(src_pte1) || 6059 pmap_pv_insert_pte1(dst_pmap, addr, src_pte1, 6060 PMAP_ENTER_NORECLAIM))) { 6061 dst_pmap->pm_pt1[pte1_idx] = src_pte1 & 6062 ~PTE1_W; 6063 dst_pmap->pm_stats.resident_count += 6064 PTE1_SIZE / PAGE_SIZE; 6065 pmap_pte1_mappings++; 6066 } 6067 continue; 6068 } else if (!pte1_is_link(src_pte1)) 6069 continue; 6070 6071 src_mpt2pg = PHYS_TO_VM_PAGE(pte1_link_pa(src_pte1)); 6072 6073 /* 6074 * We leave PT2s to be linked from PT1 even if they are not 6075 * referenced until all PT2s in a page are without reference. 6076 * 6077 * QQQ: It could be changed ... 
6078 */ 6079 #if 0 /* single_pt2_link_is_cleared */ 6080 KASSERT(pt2_wirecount_get(src_mpt2pg, pte1_idx) > 0, 6081 ("%s: source page table page is unused", __func__)); 6082 #else 6083 if (pt2_wirecount_get(src_mpt2pg, pte1_idx) == 0) 6084 continue; 6085 #endif 6086 if (nextva > end_addr) 6087 nextva = end_addr; 6088 6089 src_pte2p = pt2map_entry(addr); 6090 while (addr < nextva) { 6091 pt2_entry_t temp_pte2; 6092 temp_pte2 = pte2_load(src_pte2p); 6093 /* 6094 * we only virtual copy managed pages 6095 */ 6096 if (pte2_is_managed(temp_pte2)) { 6097 dst_mpt2pg = pmap_allocpte2(dst_pmap, addr, 6098 PMAP_ENTER_NOSLEEP); 6099 if (dst_mpt2pg == NULL) 6100 goto out; 6101 dst_pte2p = pmap_pte2_quick(dst_pmap, addr); 6102 if (!pte2_is_valid(pte2_load(dst_pte2p)) && 6103 pmap_try_insert_pv_entry(dst_pmap, addr, 6104 PHYS_TO_VM_PAGE(pte2_pa(temp_pte2)))) { 6105 /* 6106 * Clear the wired, modified, and 6107 * accessed (referenced) bits 6108 * during the copy. 6109 */ 6110 temp_pte2 &= ~(PTE2_W | PTE2_A); 6111 temp_pte2 |= PTE2_NM; 6112 pte2_store(dst_pte2p, temp_pte2); 6113 dst_pmap->pm_stats.resident_count++; 6114 } else { 6115 SLIST_INIT(&free); 6116 if (pmap_unwire_pt2(dst_pmap, addr, 6117 dst_mpt2pg, &free)) { 6118 pmap_tlb_flush(dst_pmap, addr); 6119 vm_page_free_pages_toq(&free, 6120 false); 6121 } 6122 goto out; 6123 } 6124 if (pt2_wirecount_get(dst_mpt2pg, pte1_idx) >= 6125 pt2_wirecount_get(src_mpt2pg, pte1_idx)) 6126 break; 6127 } 6128 addr += PAGE_SIZE; 6129 src_pte2p++; 6130 } 6131 } 6132 out: 6133 sched_unpin(); 6134 rw_wunlock(&pvh_global_lock); 6135 PMAP_UNLOCK(src_pmap); 6136 PMAP_UNLOCK(dst_pmap); 6137 } 6138 6139 /* 6140 * Increase the starting virtual address of the given mapping if a 6141 * different alignment might result in more section mappings. 6142 */ 6143 void 6144 pmap_align_superpage(vm_object_t object, vm_ooffset_t offset, 6145 vm_offset_t *addr, vm_size_t size) 6146 { 6147 vm_offset_t pte1_offset; 6148 6149 if (size < PTE1_SIZE) 6150 return; 6151 if (object != NULL && (object->flags & OBJ_COLORED) != 0) 6152 offset += ptoa(object->pg_color); 6153 pte1_offset = offset & PTE1_OFFSET; 6154 if (size - ((PTE1_SIZE - pte1_offset) & PTE1_OFFSET) < PTE1_SIZE || 6155 (*addr & PTE1_OFFSET) == pte1_offset) 6156 return; 6157 if ((*addr & PTE1_OFFSET) < pte1_offset) 6158 *addr = pte1_trunc(*addr) + pte1_offset; 6159 else 6160 *addr = pte1_roundup(*addr) + pte1_offset; 6161 } 6162 6163 void 6164 pmap_activate(struct thread *td) 6165 { 6166 pmap_t pmap, oldpmap; 6167 u_int cpuid, ttb; 6168 6169 PDEBUG(9, printf("%s: td = %08x\n", __func__, (uint32_t)td)); 6170 6171 critical_enter(); 6172 pmap = vmspace_pmap(td->td_proc->p_vmspace); 6173 oldpmap = PCPU_GET(curpmap); 6174 cpuid = PCPU_GET(cpuid); 6175 6176 #if defined(SMP) 6177 CPU_CLR_ATOMIC(cpuid, &oldpmap->pm_active); 6178 CPU_SET_ATOMIC(cpuid, &pmap->pm_active); 6179 #else 6180 CPU_CLR(cpuid, &oldpmap->pm_active); 6181 CPU_SET(cpuid, &pmap->pm_active); 6182 #endif 6183 6184 ttb = pmap_ttb_get(pmap); 6185 6186 /* 6187 * pmap_activate is for the current thread on the current cpu 6188 */ 6189 td->td_pcb->pcb_pagedir = ttb; 6190 cp15_ttbr_set(ttb); 6191 PCPU_SET(curpmap, pmap); 6192 critical_exit(); 6193 } 6194 6195 void 6196 pmap_active_cpus(pmap_t pmap, cpuset_t *res) 6197 { 6198 *res = pmap->pm_active; 6199 } 6200 6201 /* 6202 * Perform the pmap work for mincore(2). If the page is not both referenced and 6203 * modified by this pmap, returns its physical address so that the caller can 6204 * find other mappings. 
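 * For example, a resident managed 4 KB page that this pmap has referenced but not modified yields MINCORE_INCORE | MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER, and *pap is set so the caller can search other pmaps for a modifying mapping.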
6205 */ 6206 int 6207 pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap) 6208 { 6209 pt1_entry_t *pte1p, pte1; 6210 pt2_entry_t *pte2p, pte2; 6211 vm_paddr_t pa; 6212 bool managed; 6213 int val; 6214 6215 PMAP_LOCK(pmap); 6216 pte1p = pmap_pte1(pmap, addr); 6217 pte1 = pte1_load(pte1p); 6218 if (pte1_is_section(pte1)) { 6219 pa = trunc_page(pte1_pa(pte1) | (addr & PTE1_OFFSET)); 6220 managed = pte1_is_managed(pte1); 6221 val = MINCORE_PSIND(1) | MINCORE_INCORE; 6222 if (pte1_is_dirty(pte1)) 6223 val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER; 6224 if (pte1 & PTE1_A) 6225 val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER; 6226 } else if (pte1_is_link(pte1)) { 6227 pte2p = pmap_pte2(pmap, addr); 6228 pte2 = pte2_load(pte2p); 6229 pmap_pte2_release(pte2p); 6230 pa = pte2_pa(pte2); 6231 managed = pte2_is_managed(pte2); 6232 val = MINCORE_INCORE; 6233 if (pte2_is_dirty(pte2)) 6234 val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER; 6235 if (pte2 & PTE2_A) 6236 val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER; 6237 } else { 6238 managed = false; 6239 val = 0; 6240 } 6241 if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) != 6242 (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && managed) { 6243 *pap = pa; 6244 } 6245 PMAP_UNLOCK(pmap); 6246 return (val); 6247 } 6248 6249 void 6250 pmap_kenter_device(vm_offset_t va, vm_size_t size, vm_paddr_t pa) 6251 { 6252 vm_offset_t sva; 6253 uint32_t l2attr; 6254 6255 KASSERT((size & PAGE_MASK) == 0, 6256 ("%s: device mapping not page-sized", __func__)); 6257 6258 sva = va; 6259 l2attr = vm_memattr_to_pte2(VM_MEMATTR_DEVICE); 6260 while (size != 0) { 6261 pmap_kenter_prot_attr(va, pa, PTE2_AP_KRW, l2attr); 6262 va += PAGE_SIZE; 6263 pa += PAGE_SIZE; 6264 size -= PAGE_SIZE; 6265 } 6266 tlb_flush_range(sva, va - sva); 6267 } 6268 6269 void 6270 pmap_kremove_device(vm_offset_t va, vm_size_t size) 6271 { 6272 vm_offset_t sva; 6273 6274 KASSERT((size & PAGE_MASK) == 0, 6275 ("%s: device mapping not page-sized", __func__)); 6276 6277 sva = va; 6278 while (size != 0) { 6279 pmap_kremove(va); 6280 va += PAGE_SIZE; 6281 size -= PAGE_SIZE; 6282 } 6283 tlb_flush_range(sva, va - sva); 6284 } 6285 6286 void 6287 pmap_set_pcb_pagedir(pmap_t pmap, struct pcb *pcb) 6288 { 6289 6290 pcb->pcb_pagedir = pmap_ttb_get(pmap); 6291 } 6292 6293 /* 6294 * Clean L1 data cache range by physical address. 6295 * The range must be within a single page. 6296 */ 6297 static void 6298 pmap_dcache_wb_pou(vm_paddr_t pa, vm_size_t size, uint32_t attr) 6299 { 6300 pt2_entry_t *cmap2_pte2p; 6301 struct pcpu *pc; 6302 6303 KASSERT(((pa & PAGE_MASK) + size) <= PAGE_SIZE, 6304 ("%s: not on single page", __func__)); 6305 6306 sched_pin(); 6307 pc = get_pcpu(); 6308 cmap2_pte2p = pc->pc_cmap2_pte2p; 6309 mtx_lock(&pc->pc_cmap_lock); 6310 if (pte2_load(cmap2_pte2p) != 0) 6311 panic("%s: CMAP2 busy", __func__); 6312 pte2_store(cmap2_pte2p, PTE2_KERN_NG(pa, PTE2_AP_KRW, attr)); 6313 dcache_wb_pou((vm_offset_t)pc->pc_cmap2_addr + (pa & PAGE_MASK), size); 6314 pte2_clear(cmap2_pte2p); 6315 tlb_flush((vm_offset_t)pc->pc_cmap2_addr); 6316 sched_unpin(); 6317 mtx_unlock(&pc->pc_cmap_lock); 6318 } 6319 6320 /* 6321 * Sync instruction cache range which is not mapped yet. 6322 */ 6323 void 6324 cache_icache_sync_fresh(vm_offset_t va, vm_paddr_t pa, vm_size_t size) 6325 { 6326 uint32_t len, offset; 6327 vm_page_t m; 6328 6329 /* Write back d-cache on given address range. 
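 The range may span several physical pages with different memory attributes, so it is written back one page at a time via pmap_dcache_wb_pou() below.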
*/ 6330 offset = pa & PAGE_MASK; 6331 for ( ; size != 0; size -= len, pa += len, offset = 0) { 6332 len = min(PAGE_SIZE - offset, size); 6333 m = PHYS_TO_VM_PAGE(pa); 6334 KASSERT(m != NULL, ("%s: vm_page_t is null for %#x", 6335 __func__, pa)); 6336 pmap_dcache_wb_pou(pa, len, vm_page_pte2_attr(m)); 6337 } 6338 /* 6339 * The I-cache is VIPT. The only way to flush all virtual mappings 6340 * of a given physical address is to invalidate the whole i-cache. 6341 */ 6342 icache_inv_all(); 6343 } 6344 6345 void 6346 pmap_sync_icache(pmap_t pmap, vm_offset_t va, vm_size_t size) 6347 { 6348 6349 /* Write back d-cache on given address range. */ 6350 if (va >= VM_MIN_KERNEL_ADDRESS) { 6351 dcache_wb_pou(va, size); 6352 } else { 6353 uint32_t len, offset; 6354 vm_paddr_t pa; 6355 vm_page_t m; 6356 6357 offset = va & PAGE_MASK; 6358 for ( ; size != 0; size -= len, va += len, offset = 0) { 6359 pa = pmap_extract(pmap, va); /* offset is preserved */ 6360 len = min(PAGE_SIZE - offset, size); 6361 m = PHYS_TO_VM_PAGE(pa); 6362 KASSERT(m != NULL, ("%s: vm_page_t is null for %#x", 6363 __func__, pa)); 6364 pmap_dcache_wb_pou(pa, len, vm_page_pte2_attr(m)); 6365 } 6366 } 6367 /* 6368 * The I-cache is VIPT. The only way to flush all virtual mappings 6369 * of a given physical address is to invalidate the whole i-cache. 6370 */ 6371 icache_inv_all(); 6372 } 6373 6374 /* 6375 * The implementation of pmap_fault() uses the IN_RANGE2() macro, which 6376 * depends on the given range size being a power of 2. 6377 */ 6378 CTASSERT(powerof2(NB_IN_PT1)); 6379 CTASSERT(powerof2(PT2MAP_SIZE)); 6380 6381 #define IN_RANGE2(addr, start, size) \ 6382 ((vm_offset_t)(start) == ((vm_offset_t)(addr) & ~((size) - 1))) 6383 6384 /* 6385 * Handle access and R/W emulation faults. 6386 */ 6387 int 6388 pmap_fault(pmap_t pmap, vm_offset_t far, uint32_t fsr, int idx, bool usermode) 6389 { 6390 pt1_entry_t *pte1p, pte1; 6391 pt2_entry_t *pte2p, pte2; 6392 6393 if (pmap == NULL) 6394 pmap = kernel_pmap; 6395 6396 /* 6397 * In the kernel, we should never get an abort whose FAR lies within the 6398 * pmap->pm_pt1 or PT2MAP address ranges. If it happens, stop here, 6399 * print a useful abort message, and get to the debugger if possible; 6400 * otherwise it likely ends in a never-ending loop of aborts. 6401 */ 6402 if (__predict_false(IN_RANGE2(far, pmap->pm_pt1, NB_IN_PT1))) { 6403 /* 6404 * All L1 tables should always be mapped and present. 6405 * However, only the current one is checked here. For user mode, 6406 * only a permission abort from a malicious user is not fatal, 6407 * nor is an alignment abort, as it may have higher priority. 6408 */ 6409 if (!usermode || (idx != FAULT_ALIGN && idx != FAULT_PERM_L2)) { 6410 CTR4(KTR_PMAP, "%s: pmap %#x pm_pt1 %#x far %#x", 6411 __func__, pmap, pmap->pm_pt1, far); 6412 panic("%s: pm_pt1 abort", __func__); 6413 } 6414 return (KERN_INVALID_ADDRESS); 6415 } 6416 if (__predict_false(IN_RANGE2(far, PT2MAP, PT2MAP_SIZE))) { 6417 /* 6418 * PT2MAP should always be mapped and present in the current 6419 * L1 table. However, only existing L2 tables are mapped 6420 * in PT2MAP. For user mode, only an L2 translation abort and 6421 * a permission abort from a malicious user are not fatal, 6422 * nor is an alignment abort, as it may have higher priority.
6423 */ 6424 if (!usermode || (idx != FAULT_ALIGN && 6425 idx != FAULT_TRAN_L2 && idx != FAULT_PERM_L2)) { 6426 CTR4(KTR_PMAP, "%s: pmap %#x PT2MAP %#x far %#x", 6427 __func__, pmap, PT2MAP, far); 6428 panic("%s: PT2MAP abort", __func__); 6429 } 6430 return (KERN_INVALID_ADDRESS); 6431 } 6432 6433 /* 6434 * A pmap lock is used below for handling access and R/W emulation 6435 * aborts. They were handled by atomic operations before, so some 6436 * analysis of the new situation is needed to answer the following question: 6437 * Is it safe to use the lock even for these aborts? 6438 * 6439 * In general, two cases may happen: 6440 * 6441 * (1) Aborts while the pmap lock is already held - this should not 6442 * happen, as the pmap lock is not recursive. However, under the pmap lock 6443 * only internal kernel data should be accessed, and such data should be 6444 * mapped with the A bit set and the NM bit cleared. If a double abort happens, 6445 * then the mapping of the data which caused it must be fixed. Further, 6446 * all new mappings are always made with the A bit set, and the bit can be 6447 * cleared only on managed mappings. 6448 * 6449 * (2) Aborts while another lock (or locks) is held - this can already 6450 * happen. However, it makes no difference here whether it is an access or 6451 * R/W emulation abort, or some other abort. 6452 */ 6453 6454 PMAP_LOCK(pmap); 6455 #ifdef INVARIANTS 6456 pte1 = pte1_load(pmap_pte1(pmap, far)); 6457 if (pte1_is_link(pte1)) { 6458 /* 6459 * Check in advance that the associated L2 page table is mapped into 6460 * PT2MAP space. Note that a faulty access to an unmapped L2 page 6461 * table is caught by the more general check above, where "far" is 6462 * checked not to lie in PT2MAP space. Note also that the 6463 * L1 page table and PT2TAB always exist and are mapped. 6464 */ 6465 pte2 = pt2tab_load(pmap_pt2tab_entry(pmap, far)); 6466 if (!pte2_is_valid(pte2)) 6467 panic("%s: missing L2 page table (%p, %#x)", 6468 __func__, pmap, far); 6469 } 6470 #endif 6471 #ifdef SMP 6472 /* 6473 * Special treatment is needed due to the break-before-make approach used 6474 * when a pte1 is updated for a userland mapping during section promotion 6475 * or demotion. If not caught here, pmap_enter() can find a section 6476 * mapping on the faulting address. That is not allowed. 6477 */ 6478 if (idx == FAULT_TRAN_L1 && usermode && cp15_ats1cur_check(far) == 0) { 6479 PMAP_UNLOCK(pmap); 6480 return (KERN_SUCCESS); 6481 } 6482 #endif 6483 /* 6484 * Access bits for page and section. Note that the entry 6485 * is not in the TLB yet, so a TLB flush is not necessary. 6486 * 6487 * QQQ: This is hardware emulation; we do not call userret() 6488 * for aborts from user mode. 6489 */ 6490 if (idx == FAULT_ACCESS_L2) { 6491 pte1 = pte1_load(pmap_pte1(pmap, far)); 6492 if (pte1_is_link(pte1)) { 6493 /* L2 page table should exist and be mapped. */ 6494 pte2p = pt2map_entry(far); 6495 pte2 = pte2_load(pte2p); 6496 if (pte2_is_valid(pte2)) { 6497 pte2_store(pte2p, pte2 | PTE2_A); 6498 PMAP_UNLOCK(pmap); 6499 return (KERN_SUCCESS); 6500 } 6501 } else { 6502 /* 6503 * We got an L2 access fault, but PTE1 is not a link. 6504 * Probably some race happened; do nothing.
6505 */ 6506 CTR3(KTR_PMAP, "%s: FAULT_ACCESS_L2 - pmap %#x far %#x", 6507 __func__, pmap, far); 6508 PMAP_UNLOCK(pmap); 6509 return (KERN_SUCCESS); 6510 } 6511 } 6512 if (idx == FAULT_ACCESS_L1) { 6513 pte1p = pmap_pte1(pmap, far); 6514 pte1 = pte1_load(pte1p); 6515 if (pte1_is_section(pte1)) { 6516 pte1_store(pte1p, pte1 | PTE1_A); 6517 PMAP_UNLOCK(pmap); 6518 return (KERN_SUCCESS); 6519 } else { 6520 /* 6521 * We got L1 access fault but PTE1 is not section 6522 * mapping. Probably some race happened, do nothing. 6523 */ 6524 CTR3(KTR_PMAP, "%s: FAULT_ACCESS_L1 - pmap %#x far %#x", 6525 __func__, pmap, far); 6526 PMAP_UNLOCK(pmap); 6527 return (KERN_SUCCESS); 6528 } 6529 } 6530 6531 /* 6532 * Handle modify bits for page and section. Note that the modify 6533 * bit is emulated by software. So PTEx_RO is software read only 6534 * bit and PTEx_NM flag is real hardware read only bit. 6535 * 6536 * QQQ: This is hardware emulation, we do not call userret() 6537 * for aborts from user mode. 6538 */ 6539 if ((fsr & FSR_WNR) && (idx == FAULT_PERM_L2)) { 6540 pte1 = pte1_load(pmap_pte1(pmap, far)); 6541 if (pte1_is_link(pte1)) { 6542 /* L2 page table should exist and be mapped. */ 6543 pte2p = pt2map_entry(far); 6544 pte2 = pte2_load(pte2p); 6545 if (pte2_is_valid(pte2) && !(pte2 & PTE2_RO) && 6546 (pte2 & PTE2_NM)) { 6547 pte2_store(pte2p, pte2 & ~PTE2_NM); 6548 tlb_flush(trunc_page(far)); 6549 PMAP_UNLOCK(pmap); 6550 return (KERN_SUCCESS); 6551 } 6552 } else { 6553 /* 6554 * We got L2 permission fault but PTE1 is not a link. 6555 * Probably some race happened, do nothing. 6556 */ 6557 CTR3(KTR_PMAP, "%s: FAULT_PERM_L2 - pmap %#x far %#x", 6558 __func__, pmap, far); 6559 PMAP_UNLOCK(pmap); 6560 return (KERN_SUCCESS); 6561 } 6562 } 6563 if ((fsr & FSR_WNR) && (idx == FAULT_PERM_L1)) { 6564 pte1p = pmap_pte1(pmap, far); 6565 pte1 = pte1_load(pte1p); 6566 if (pte1_is_section(pte1)) { 6567 if (!(pte1 & PTE1_RO) && (pte1 & PTE1_NM)) { 6568 pte1_store(pte1p, pte1 & ~PTE1_NM); 6569 tlb_flush(pte1_trunc(far)); 6570 PMAP_UNLOCK(pmap); 6571 return (KERN_SUCCESS); 6572 } 6573 } else { 6574 /* 6575 * We got L1 permission fault but PTE1 is not section 6576 * mapping. Probably some race happened, do nothing. 6577 */ 6578 CTR3(KTR_PMAP, "%s: FAULT_PERM_L1 - pmap %#x far %#x", 6579 __func__, pmap, far); 6580 PMAP_UNLOCK(pmap); 6581 return (KERN_SUCCESS); 6582 } 6583 } 6584 6585 /* 6586 * QQQ: The previous code, mainly fast handling of access and 6587 * modify bits aborts, could be moved to ASM. Now we are 6588 * starting to deal with not fast aborts. 6589 */ 6590 PMAP_UNLOCK(pmap); 6591 return (KERN_FAILURE); 6592 } 6593 6594 #if defined(PMAP_DEBUG) 6595 /* 6596 * Reusing of KVA used in pmap_zero_page function !!! 
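 * (The check below maps the page through the same per-CPU CMAP2 window, under the same pc_cmap_lock protocol, and panics if any word of the page is not zero.)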
6597 */ 6598 static void 6599 pmap_zero_page_check(vm_page_t m) 6600 { 6601 pt2_entry_t *cmap2_pte2p; 6602 uint32_t *p, *end; 6603 struct pcpu *pc; 6604 6605 sched_pin(); 6606 pc = get_pcpu(); 6607 cmap2_pte2p = pc->pc_cmap2_pte2p; 6608 mtx_lock(&pc->pc_cmap_lock); 6609 if (pte2_load(cmap2_pte2p) != 0) 6610 panic("%s: CMAP2 busy", __func__); 6611 pte2_store(cmap2_pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(m), PTE2_AP_KRW, 6612 vm_page_pte2_attr(m))); 6613 end = (uint32_t*)(pc->pc_cmap2_addr + PAGE_SIZE); 6614 for (p = (uint32_t*)pc->pc_cmap2_addr; p < end; p++) 6615 if (*p != 0) 6616 panic("%s: page %p not zero, va: %p", __func__, m, 6617 pc->pc_cmap2_addr); 6618 pte2_clear(cmap2_pte2p); 6619 tlb_flush((vm_offset_t)pc->pc_cmap2_addr); 6620 sched_unpin(); 6621 mtx_unlock(&pc->pc_cmap_lock); 6622 } 6623 6624 int 6625 pmap_pid_dump(int pid) 6626 { 6627 pmap_t pmap; 6628 struct proc *p; 6629 int npte2 = 0; 6630 int i, j, index; 6631 6632 sx_slock(&allproc_lock); 6633 FOREACH_PROC_IN_SYSTEM(p) { 6634 if (p->p_pid != pid || p->p_vmspace == NULL) 6635 continue; 6636 index = 0; 6637 pmap = vmspace_pmap(p->p_vmspace); 6638 for (i = 0; i < NPTE1_IN_PT1; i++) { 6639 pt1_entry_t pte1; 6640 pt2_entry_t *pte2p, pte2; 6641 vm_offset_t base, va; 6642 vm_paddr_t pa; 6643 vm_page_t m; 6644 6645 base = i << PTE1_SHIFT; 6646 pte1 = pte1_load(&pmap->pm_pt1[i]); 6647 6648 if (pte1_is_section(pte1)) { 6649 /* 6650 * QQQ: Do something here! 6651 */ 6652 } else if (pte1_is_link(pte1)) { 6653 for (j = 0; j < NPTE2_IN_PT2; j++) { 6654 va = base + (j << PAGE_SHIFT); 6655 if (va >= VM_MIN_KERNEL_ADDRESS) { 6656 if (index) { 6657 index = 0; 6658 printf("\n"); 6659 } 6660 sx_sunlock(&allproc_lock); 6661 return (npte2); 6662 } 6663 pte2p = pmap_pte2(pmap, va); 6664 pte2 = pte2_load(pte2p); 6665 pmap_pte2_release(pte2p); 6666 if (!pte2_is_valid(pte2)) 6667 continue; 6668 6669 pa = pte2_pa(pte2); 6670 m = PHYS_TO_VM_PAGE(pa); 6671 printf("va: 0x%x, pa: 0x%x, w: %d, " 6672 "f: 0x%x", va, pa, 6673 m->ref_count, m->flags); 6674 npte2++; 6675 index++; 6676 if (index >= 2) { 6677 index = 0; 6678 printf("\n"); 6679 } else { 6680 printf(" "); 6681 } 6682 } 6683 } 6684 } 6685 } 6686 sx_sunlock(&allproc_lock); 6687 return (npte2); 6688 } 6689 6690 #endif 6691 6692 #ifdef DDB 6693 static pt2_entry_t * 6694 pmap_pte2_ddb(pmap_t pmap, vm_offset_t va) 6695 { 6696 pt1_entry_t pte1; 6697 vm_paddr_t pt2pg_pa; 6698 6699 pte1 = pte1_load(pmap_pte1(pmap, va)); 6700 if (!pte1_is_link(pte1)) 6701 return (NULL); 6702 6703 if (pmap_is_current(pmap)) 6704 return (pt2map_entry(va)); 6705 6706 /* Note that L2 page table size is not equal to PAGE_SIZE. 
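 An L2 page table here is only 1 KB (256 four-byte entries), so four of them share one 4 KB page frame; the whole frame is mapped through PMAP3 below and the result is indexed by the PTE2 offset within that frame.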
*/ 6707 pt2pg_pa = trunc_page(pte1_link_pa(pte1)); 6708 if (pte2_pa(pte2_load(PMAP3)) != pt2pg_pa) { 6709 pte2_store(PMAP3, PTE2_KPT(pt2pg_pa)); 6710 #ifdef SMP 6711 PMAP3cpu = PCPU_GET(cpuid); 6712 #endif 6713 tlb_flush_local((vm_offset_t)PADDR3); 6714 } 6715 #ifdef SMP 6716 else if (PMAP3cpu != PCPU_GET(cpuid)) { 6717 PMAP3cpu = PCPU_GET(cpuid); 6718 tlb_flush_local((vm_offset_t)PADDR3); 6719 } 6720 #endif 6721 return (PADDR3 + (arm32_btop(va) & (NPTE2_IN_PG - 1))); 6722 } 6723 6724 static void 6725 dump_pmap(pmap_t pmap) 6726 { 6727 6728 printf("pmap %p\n", pmap); 6729 printf(" pm_pt1: %p\n", pmap->pm_pt1); 6730 printf(" pm_pt2tab: %p\n", pmap->pm_pt2tab); 6731 printf(" pm_active: 0x%08lX\n", pmap->pm_active.__bits[0]); 6732 } 6733 6734 DB_SHOW_COMMAND(pmaps, pmap_list_pmaps) 6735 { 6736 6737 pmap_t pmap; 6738 LIST_FOREACH(pmap, &allpmaps, pm_list) { 6739 dump_pmap(pmap); 6740 } 6741 } 6742 6743 static int 6744 pte2_class(pt2_entry_t pte2) 6745 { 6746 int cls; 6747 6748 cls = (pte2 >> 2) & 0x03; 6749 cls |= (pte2 >> 4) & 0x04; 6750 return (cls); 6751 } 6752 6753 static void 6754 dump_section(pmap_t pmap, uint32_t pte1_idx) 6755 { 6756 } 6757 6758 static void 6759 dump_link(pmap_t pmap, uint32_t pte1_idx, boolean_t invalid_ok) 6760 { 6761 uint32_t i; 6762 vm_offset_t va; 6763 pt2_entry_t *pte2p, pte2; 6764 vm_page_t m; 6765 6766 va = pte1_idx << PTE1_SHIFT; 6767 pte2p = pmap_pte2_ddb(pmap, va); 6768 for (i = 0; i < NPTE2_IN_PT2; i++, pte2p++, va += PAGE_SIZE) { 6769 pte2 = pte2_load(pte2p); 6770 if (pte2 == 0) 6771 continue; 6772 if (!pte2_is_valid(pte2)) { 6773 printf(" 0x%08X: 0x%08X", va, pte2); 6774 if (!invalid_ok) 6775 printf(" - not valid !!!"); 6776 printf("\n"); 6777 continue; 6778 } 6779 m = PHYS_TO_VM_PAGE(pte2_pa(pte2)); 6780 printf(" 0x%08X: 0x%08X, TEX%d, s:%d, g:%d, m:%p", va , pte2, 6781 pte2_class(pte2), !!(pte2 & PTE2_S), !(pte2 & PTE2_NG), m); 6782 if (m != NULL) { 6783 printf(" v:%d w:%d f:0x%04X\n", m->valid, 6784 m->ref_count, m->flags); 6785 } else { 6786 printf("\n"); 6787 } 6788 } 6789 } 6790 6791 static __inline boolean_t 6792 is_pv_chunk_space(vm_offset_t va) 6793 { 6794 6795 if ((((vm_offset_t)pv_chunkbase) <= va) && 6796 (va < ((vm_offset_t)pv_chunkbase + PAGE_SIZE * pv_maxchunks))) 6797 return (TRUE); 6798 return (FALSE); 6799 } 6800 6801 DB_SHOW_COMMAND(pmap, pmap_pmap_print) 6802 { 6803 /* XXX convert args. */ 6804 pmap_t pmap = (pmap_t)addr; 6805 pt1_entry_t pte1; 6806 pt2_entry_t pte2; 6807 vm_offset_t va, eva; 6808 vm_page_t m; 6809 uint32_t i; 6810 boolean_t invalid_ok, dump_link_ok, dump_pv_chunk; 6811 6812 if (have_addr) { 6813 pmap_t pm; 6814 6815 LIST_FOREACH(pm, &allpmaps, pm_list) 6816 if (pm == pmap) break; 6817 if (pm == NULL) { 6818 printf("given pmap %p is not in allpmaps list\n", pmap); 6819 return; 6820 } 6821 } else 6822 pmap = PCPU_GET(curpmap); 6823 6824 eva = (modif[0] == 'u') ? 
VM_MAXUSER_ADDRESS : 0xFFFFFFFF; 6825 dump_pv_chunk = FALSE; /* XXX evaluate from modif[] */ 6826 6827 printf("pmap: 0x%08X\n", (uint32_t)pmap); 6828 printf("PT2MAP: 0x%08X\n", (uint32_t)PT2MAP); 6829 printf("pt2tab: 0x%08X\n", (uint32_t)pmap->pm_pt2tab); 6830 6831 for(i = 0; i < NPTE1_IN_PT1; i++) { 6832 pte1 = pte1_load(&pmap->pm_pt1[i]); 6833 if (pte1 == 0) 6834 continue; 6835 va = i << PTE1_SHIFT; 6836 if (va >= eva) 6837 break; 6838 6839 if (pte1_is_section(pte1)) { 6840 printf("0x%08X: Section 0x%08X, s:%d g:%d\n", va, pte1, 6841 !!(pte1 & PTE1_S), !(pte1 & PTE1_NG)); 6842 dump_section(pmap, i); 6843 } else if (pte1_is_link(pte1)) { 6844 dump_link_ok = TRUE; 6845 invalid_ok = FALSE; 6846 pte2 = pte2_load(pmap_pt2tab_entry(pmap, va)); 6847 m = PHYS_TO_VM_PAGE(pte1_link_pa(pte1)); 6848 printf("0x%08X: Link 0x%08X, pt2tab: 0x%08X m: %p", 6849 va, pte1, pte2, m); 6850 if (is_pv_chunk_space(va)) { 6851 printf(" - pv_chunk space"); 6852 if (dump_pv_chunk) 6853 invalid_ok = TRUE; 6854 else 6855 dump_link_ok = FALSE; 6856 } 6857 else if (m != NULL) 6858 printf(" w:%d w2:%u", m->ref_count, 6859 pt2_wirecount_get(m, pte1_index(va))); 6860 if (pte2 == 0) 6861 printf(" !!! pt2tab entry is ZERO"); 6862 else if (pte2_pa(pte1) != pte2_pa(pte2)) 6863 printf(" !!! pt2tab entry is DIFFERENT - m: %p", 6864 PHYS_TO_VM_PAGE(pte2_pa(pte2))); 6865 printf("\n"); 6866 if (dump_link_ok) 6867 dump_link(pmap, i, invalid_ok); 6868 } else 6869 printf("0x%08X: Invalid entry 0x%08X\n", va, pte1); 6870 } 6871 } 6872 6873 static void 6874 dump_pt2tab(pmap_t pmap) 6875 { 6876 uint32_t i; 6877 pt2_entry_t pte2; 6878 vm_offset_t va; 6879 vm_paddr_t pa; 6880 vm_page_t m; 6881 6882 printf("PT2TAB:\n"); 6883 for (i = 0; i < PT2TAB_ENTRIES; i++) { 6884 pte2 = pte2_load(&pmap->pm_pt2tab[i]); 6885 if (!pte2_is_valid(pte2)) 6886 continue; 6887 va = i << PT2TAB_SHIFT; 6888 pa = pte2_pa(pte2); 6889 m = PHYS_TO_VM_PAGE(pa); 6890 printf(" 0x%08X: 0x%08X, TEX%d, s:%d, m:%p", va, pte2, 6891 pte2_class(pte2), !!(pte2 & PTE2_S), m); 6892 if (m != NULL) 6893 printf(" , w: %d, f: 0x%04X pidx: %lld", 6894 m->ref_count, m->flags, m->pindex); 6895 printf("\n"); 6896 } 6897 } 6898 6899 DB_SHOW_COMMAND(pmap_pt2tab, pmap_pt2tab_print) 6900 { 6901 /* XXX convert args. */ 6902 pmap_t pmap = (pmap_t)addr; 6903 pt1_entry_t pte1; 6904 pt2_entry_t pte2; 6905 vm_offset_t va; 6906 uint32_t i, start; 6907 6908 if (have_addr) { 6909 printf("supported only on current pmap\n"); 6910 return; 6911 } 6912 6913 pmap = PCPU_GET(curpmap); 6914 printf("curpmap: 0x%08X\n", (uint32_t)pmap); 6915 printf("PT2MAP: 0x%08X\n", (uint32_t)PT2MAP); 6916 printf("pt2tab: 0x%08X\n", (uint32_t)pmap->pm_pt2tab); 6917 6918 start = pte1_index((vm_offset_t)PT2MAP); 6919 for (i = start; i < (start + NPT2_IN_PT2TAB); i++) { 6920 pte1 = pte1_load(&pmap->pm_pt1[i]); 6921 if (pte1 == 0) 6922 continue; 6923 va = i << PTE1_SHIFT; 6924 if (pte1_is_section(pte1)) { 6925 printf("0x%08X: Section 0x%08X, s:%d\n", va, pte1, 6926 !!(pte1 & PTE1_S)); 6927 dump_section(pmap, i); 6928 } else if (pte1_is_link(pte1)) { 6929 pte2 = pte2_load(pmap_pt2tab_entry(pmap, va)); 6930 printf("0x%08X: Link 0x%08X, pt2tab: 0x%08X\n", va, 6931 pte1, pte2); 6932 if (pte2 == 0) 6933 printf(" !!! pt2tab entry is ZERO\n"); 6934 } else 6935 printf("0x%08X: Invalid entry 0x%08X\n", va, pte1); 6936 } 6937 dump_pt2tab(pmap); 6938 } 6939 #endif 6940