1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause AND BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 1991 Regents of the University of California. 5 * Copyright (c) 1994 John S. Dyson 6 * Copyright (c) 1994 David Greenman 7 * Copyright (c) 2005-2010 Alan L. Cox <alc@cs.rice.edu> 8 * Copyright (c) 2014-2016 Svatopluk Kraus <skra@FreeBSD.org> 9 * Copyright (c) 2014-2016 Michal Meloun <mmel@FreeBSD.org> 10 * All rights reserved. 11 * 12 * This code is derived from software contributed to Berkeley by 13 * the Systems Programming Group of the University of Utah Computer 14 * Science Department and William Jolitz of UUNET Technologies Inc. 15 * 16 * Redistribution and use in source and binary forms, with or without 17 * modification, are permitted provided that the following conditions 18 * are met: 19 * 1. Redistributions of source code must retain the above copyright 20 * notice, this list of conditions and the following disclaimer. 21 * 2. Redistributions in binary form must reproduce the above copyright 22 * notice, this list of conditions and the following disclaimer in the 23 * documentation and/or other materials provided with the distribution. 24 * 3. Neither the name of the University nor the names of its contributors 25 * may be used to endorse or promote products derived from this software 26 * without specific prior written permission. 27 * 28 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 31 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 38 * SUCH DAMAGE. 39 * 40 * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91 41 */ 42 /*- 43 * Copyright (c) 2003 Networks Associates Technology, Inc. 44 * All rights reserved. 45 * 46 * This software was developed for the FreeBSD Project by Jake Burkholder, 47 * Safeport Network Services, and Network Associates Laboratories, the 48 * Security Research Division of Network Associates, Inc. under 49 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA 50 * CHATS research program. 51 * 52 * Redistribution and use in source and binary forms, with or without 53 * modification, are permitted provided that the following conditions 54 * are met: 55 * 1. Redistributions of source code must retain the above copyright 56 * notice, this list of conditions and the following disclaimer. 57 * 2. Redistributions in binary form must reproduce the above copyright 58 * notice, this list of conditions and the following disclaimer in the 59 * documentation and/or other materials provided with the distribution. 60 * 61 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 62 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 63 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 64 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 65 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 66 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 67 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 68 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 69 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 70 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 71 * SUCH DAMAGE. 72 */ 73 74 #include <sys/cdefs.h> 75 __FBSDID("$FreeBSD$"); 76 77 /* 78 * Manages physical address maps. 79 * 80 * Since the information managed by this module is 81 * also stored by the logical address mapping module, 82 * this module may throw away valid virtual-to-physical 83 * mappings at almost any time. However, invalidations 84 * of virtual-to-physical mappings must be done as 85 * requested. 86 * 87 * In order to cope with hardware architectures which 88 * make virtual-to-physical map invalidates expensive, 89 * this module may delay invalidate or reduced protection 90 * operations until such time as they are actually 91 * necessary. This module is given full information as 92 * to which processors are currently using which maps, 93 * and to when physical maps must be made correct. 94 */ 95 96 #include "opt_vm.h" 97 #include "opt_pmap.h" 98 #include "opt_ddb.h" 99 100 #include <sys/param.h> 101 #include <sys/systm.h> 102 #include <sys/kernel.h> 103 #include <sys/ktr.h> 104 #include <sys/lock.h> 105 #include <sys/proc.h> 106 #include <sys/rwlock.h> 107 #include <sys/malloc.h> 108 #include <sys/vmmeter.h> 109 #include <sys/malloc.h> 110 #include <sys/mman.h> 111 #include <sys/sf_buf.h> 112 #include <sys/smp.h> 113 #include <sys/sched.h> 114 #include <sys/sysctl.h> 115 116 #ifdef DDB 117 #include <ddb/ddb.h> 118 #endif 119 120 #include <machine/physmem.h> 121 122 #include <vm/vm.h> 123 #include <vm/uma.h> 124 #include <vm/pmap.h> 125 #include <vm/vm_param.h> 126 #include <vm/vm_kern.h> 127 #include <vm/vm_object.h> 128 #include <vm/vm_map.h> 129 #include <vm/vm_page.h> 130 #include <vm/vm_pageout.h> 131 #include <vm/vm_phys.h> 132 #include <vm/vm_extern.h> 133 #include <vm/vm_reserv.h> 134 #include <sys/lock.h> 135 #include <sys/mutex.h> 136 137 #include <machine/md_var.h> 138 #include <machine/pmap_var.h> 139 #include <machine/cpu.h> 140 #include <machine/pcb.h> 141 #include <machine/sf_buf.h> 142 #ifdef SMP 143 #include <machine/smp.h> 144 #endif 145 #ifndef PMAP_SHPGPERPROC 146 #define PMAP_SHPGPERPROC 200 147 #endif 148 149 #ifndef DIAGNOSTIC 150 #define PMAP_INLINE __inline 151 #else 152 #define PMAP_INLINE 153 #endif 154 155 #ifdef PMAP_DEBUG 156 static void pmap_zero_page_check(vm_page_t m); 157 void pmap_debug(int level); 158 int pmap_pid_dump(int pid); 159 160 #define PDEBUG(_lev_,_stat_) \ 161 if (pmap_debug_level >= (_lev_)) \ 162 ((_stat_)) 163 #define dprintf printf 164 int pmap_debug_level = 1; 165 #else /* PMAP_DEBUG */ 166 #define PDEBUG(_lev_,_stat_) /* Nothing */ 167 #define dprintf(x, arg...) 168 #endif /* PMAP_DEBUG */ 169 170 /* 171 * Level 2 page tables map definion ('max' is excluded). 
172 */ 173 174 #define PT2V_MIN_ADDRESS ((vm_offset_t)PT2MAP) 175 #define PT2V_MAX_ADDRESS ((vm_offset_t)PT2MAP + PT2MAP_SIZE) 176 177 #define UPT2V_MIN_ADDRESS ((vm_offset_t)PT2MAP) 178 #define UPT2V_MAX_ADDRESS \ 179 ((vm_offset_t)(PT2MAP + (KERNBASE >> PT2MAP_SHIFT))) 180 181 /* 182 * Promotion to a 1MB (PTE1) page mapping requires that the corresponding 183 * 4KB (PTE2) page mappings have identical settings for the following fields: 184 */ 185 #define PTE2_PROMOTE (PTE2_V | PTE2_A | PTE2_NM | PTE2_S | PTE2_NG | \ 186 PTE2_NX | PTE2_RO | PTE2_U | PTE2_W | \ 187 PTE2_ATTR_MASK) 188 189 #define PTE1_PROMOTE (PTE1_V | PTE1_A | PTE1_NM | PTE1_S | PTE1_NG | \ 190 PTE1_NX | PTE1_RO | PTE1_U | PTE1_W | \ 191 PTE1_ATTR_MASK) 192 193 #define ATTR_TO_L1(l2_attr) ((((l2_attr) & L2_TEX0) ? L1_S_TEX0 : 0) | \ 194 (((l2_attr) & L2_C) ? L1_S_C : 0) | \ 195 (((l2_attr) & L2_B) ? L1_S_B : 0) | \ 196 (((l2_attr) & PTE2_A) ? PTE1_A : 0) | \ 197 (((l2_attr) & PTE2_NM) ? PTE1_NM : 0) | \ 198 (((l2_attr) & PTE2_S) ? PTE1_S : 0) | \ 199 (((l2_attr) & PTE2_NG) ? PTE1_NG : 0) | \ 200 (((l2_attr) & PTE2_NX) ? PTE1_NX : 0) | \ 201 (((l2_attr) & PTE2_RO) ? PTE1_RO : 0) | \ 202 (((l2_attr) & PTE2_U) ? PTE1_U : 0) | \ 203 (((l2_attr) & PTE2_W) ? PTE1_W : 0)) 204 205 #define ATTR_TO_L2(l1_attr) ((((l1_attr) & L1_S_TEX0) ? L2_TEX0 : 0) | \ 206 (((l1_attr) & L1_S_C) ? L2_C : 0) | \ 207 (((l1_attr) & L1_S_B) ? L2_B : 0) | \ 208 (((l1_attr) & PTE1_A) ? PTE2_A : 0) | \ 209 (((l1_attr) & PTE1_NM) ? PTE2_NM : 0) | \ 210 (((l1_attr) & PTE1_S) ? PTE2_S : 0) | \ 211 (((l1_attr) & PTE1_NG) ? PTE2_NG : 0) | \ 212 (((l1_attr) & PTE1_NX) ? PTE2_NX : 0) | \ 213 (((l1_attr) & PTE1_RO) ? PTE2_RO : 0) | \ 214 (((l1_attr) & PTE1_U) ? PTE2_U : 0) | \ 215 (((l1_attr) & PTE1_W) ? PTE2_W : 0)) 216 217 /* 218 * PTE2 descriptors creation macros. 219 */ 220 #define PTE2_ATTR_DEFAULT vm_memattr_to_pte2(VM_MEMATTR_DEFAULT) 221 #define PTE2_ATTR_PT vm_memattr_to_pte2(pt_memattr) 222 223 #define PTE2_KPT(pa) PTE2_KERN(pa, PTE2_AP_KRW, PTE2_ATTR_PT) 224 #define PTE2_KPT_NG(pa) PTE2_KERN_NG(pa, PTE2_AP_KRW, PTE2_ATTR_PT) 225 226 #define PTE2_KRW(pa) PTE2_KERN(pa, PTE2_AP_KRW, PTE2_ATTR_DEFAULT) 227 #define PTE2_KRO(pa) PTE2_KERN(pa, PTE2_AP_KR, PTE2_ATTR_DEFAULT) 228 229 #define PV_STATS 230 #ifdef PV_STATS 231 #define PV_STAT(x) do { x ; } while (0) 232 #else 233 #define PV_STAT(x) do { } while (0) 234 #endif 235 236 /* 237 * The boot_pt1 is used temporary in very early boot stage as L1 page table. 238 * We can init many things with no memory allocation thanks to its static 239 * allocation and this brings two main advantages: 240 * (1) other cores can be started very simply, 241 * (2) various boot loaders can be supported as its arguments can be processed 242 * in virtual address space and can be moved to safe location before 243 * first allocation happened. 244 * Only disadvantage is that boot_pt1 is used only in very early boot stage. 245 * However, the table is uninitialized and so lays in bss. Therefore kernel 246 * image size is not influenced. 247 * 248 * QQQ: In the future, maybe, boot_pt1 can be used for soft reset and 249 * CPU suspend/resume game. 
250 */ 251 extern pt1_entry_t boot_pt1[]; 252 253 vm_paddr_t base_pt1; 254 pt1_entry_t *kern_pt1; 255 pt2_entry_t *kern_pt2tab; 256 pt2_entry_t *PT2MAP; 257 258 static uint32_t ttb_flags; 259 static vm_memattr_t pt_memattr; 260 ttb_entry_t pmap_kern_ttb; 261 262 struct pmap kernel_pmap_store; 263 LIST_HEAD(pmaplist, pmap); 264 static struct pmaplist allpmaps; 265 static struct mtx allpmaps_lock; 266 267 vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */ 268 vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */ 269 270 static vm_offset_t kernel_vm_end_new; 271 vm_offset_t kernel_vm_end = KERNBASE + NKPT2PG * NPT2_IN_PG * PTE1_SIZE; 272 vm_offset_t vm_max_kernel_address; 273 vm_paddr_t kernel_l1pa; 274 275 static struct rwlock __aligned(CACHE_LINE_SIZE) pvh_global_lock; 276 277 /* 278 * Data for the pv entry allocation mechanism 279 */ 280 static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks); 281 static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0; 282 static struct md_page *pv_table; /* XXX: Is it used only the list in md_page? */ 283 static int shpgperproc = PMAP_SHPGPERPROC; 284 285 struct pv_chunk *pv_chunkbase; /* KVA block for pv_chunks */ 286 int pv_maxchunks; /* How many chunks we have KVA for */ 287 vm_offset_t pv_vafree; /* freelist stored in the PTE */ 288 289 vm_paddr_t first_managed_pa; 290 #define pa_to_pvh(pa) (&pv_table[pte1_index(pa - first_managed_pa)]) 291 292 /* 293 * All those kernel PT submaps that BSD is so fond of 294 */ 295 caddr_t _tmppt = 0; 296 297 /* 298 * Crashdump maps. 299 */ 300 static caddr_t crashdumpmap; 301 302 static pt2_entry_t *PMAP1 = NULL, *PMAP2; 303 static pt2_entry_t *PADDR1 = NULL, *PADDR2; 304 #ifdef DDB 305 static pt2_entry_t *PMAP3; 306 static pt2_entry_t *PADDR3; 307 static int PMAP3cpu __unused; /* for SMP only */ 308 #endif 309 #ifdef SMP 310 static int PMAP1cpu; 311 static int PMAP1changedcpu; 312 SYSCTL_INT(_debug, OID_AUTO, PMAP1changedcpu, CTLFLAG_RD, 313 &PMAP1changedcpu, 0, 314 "Number of times pmap_pte2_quick changed CPU with same PMAP1"); 315 #endif 316 static int PMAP1changed; 317 SYSCTL_INT(_debug, OID_AUTO, PMAP1changed, CTLFLAG_RD, 318 &PMAP1changed, 0, 319 "Number of times pmap_pte2_quick changed PMAP1"); 320 static int PMAP1unchanged; 321 SYSCTL_INT(_debug, OID_AUTO, PMAP1unchanged, CTLFLAG_RD, 322 &PMAP1unchanged, 0, 323 "Number of times pmap_pte2_quick didn't change PMAP1"); 324 static struct mtx PMAP2mutex; 325 326 /* 327 * Internal flags for pmap_enter()'s helper functions. 328 */ 329 #define PMAP_ENTER_NORECLAIM 0x1000000 /* Don't reclaim PV entries. */ 330 #define PMAP_ENTER_NOREPLACE 0x2000000 /* Don't replace mappings. */ 331 332 static __inline void pt2_wirecount_init(vm_page_t m); 333 static boolean_t pmap_demote_pte1(pmap_t pmap, pt1_entry_t *pte1p, 334 vm_offset_t va); 335 static int pmap_enter_pte1(pmap_t pmap, vm_offset_t va, pt1_entry_t pte1, 336 u_int flags, vm_page_t m); 337 void cache_icache_sync_fresh(vm_offset_t va, vm_paddr_t pa, vm_size_t size); 338 339 /* 340 * Function to set the debug level of the pmap code. 341 */ 342 #ifdef PMAP_DEBUG 343 void 344 pmap_debug(int level) 345 { 346 347 pmap_debug_level = level; 348 dprintf("pmap_debug: level=%d\n", pmap_debug_level); 349 } 350 #endif /* PMAP_DEBUG */ 351 352 /* 353 * This table must corespond with memory attribute configuration in vm.h. 354 * First entry is used for normal system mapping. 355 * 356 * Device memory is always marked as shared. 
357 * Normal memory is shared only in SMP . 358 * Not outer shareable bits are not used yet. 359 * Class 6 cannot be used on ARM11. 360 */ 361 #define TEXDEF_TYPE_SHIFT 0 362 #define TEXDEF_TYPE_MASK 0x3 363 #define TEXDEF_INNER_SHIFT 2 364 #define TEXDEF_INNER_MASK 0x3 365 #define TEXDEF_OUTER_SHIFT 4 366 #define TEXDEF_OUTER_MASK 0x3 367 #define TEXDEF_NOS_SHIFT 6 368 #define TEXDEF_NOS_MASK 0x1 369 370 #define TEX(t, i, o, s) \ 371 ((t) << TEXDEF_TYPE_SHIFT) | \ 372 ((i) << TEXDEF_INNER_SHIFT) | \ 373 ((o) << TEXDEF_OUTER_SHIFT | \ 374 ((s) << TEXDEF_NOS_SHIFT)) 375 376 static uint32_t tex_class[8] = { 377 /* type inner cache outer cache */ 378 TEX(PRRR_MEM, NMRR_WB_WA, NMRR_WB_WA, 0), /* 0 - ATTR_WB_WA */ 379 TEX(PRRR_MEM, NMRR_NC, NMRR_NC, 0), /* 1 - ATTR_NOCACHE */ 380 TEX(PRRR_DEV, NMRR_NC, NMRR_NC, 0), /* 2 - ATTR_DEVICE */ 381 TEX(PRRR_SO, NMRR_NC, NMRR_NC, 0), /* 3 - ATTR_SO */ 382 TEX(PRRR_MEM, NMRR_WT, NMRR_WT, 0), /* 4 - ATTR_WT */ 383 TEX(PRRR_MEM, NMRR_NC, NMRR_NC, 0), /* 5 - NOT USED YET */ 384 TEX(PRRR_MEM, NMRR_NC, NMRR_NC, 0), /* 6 - NOT USED YET */ 385 TEX(PRRR_MEM, NMRR_NC, NMRR_NC, 0), /* 7 - NOT USED YET */ 386 }; 387 #undef TEX 388 389 static uint32_t pte2_attr_tab[8] = { 390 PTE2_ATTR_WB_WA, /* 0 - VM_MEMATTR_WB_WA */ 391 PTE2_ATTR_NOCACHE, /* 1 - VM_MEMATTR_NOCACHE */ 392 PTE2_ATTR_DEVICE, /* 2 - VM_MEMATTR_DEVICE */ 393 PTE2_ATTR_SO, /* 3 - VM_MEMATTR_SO */ 394 PTE2_ATTR_WT, /* 4 - VM_MEMATTR_WRITE_THROUGH */ 395 0, /* 5 - NOT USED YET */ 396 0, /* 6 - NOT USED YET */ 397 0 /* 7 - NOT USED YET */ 398 }; 399 CTASSERT(VM_MEMATTR_WB_WA == 0); 400 CTASSERT(VM_MEMATTR_NOCACHE == 1); 401 CTASSERT(VM_MEMATTR_DEVICE == 2); 402 CTASSERT(VM_MEMATTR_SO == 3); 403 CTASSERT(VM_MEMATTR_WRITE_THROUGH == 4); 404 #define VM_MEMATTR_END (VM_MEMATTR_WRITE_THROUGH + 1) 405 406 boolean_t 407 pmap_is_valid_memattr(pmap_t pmap __unused, vm_memattr_t mode) 408 { 409 410 return (mode >= 0 && mode < VM_MEMATTR_END); 411 } 412 413 static inline uint32_t 414 vm_memattr_to_pte2(vm_memattr_t ma) 415 { 416 417 KASSERT((u_int)ma < VM_MEMATTR_END, 418 ("%s: bad vm_memattr_t %d", __func__, ma)); 419 return (pte2_attr_tab[(u_int)ma]); 420 } 421 422 static inline uint32_t 423 vm_page_pte2_attr(vm_page_t m) 424 { 425 426 return (vm_memattr_to_pte2(m->md.pat_mode)); 427 } 428 429 /* 430 * Convert TEX definition entry to TTB flags. 431 */ 432 static uint32_t 433 encode_ttb_flags(int idx) 434 { 435 uint32_t inner, outer, nos, reg; 436 437 inner = (tex_class[idx] >> TEXDEF_INNER_SHIFT) & 438 TEXDEF_INNER_MASK; 439 outer = (tex_class[idx] >> TEXDEF_OUTER_SHIFT) & 440 TEXDEF_OUTER_MASK; 441 nos = (tex_class[idx] >> TEXDEF_NOS_SHIFT) & 442 TEXDEF_NOS_MASK; 443 444 reg = nos << 5; 445 reg |= outer << 3; 446 if (cpuinfo.coherent_walk) 447 reg |= (inner & 0x1) << 6; 448 reg |= (inner & 0x2) >> 1; 449 #ifdef SMP 450 ARM_SMP_UP( 451 reg |= 1 << 1, 452 ); 453 #endif 454 return reg; 455 } 456 457 /* 458 * Set TEX remapping registers in current CPU. 459 */ 460 void 461 pmap_set_tex(void) 462 { 463 uint32_t prrr, nmrr; 464 uint32_t type, inner, outer, nos; 465 int i; 466 467 #ifdef PMAP_PTE_NOCACHE 468 /* XXX fixme */ 469 if (cpuinfo.coherent_walk) { 470 pt_memattr = VM_MEMATTR_WB_WA; 471 ttb_flags = encode_ttb_flags(0); 472 } 473 else { 474 pt_memattr = VM_MEMATTR_NOCACHE; 475 ttb_flags = encode_ttb_flags(1); 476 } 477 #else 478 pt_memattr = VM_MEMATTR_WB_WA; 479 ttb_flags = encode_ttb_flags(0); 480 #endif 481 482 prrr = 0; 483 nmrr = 0; 484 485 /* Build remapping register from TEX classes. 
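 *
 * For reference (derived from the loop below): TEX class i contributes its
 * two type bits to PRRR[2i+1:2i] and its NOS bit to PRRR[24+i], while its
 * inner and outer cacheability bits go to NMRR[2i+1:2i] and
 * NMRR[2i+17:2i+16], respectively.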
*/ 486 for (i = 0; i < 8; i++) { 487 type = (tex_class[i] >> TEXDEF_TYPE_SHIFT) & 488 TEXDEF_TYPE_MASK; 489 inner = (tex_class[i] >> TEXDEF_INNER_SHIFT) & 490 TEXDEF_INNER_MASK; 491 outer = (tex_class[i] >> TEXDEF_OUTER_SHIFT) & 492 TEXDEF_OUTER_MASK; 493 nos = (tex_class[i] >> TEXDEF_NOS_SHIFT) & 494 TEXDEF_NOS_MASK; 495 496 prrr |= type << (i * 2); 497 prrr |= nos << (i + 24); 498 nmrr |= inner << (i * 2); 499 nmrr |= outer << (i * 2 + 16); 500 } 501 /* Add shareable bits for device memory. */ 502 prrr |= PRRR_DS0 | PRRR_DS1; 503 504 /* Add shareable bits for normal memory in SMP case. */ 505 #ifdef SMP 506 ARM_SMP_UP( 507 prrr |= PRRR_NS1, 508 ); 509 #endif 510 cp15_prrr_set(prrr); 511 cp15_nmrr_set(nmrr); 512 513 /* Caches are disabled, so full TLB flush should be enough. */ 514 tlb_flush_all_local(); 515 } 516 517 /* 518 * Remap one vm_meattr class to another one. This can be useful as 519 * workaround for SOC errata, e.g. if devices must be accessed using 520 * SO memory class. 521 * 522 * !!! Please note that this function is absolutely last resort thing. 523 * It should not be used under normal circumstances. !!! 524 * 525 * Usage rules: 526 * - it shall be called after pmap_bootstrap_prepare() and before 527 * cpu_mp_start() (thus only on boot CPU). In practice, it's expected 528 * to be called from platform_attach() or platform_late_init(). 529 * 530 * - if remapping doesn't change caching mode, or until uncached class 531 * is remapped to any kind of cached one, then no other restriction exists. 532 * 533 * - if pmap_remap_vm_attr() changes caching mode, but both (original and 534 * remapped) remain cached, then caller is resposible for calling 535 * of dcache_wbinv_poc_all(). 536 * 537 * - remapping of any kind of cached class to uncached is not permitted. 538 */ 539 void 540 pmap_remap_vm_attr(vm_memattr_t old_attr, vm_memattr_t new_attr) 541 { 542 int old_idx, new_idx; 543 544 /* Map VM memattrs to indexes to tex_class table. */ 545 old_idx = PTE2_ATTR2IDX(pte2_attr_tab[(int)old_attr]); 546 new_idx = PTE2_ATTR2IDX(pte2_attr_tab[(int)new_attr]); 547 548 /* Replace TEX attribute and apply it. */ 549 tex_class[old_idx] = tex_class[new_idx]; 550 pmap_set_tex(); 551 } 552 553 /* 554 * KERNBASE must be multiple of NPT2_IN_PG * PTE1_SIZE. In other words, 555 * KERNBASE is mapped by first L2 page table in L2 page table page. It 556 * meets same constrain due to PT2MAP being placed just under KERNBASE. 557 */ 558 CTASSERT((KERNBASE & (NPT2_IN_PG * PTE1_SIZE - 1)) == 0); 559 CTASSERT((KERNBASE - VM_MAXUSER_ADDRESS) >= PT2MAP_SIZE); 560 561 /* 562 * In crazy dreams, PAGE_SIZE could be a multiple of PTE2_SIZE in general. 563 * For now, anyhow, the following check must be fulfilled. 564 */ 565 CTASSERT(PAGE_SIZE == PTE2_SIZE); 566 /* 567 * We don't want to mess up MI code with all MMU and PMAP definitions, 568 * so some things, which depend on other ones, are defined independently. 569 * Now, it is time to check that we don't screw up something. 570 */ 571 CTASSERT(PDRSHIFT == PTE1_SHIFT); 572 /* 573 * Check L1 and L2 page table entries definitions consistency. 574 */ 575 CTASSERT(NB_IN_PT1 == (sizeof(pt1_entry_t) * NPTE1_IN_PT1)); 576 CTASSERT(NB_IN_PT2 == (sizeof(pt2_entry_t) * NPTE2_IN_PT2)); 577 /* 578 * Check L2 page tables page consistency. 579 */ 580 CTASSERT(PAGE_SIZE == (NPT2_IN_PG * NB_IN_PT2)); 581 CTASSERT((1 << PT2PG_SHIFT) == NPT2_IN_PG); 582 /* 583 * Check PT2TAB consistency. 584 * PT2TAB_ENTRIES is defined as a division of NPTE1_IN_PT1 by NPT2_IN_PG. 
 * This should be done without remainder.
 */
CTASSERT(NPTE1_IN_PT1 == (PT2TAB_ENTRIES * NPT2_IN_PG));

/*
 * A PT2MAP magic.
 *
 * All level 2 page tables (PT2s) are mapped contiguously and in order
 * into the PT2MAP address space. As a PT2 is smaller than PAGE_SIZE, this
 * can be done only if PAGE_SIZE is a multiple of the PT2 size. All PT2s in
 * one page must be used together, but not necessarily at once. The first
 * PT2 in a page must map things at a correctly aligned address and the
 * others must follow in the right order.
 */
#define NB_IN_PT2TAB    (PT2TAB_ENTRIES * sizeof(pt2_entry_t))
#define NPT2_IN_PT2TAB  (NB_IN_PT2TAB / NB_IN_PT2)
#define NPG_IN_PT2TAB   (NB_IN_PT2TAB / PAGE_SIZE)

/*
 * Check PT2TAB consistency.
 * NPT2_IN_PT2TAB is defined as a division of NB_IN_PT2TAB by NB_IN_PT2.
 * NPG_IN_PT2TAB is defined as a division of NB_IN_PT2TAB by PAGE_SIZE.
 * Both divisions should leave no remainder.
 */
CTASSERT(NB_IN_PT2TAB == (NPT2_IN_PT2TAB * NB_IN_PT2));
CTASSERT(NB_IN_PT2TAB == (NPG_IN_PT2TAB * PAGE_SIZE));
/*
 * The implementation was made general, however, with the assumption
 * below in mind. In case of another value of NPG_IN_PT2TAB,
 * the code should be rechecked once more.
 */
CTASSERT(NPG_IN_PT2TAB == 1);

/*
 * Get the offset of a PT2 within a page,
 * given the associated PT1 index.
 */
static __inline u_int
page_pt2off(u_int pt1_idx)
{

        return ((pt1_idx & PT2PG_MASK) * NB_IN_PT2);
}

/*
 * Get the physical address of a PT2,
 * given the PT2s page and the PT1 index.
 */
static __inline vm_paddr_t
page_pt2pa(vm_paddr_t pgpa, u_int pt1_idx)
{

        return (pgpa + page_pt2off(pt1_idx));
}

/*
 * Get the first entry of a PT2,
 * given the PT2s page and the PT1 index.
 */
static __inline pt2_entry_t *
page_pt2(vm_offset_t pgva, u_int pt1_idx)
{

        return ((pt2_entry_t *)(pgva + page_pt2off(pt1_idx)));
}

/*
 * Get the virtual address of the PT2s page (mapped in PT2MAP)
 * which holds the PT2 containing the entry that maps the given virtual
 * address.
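 *
 * Illustrative note (assuming the usual ARMv6/v7 short-descriptor values,
 * not restated here: 4 KB pages, 1 KB PT2s, hence NPT2_IN_PG == 4): one
 * PT2s page then covers NPT2_IN_PG * PTE1_SIZE == 4 MB of KVA, so any va
 * within the same 4 MB block yields the same PT2s page address below.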
 */
static __inline vm_offset_t
pt2map_pt2pg(vm_offset_t va)
{

        va &= ~(NPT2_IN_PG * PTE1_SIZE - 1);
        return ((vm_offset_t)pt2map_entry(va));
}

/*****************************************************************************
 *
 * THREE pmap initialization milestones exist:
 *
 *  locore.S
 *    -> fundamental init (including MMU) in ASM
 *
 *  initarm()
 *    -> fundamental init continues in C
 *    -> first available physical address is known
 *
 *  pmap_bootstrap_prepare() -> FIRST PMAP MILESTONE (first epoch begins)
 *    -> basic (safe) interface for physical address allocation is made
 *    -> basic (safe) interface for virtual mapping is made
 *    -> limited, not SMP coherent, work is possible
 *
 *    -> more fundamental init continues in C
 *    -> locks and some more things are available
 *    -> all fundamental allocations and mappings are done
 *
 *  pmap_bootstrap() -> SECOND PMAP MILESTONE (second epoch begins)
 *    -> phys_avail[] and virtual_avail are set
 *    -> control is passed to the vm subsystem
 *    -> physical and virtual address allocation are off limits
 *    -> low level mapping functions, some SMP coherent,
 *       are available, which cannot be used before the vm subsystem
 *       is initialized
 *
 *  mi_startup()
 *    -> the vm subsystem is initialized
 *
 *  pmap_init() -> THIRD PMAP MILESTONE (third epoch begins)
 *    -> pmap is fully initialized
 *
 *****************************************************************************/

/*****************************************************************************
 *
 * PMAP first stage initialization and utility functions
 * for pre-bootstrap epoch.
 *
 * After pmap_bootstrap_prepare() is called, the following functions
 * can be used:
 *
 * (1) functions strictly for this stage only, for physical page allocations,
 *     virtual space allocations, and mappings:
 *
 * vm_paddr_t pmap_preboot_get_pages(u_int num);
 * void pmap_preboot_map_pages(vm_paddr_t pa, vm_offset_t va, u_int num);
 * vm_offset_t pmap_preboot_reserve_pages(u_int num);
 * vm_offset_t pmap_preboot_get_vpages(u_int num);
 * void pmap_preboot_map_attr(vm_paddr_t pa, vm_offset_t va, vm_size_t size,
 *     vm_prot_t prot, vm_memattr_t attr);
 *
 * (2) for all stages:
 *
 * vm_paddr_t pmap_kextract(vm_offset_t va);
 *
 * NOTE: This stage is not SMP coherent.
 *
 *****************************************************************************/

#define KERNEL_P2V(pa) \
    ((vm_offset_t)((pa) - arm_physmem_kernaddr + KERNVIRTADDR))
#define KERNEL_V2P(va) \
    ((vm_paddr_t)((va) - KERNVIRTADDR + arm_physmem_kernaddr))

static vm_paddr_t last_paddr;

/*
 * Pre-bootstrap epoch page allocator.
 */
vm_paddr_t
pmap_preboot_get_pages(u_int num)
{
        vm_paddr_t ret;

        ret = last_paddr;
        last_paddr += num * PAGE_SIZE;

        return (ret);
}

/*
 * The fundamental initialization of PMAP stuff.
 *
 * Some things already happened in locore.S and some things could happen
 * before pmap_bootstrap_prepare() is called, so let's recall what is done:
 * 1. Caches are disabled.
 * 2. We are already running on virtual addresses, with 'boot_pt1'
 *    as the L1 page table.
 * 3. So far, all virtual addresses can be converted to physical ones and
 *    vice versa by the following macros:
 *      KERNEL_P2V(pa) .... physical to virtual ones,
 *      KERNEL_V2P(va) ....
virtual to physical ones. 758 * 759 * What is done herein: 760 * 1. The 'boot_pt1' is replaced by real kernel L1 page table 'kern_pt1'. 761 * 2. PT2MAP magic is brought to live. 762 * 3. Basic preboot functions for page allocations and mappings can be used. 763 * 4. Everything is prepared for L1 cache enabling. 764 * 765 * Variations: 766 * 1. To use second TTB register, so kernel and users page tables will be 767 * separated. This way process forking - pmap_pinit() - could be faster, 768 * it saves physical pages and KVA per a process, and it's simple change. 769 * However, it will lead, due to hardware matter, to the following: 770 * (a) 2G space for kernel and 2G space for users. 771 * (b) 1G space for kernel in low addresses and 3G for users above it. 772 * A question is: Is the case (b) really an option? Note that case (b) 773 * does save neither physical memory and KVA. 774 */ 775 void 776 pmap_bootstrap_prepare(vm_paddr_t last) 777 { 778 vm_paddr_t pt2pg_pa, pt2tab_pa, pa, size; 779 vm_offset_t pt2pg_va; 780 pt1_entry_t *pte1p; 781 pt2_entry_t *pte2p; 782 u_int i; 783 uint32_t l1_attr; 784 785 /* 786 * Now, we are going to make real kernel mapping. Note that we are 787 * already running on some mapping made in locore.S and we expect 788 * that it's large enough to ensure nofault access to physical memory 789 * allocated herein before switch. 790 * 791 * As kernel image and everything needed before are and will be mapped 792 * by section mappings, we align last physical address to PTE1_SIZE. 793 */ 794 last_paddr = pte1_roundup(last); 795 796 /* 797 * Allocate and zero page(s) for kernel L1 page table. 798 * 799 * Note that it's first allocation on space which was PTE1_SIZE 800 * aligned and as such base_pt1 is aligned to NB_IN_PT1 too. 801 */ 802 base_pt1 = pmap_preboot_get_pages(NPG_IN_PT1); 803 kern_pt1 = (pt1_entry_t *)KERNEL_P2V(base_pt1); 804 bzero((void*)kern_pt1, NB_IN_PT1); 805 pte1_sync_range(kern_pt1, NB_IN_PT1); 806 807 /* Allocate and zero page(s) for kernel PT2TAB. */ 808 pt2tab_pa = pmap_preboot_get_pages(NPG_IN_PT2TAB); 809 kern_pt2tab = (pt2_entry_t *)KERNEL_P2V(pt2tab_pa); 810 bzero(kern_pt2tab, NB_IN_PT2TAB); 811 pte2_sync_range(kern_pt2tab, NB_IN_PT2TAB); 812 813 /* Allocate and zero page(s) for kernel L2 page tables. */ 814 pt2pg_pa = pmap_preboot_get_pages(NKPT2PG); 815 pt2pg_va = KERNEL_P2V(pt2pg_pa); 816 size = NKPT2PG * PAGE_SIZE; 817 bzero((void*)pt2pg_va, size); 818 pte2_sync_range((pt2_entry_t *)pt2pg_va, size); 819 820 /* 821 * Add a physical memory segment (vm_phys_seg) corresponding to the 822 * preallocated pages for kernel L2 page tables so that vm_page 823 * structures representing these pages will be created. The vm_page 824 * structures are required for promotion of the corresponding kernel 825 * virtual addresses to section mappings. 826 */ 827 vm_phys_add_seg(pt2tab_pa, pmap_preboot_get_pages(0)); 828 829 /* 830 * Insert allocated L2 page table pages to PT2TAB and make 831 * link to all PT2s in L1 page table. See how kernel_vm_end 832 * is initialized. 833 * 834 * We play simple and safe. So every KVA will have underlaying 835 * L2 page table, even kernel image mapped by sections. 836 */ 837 pte2p = kern_pt2tab_entry(KERNBASE); 838 for (pa = pt2pg_pa; pa < pt2pg_pa + size; pa += PTE2_SIZE) 839 pt2tab_store(pte2p++, PTE2_KPT(pa)); 840 841 pte1p = kern_pte1(KERNBASE); 842 for (pa = pt2pg_pa; pa < pt2pg_pa + size; pa += NB_IN_PT2) 843 pte1_store(pte1p++, PTE1_LINK(pa)); 844 845 /* Make section mappings for kernel. 
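 *
 * Each valid PTE1 maps one 1 MB section, so the loop below covers the
 * physical range [KERNEL_V2P(KERNBASE), last) with read-write kernel
 * section mappings.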
*/ 846 l1_attr = ATTR_TO_L1(PTE2_ATTR_DEFAULT); 847 pte1p = kern_pte1(KERNBASE); 848 for (pa = KERNEL_V2P(KERNBASE); pa < last; pa += PTE1_SIZE) 849 pte1_store(pte1p++, PTE1_KERN(pa, PTE1_AP_KRW, l1_attr)); 850 851 /* 852 * Get free and aligned space for PT2MAP and make L1 page table links 853 * to L2 page tables held in PT2TAB. 854 * 855 * Note that pages holding PT2s are stored in PT2TAB as pt2_entry_t 856 * descriptors and PT2TAB page(s) itself is(are) used as PT2s. Thus 857 * each entry in PT2TAB maps all PT2s in a page. This implies that 858 * virtual address of PT2MAP must be aligned to NPT2_IN_PG * PTE1_SIZE. 859 */ 860 PT2MAP = (pt2_entry_t *)(KERNBASE - PT2MAP_SIZE); 861 pte1p = kern_pte1((vm_offset_t)PT2MAP); 862 for (pa = pt2tab_pa, i = 0; i < NPT2_IN_PT2TAB; i++, pa += NB_IN_PT2) { 863 pte1_store(pte1p++, PTE1_LINK(pa)); 864 } 865 866 /* 867 * Store PT2TAB in PT2TAB itself, i.e. self reference mapping. 868 * Each pmap will hold own PT2TAB, so the mapping should be not global. 869 */ 870 pte2p = kern_pt2tab_entry((vm_offset_t)PT2MAP); 871 for (pa = pt2tab_pa, i = 0; i < NPG_IN_PT2TAB; i++, pa += PTE2_SIZE) { 872 pt2tab_store(pte2p++, PTE2_KPT_NG(pa)); 873 } 874 875 /* 876 * Choose correct L2 page table and make mappings for allocations 877 * made herein which replaces temporary locore.S mappings after a while. 878 * Note that PT2MAP cannot be used until we switch to kern_pt1. 879 * 880 * Note, that these allocations started aligned on 1M section and 881 * kernel PT1 was allocated first. Making of mappings must follow 882 * order of physical allocations as we've used KERNEL_P2V() macro 883 * for virtual addresses resolution. 884 */ 885 pte2p = kern_pt2tab_entry((vm_offset_t)kern_pt1); 886 pt2pg_va = KERNEL_P2V(pte2_pa(pte2_load(pte2p))); 887 888 pte2p = page_pt2(pt2pg_va, pte1_index((vm_offset_t)kern_pt1)); 889 890 /* Make mapping for kernel L1 page table. */ 891 for (pa = base_pt1, i = 0; i < NPG_IN_PT1; i++, pa += PTE2_SIZE) 892 pte2_store(pte2p++, PTE2_KPT(pa)); 893 894 /* Make mapping for kernel PT2TAB. */ 895 for (pa = pt2tab_pa, i = 0; i < NPG_IN_PT2TAB; i++, pa += PTE2_SIZE) 896 pte2_store(pte2p++, PTE2_KPT(pa)); 897 898 /* Finally, switch from 'boot_pt1' to 'kern_pt1'. */ 899 pmap_kern_ttb = base_pt1 | ttb_flags; 900 cpuinfo_reinit_mmu(pmap_kern_ttb); 901 /* 902 * Initialize the first available KVA. As kernel image is mapped by 903 * sections, we are leaving some gap behind. 904 */ 905 virtual_avail = (vm_offset_t)kern_pt2tab + NPG_IN_PT2TAB * PAGE_SIZE; 906 } 907 908 /* 909 * Setup L2 page table page for given KVA. 910 * Used in pre-bootstrap epoch. 911 * 912 * Note that we have allocated NKPT2PG pages for L2 page tables in advance 913 * and used them for mapping KVA starting from KERNBASE. However, this is not 914 * enough. Vectors and devices need L2 page tables too. Note that they are 915 * even above VM_MAX_KERNEL_ADDRESS. 916 */ 917 static __inline vm_paddr_t 918 pmap_preboot_pt2pg_setup(vm_offset_t va) 919 { 920 pt2_entry_t *pte2p, pte2; 921 vm_paddr_t pt2pg_pa; 922 923 /* Get associated entry in PT2TAB. */ 924 pte2p = kern_pt2tab_entry(va); 925 926 /* Just return, if PT2s page exists already. */ 927 pte2 = pt2tab_load(pte2p); 928 if (pte2_is_valid(pte2)) 929 return (pte2_pa(pte2)); 930 931 KASSERT(va >= VM_MAX_KERNEL_ADDRESS, 932 ("%s: NKPT2PG too small", __func__)); 933 934 /* 935 * Allocate page for PT2s and insert it to PT2TAB. 936 * In other words, map it into PT2MAP space. 
937 */ 938 pt2pg_pa = pmap_preboot_get_pages(1); 939 pt2tab_store(pte2p, PTE2_KPT(pt2pg_pa)); 940 941 /* Zero all PT2s in allocated page. */ 942 bzero((void*)pt2map_pt2pg(va), PAGE_SIZE); 943 pte2_sync_range((pt2_entry_t *)pt2map_pt2pg(va), PAGE_SIZE); 944 945 return (pt2pg_pa); 946 } 947 948 /* 949 * Setup L2 page table for given KVA. 950 * Used in pre-bootstrap epoch. 951 */ 952 static void 953 pmap_preboot_pt2_setup(vm_offset_t va) 954 { 955 pt1_entry_t *pte1p; 956 vm_paddr_t pt2pg_pa, pt2_pa; 957 958 /* Setup PT2's page. */ 959 pt2pg_pa = pmap_preboot_pt2pg_setup(va); 960 pt2_pa = page_pt2pa(pt2pg_pa, pte1_index(va)); 961 962 /* Insert PT2 to PT1. */ 963 pte1p = kern_pte1(va); 964 pte1_store(pte1p, PTE1_LINK(pt2_pa)); 965 } 966 967 /* 968 * Get L2 page entry associated with given KVA. 969 * Used in pre-bootstrap epoch. 970 */ 971 static __inline pt2_entry_t* 972 pmap_preboot_vtopte2(vm_offset_t va) 973 { 974 pt1_entry_t *pte1p; 975 976 /* Setup PT2 if needed. */ 977 pte1p = kern_pte1(va); 978 if (!pte1_is_valid(pte1_load(pte1p))) /* XXX - sections ?! */ 979 pmap_preboot_pt2_setup(va); 980 981 return (pt2map_entry(va)); 982 } 983 984 /* 985 * Pre-bootstrap epoch page(s) mapping(s). 986 */ 987 void 988 pmap_preboot_map_pages(vm_paddr_t pa, vm_offset_t va, u_int num) 989 { 990 u_int i; 991 pt2_entry_t *pte2p; 992 993 /* Map all the pages. */ 994 for (i = 0; i < num; i++) { 995 pte2p = pmap_preboot_vtopte2(va); 996 pte2_store(pte2p, PTE2_KRW(pa)); 997 va += PAGE_SIZE; 998 pa += PAGE_SIZE; 999 } 1000 } 1001 1002 /* 1003 * Pre-bootstrap epoch virtual space alocator. 1004 */ 1005 vm_offset_t 1006 pmap_preboot_reserve_pages(u_int num) 1007 { 1008 u_int i; 1009 vm_offset_t start, va; 1010 pt2_entry_t *pte2p; 1011 1012 /* Allocate virtual space. */ 1013 start = va = virtual_avail; 1014 virtual_avail += num * PAGE_SIZE; 1015 1016 /* Zero the mapping. */ 1017 for (i = 0; i < num; i++) { 1018 pte2p = pmap_preboot_vtopte2(va); 1019 pte2_store(pte2p, 0); 1020 va += PAGE_SIZE; 1021 } 1022 1023 return (start); 1024 } 1025 1026 /* 1027 * Pre-bootstrap epoch page(s) allocation and mapping(s). 1028 */ 1029 vm_offset_t 1030 pmap_preboot_get_vpages(u_int num) 1031 { 1032 vm_paddr_t pa; 1033 vm_offset_t va; 1034 1035 /* Allocate physical page(s). */ 1036 pa = pmap_preboot_get_pages(num); 1037 1038 /* Allocate virtual space. */ 1039 va = virtual_avail; 1040 virtual_avail += num * PAGE_SIZE; 1041 1042 /* Map and zero all. */ 1043 pmap_preboot_map_pages(pa, va, num); 1044 bzero((void *)va, num * PAGE_SIZE); 1045 1046 return (va); 1047 } 1048 1049 /* 1050 * Pre-bootstrap epoch page mapping(s) with attributes. 1051 */ 1052 void 1053 pmap_preboot_map_attr(vm_paddr_t pa, vm_offset_t va, vm_size_t size, 1054 vm_prot_t prot, vm_memattr_t attr) 1055 { 1056 u_int num; 1057 u_int l1_attr, l1_prot, l2_prot, l2_attr; 1058 pt1_entry_t *pte1p; 1059 pt2_entry_t *pte2p; 1060 1061 l2_prot = prot & VM_PROT_WRITE ? PTE2_AP_KRW : PTE2_AP_KR; 1062 l2_prot |= (prot & VM_PROT_EXECUTE) ? PTE2_X : PTE2_NX; 1063 l2_attr = vm_memattr_to_pte2(attr); 1064 l1_prot = ATTR_TO_L1(l2_prot); 1065 l1_attr = ATTR_TO_L1(l2_attr); 1066 1067 /* Map all the pages. 
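 *
 * A 1 MB section mapping (PTE1) is used whenever both va and pa are
 * section aligned and at least PTE1_SIZE remains; otherwise the mapping
 * falls back to individual PTE2 (page-sized) mappings.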
*/ 1068 num = round_page(size); 1069 while (num > 0) { 1070 if ((((va | pa) & PTE1_OFFSET) == 0) && (num >= PTE1_SIZE)) { 1071 pte1p = kern_pte1(va); 1072 pte1_store(pte1p, PTE1_KERN(pa, l1_prot, l1_attr)); 1073 va += PTE1_SIZE; 1074 pa += PTE1_SIZE; 1075 num -= PTE1_SIZE; 1076 } else { 1077 pte2p = pmap_preboot_vtopte2(va); 1078 pte2_store(pte2p, PTE2_KERN(pa, l2_prot, l2_attr)); 1079 va += PAGE_SIZE; 1080 pa += PAGE_SIZE; 1081 num -= PAGE_SIZE; 1082 } 1083 } 1084 } 1085 1086 /* 1087 * Extract from the kernel page table the physical address 1088 * that is mapped by the given virtual address "va". 1089 */ 1090 vm_paddr_t 1091 pmap_kextract(vm_offset_t va) 1092 { 1093 vm_paddr_t pa; 1094 pt1_entry_t pte1; 1095 pt2_entry_t pte2; 1096 1097 pte1 = pte1_load(kern_pte1(va)); 1098 if (pte1_is_section(pte1)) { 1099 pa = pte1_pa(pte1) | (va & PTE1_OFFSET); 1100 } else if (pte1_is_link(pte1)) { 1101 /* 1102 * We should beware of concurrent promotion that changes 1103 * pte1 at this point. However, it's not a problem as PT2 1104 * page is preserved by promotion in PT2TAB. So even if 1105 * it happens, using of PT2MAP is still safe. 1106 * 1107 * QQQ: However, concurrent removing is a problem which 1108 * ends in abort on PT2MAP space. Locking must be used 1109 * to deal with this. 1110 */ 1111 pte2 = pte2_load(pt2map_entry(va)); 1112 pa = pte2_pa(pte2) | (va & PTE2_OFFSET); 1113 } 1114 else { 1115 panic("%s: va %#x pte1 %#x", __func__, va, pte1); 1116 } 1117 return (pa); 1118 } 1119 1120 /* 1121 * Extract from the kernel page table the physical address 1122 * that is mapped by the given virtual address "va". Also 1123 * return L2 page table entry which maps the address. 1124 * 1125 * This is only intended to be used for panic dumps. 1126 */ 1127 vm_paddr_t 1128 pmap_dump_kextract(vm_offset_t va, pt2_entry_t *pte2p) 1129 { 1130 vm_paddr_t pa; 1131 pt1_entry_t pte1; 1132 pt2_entry_t pte2; 1133 1134 pte1 = pte1_load(kern_pte1(va)); 1135 if (pte1_is_section(pte1)) { 1136 pa = pte1_pa(pte1) | (va & PTE1_OFFSET); 1137 pte2 = pa | ATTR_TO_L2(pte1) | PTE2_V; 1138 } else if (pte1_is_link(pte1)) { 1139 pte2 = pte2_load(pt2map_entry(va)); 1140 pa = pte2_pa(pte2); 1141 } else { 1142 pte2 = 0; 1143 pa = 0; 1144 } 1145 if (pte2p != NULL) 1146 *pte2p = pte2; 1147 return (pa); 1148 } 1149 1150 /***************************************************************************** 1151 * 1152 * PMAP second stage initialization and utility functions 1153 * for bootstrap epoch. 1154 * 1155 * After pmap_bootstrap() is called, the following functions for 1156 * mappings can be used: 1157 * 1158 * void pmap_kenter(vm_offset_t va, vm_paddr_t pa); 1159 * void pmap_kremove(vm_offset_t va); 1160 * vm_offset_t pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, 1161 * int prot); 1162 * 1163 * NOTE: This is not SMP coherent stage. And physical page allocation is not 1164 * allowed during this stage. 1165 * 1166 *****************************************************************************/ 1167 1168 /* 1169 * Initialize kernel PMAP locks and lists, kernel_pmap itself, and 1170 * reserve various virtual spaces for temporary mappings. 1171 */ 1172 void 1173 pmap_bootstrap(vm_offset_t firstaddr) 1174 { 1175 pt2_entry_t *unused __unused; 1176 struct pcpu *pc; 1177 1178 /* 1179 * Initialize the kernel pmap (which is statically allocated). 
1180 */ 1181 PMAP_LOCK_INIT(kernel_pmap); 1182 kernel_l1pa = (vm_paddr_t)kern_pt1; /* for libkvm */ 1183 kernel_pmap->pm_pt1 = kern_pt1; 1184 kernel_pmap->pm_pt2tab = kern_pt2tab; 1185 CPU_FILL(&kernel_pmap->pm_active); /* don't allow deactivation */ 1186 TAILQ_INIT(&kernel_pmap->pm_pvchunk); 1187 1188 /* 1189 * Initialize the global pv list lock. 1190 */ 1191 rw_init(&pvh_global_lock, "pmap pv global"); 1192 1193 LIST_INIT(&allpmaps); 1194 1195 /* 1196 * Request a spin mutex so that changes to allpmaps cannot be 1197 * preempted by smp_rendezvous_cpus(). 1198 */ 1199 mtx_init(&allpmaps_lock, "allpmaps", NULL, MTX_SPIN); 1200 mtx_lock_spin(&allpmaps_lock); 1201 LIST_INSERT_HEAD(&allpmaps, kernel_pmap, pm_list); 1202 mtx_unlock_spin(&allpmaps_lock); 1203 1204 /* 1205 * Reserve some special page table entries/VA space for temporary 1206 * mapping of pages. 1207 */ 1208 #define SYSMAP(c, p, v, n) do { \ 1209 v = (c)pmap_preboot_reserve_pages(n); \ 1210 p = pt2map_entry((vm_offset_t)v); \ 1211 } while (0) 1212 1213 /* 1214 * Local CMAP1/CMAP2 are used for zeroing and copying pages. 1215 * Local CMAP2 is also used for data cache cleaning. 1216 */ 1217 pc = get_pcpu(); 1218 mtx_init(&pc->pc_cmap_lock, "SYSMAPS", NULL, MTX_DEF); 1219 SYSMAP(caddr_t, pc->pc_cmap1_pte2p, pc->pc_cmap1_addr, 1); 1220 SYSMAP(caddr_t, pc->pc_cmap2_pte2p, pc->pc_cmap2_addr, 1); 1221 SYSMAP(vm_offset_t, pc->pc_qmap_pte2p, pc->pc_qmap_addr, 1); 1222 1223 /* 1224 * Crashdump maps. 1225 */ 1226 SYSMAP(caddr_t, unused, crashdumpmap, MAXDUMPPGS); 1227 1228 /* 1229 * _tmppt is used for reading arbitrary physical pages via /dev/mem. 1230 */ 1231 SYSMAP(caddr_t, unused, _tmppt, 1); 1232 1233 /* 1234 * PADDR1 and PADDR2 are used by pmap_pte2_quick() and pmap_pte2(), 1235 * respectively. PADDR3 is used by pmap_pte2_ddb(). 1236 */ 1237 SYSMAP(pt2_entry_t *, PMAP1, PADDR1, 1); 1238 SYSMAP(pt2_entry_t *, PMAP2, PADDR2, 1); 1239 #ifdef DDB 1240 SYSMAP(pt2_entry_t *, PMAP3, PADDR3, 1); 1241 #endif 1242 mtx_init(&PMAP2mutex, "PMAP2", NULL, MTX_DEF); 1243 1244 /* 1245 * Note that in very short time in initarm(), we are going to 1246 * initialize phys_avail[] array and no further page allocation 1247 * can happen after that until vm subsystem will be initialized. 1248 */ 1249 kernel_vm_end_new = kernel_vm_end; 1250 virtual_end = vm_max_kernel_address; 1251 } 1252 1253 static void 1254 pmap_init_reserved_pages(void) 1255 { 1256 struct pcpu *pc; 1257 vm_offset_t pages; 1258 int i; 1259 1260 CPU_FOREACH(i) { 1261 pc = pcpu_find(i); 1262 /* 1263 * Skip if the mapping has already been initialized, 1264 * i.e. this is the BSP. 1265 */ 1266 if (pc->pc_cmap1_addr != 0) 1267 continue; 1268 mtx_init(&pc->pc_cmap_lock, "SYSMAPS", NULL, MTX_DEF); 1269 pages = kva_alloc(PAGE_SIZE * 3); 1270 if (pages == 0) 1271 panic("%s: unable to allocate KVA", __func__); 1272 pc->pc_cmap1_pte2p = pt2map_entry(pages); 1273 pc->pc_cmap2_pte2p = pt2map_entry(pages + PAGE_SIZE); 1274 pc->pc_qmap_pte2p = pt2map_entry(pages + (PAGE_SIZE * 2)); 1275 pc->pc_cmap1_addr = (caddr_t)pages; 1276 pc->pc_cmap2_addr = (caddr_t)(pages + PAGE_SIZE); 1277 pc->pc_qmap_addr = pages + (PAGE_SIZE * 2); 1278 } 1279 } 1280 SYSINIT(rpages_init, SI_SUB_CPU, SI_ORDER_ANY, pmap_init_reserved_pages, NULL); 1281 1282 /* 1283 * The function can already be use in second initialization stage. 1284 * As such, the function DOES NOT call pmap_growkernel() where PT2 1285 * allocation can happen. So if used, be sure that PT2 for given 1286 * virtual address is allocated already! 
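 * (For the common case, pmap_kenter() below simply wraps this function
 * with PTE2_AP_KRW protection and PTE2_ATTR_DEFAULT attributes.)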
1287 * 1288 * Add a wired page to the kva. 1289 * Note: not SMP coherent. 1290 */ 1291 static __inline void 1292 pmap_kenter_prot_attr(vm_offset_t va, vm_paddr_t pa, uint32_t prot, 1293 uint32_t attr) 1294 { 1295 pt1_entry_t *pte1p; 1296 pt2_entry_t *pte2p; 1297 1298 pte1p = kern_pte1(va); 1299 if (!pte1_is_valid(pte1_load(pte1p))) { /* XXX - sections ?! */ 1300 /* 1301 * This is a very low level function, so PT2 and particularly 1302 * PT2PG associated with given virtual address must be already 1303 * allocated. It's a pain mainly during pmap initialization 1304 * stage. However, called after pmap initialization with 1305 * virtual address not under kernel_vm_end will lead to 1306 * the same misery. 1307 */ 1308 if (!pte2_is_valid(pte2_load(kern_pt2tab_entry(va)))) 1309 panic("%s: kernel PT2 not allocated!", __func__); 1310 } 1311 1312 pte2p = pt2map_entry(va); 1313 pte2_store(pte2p, PTE2_KERN(pa, prot, attr)); 1314 } 1315 1316 PMAP_INLINE void 1317 pmap_kenter(vm_offset_t va, vm_paddr_t pa) 1318 { 1319 1320 pmap_kenter_prot_attr(va, pa, PTE2_AP_KRW, PTE2_ATTR_DEFAULT); 1321 } 1322 1323 /* 1324 * Remove a page from the kernel pagetables. 1325 * Note: not SMP coherent. 1326 */ 1327 PMAP_INLINE void 1328 pmap_kremove(vm_offset_t va) 1329 { 1330 pt1_entry_t *pte1p; 1331 pt2_entry_t *pte2p; 1332 1333 pte1p = kern_pte1(va); 1334 if (pte1_is_section(pte1_load(pte1p))) { 1335 pte1_clear(pte1p); 1336 } else { 1337 pte2p = pt2map_entry(va); 1338 pte2_clear(pte2p); 1339 } 1340 } 1341 1342 /* 1343 * Share new kernel PT2PG with all pmaps. 1344 * The caller is responsible for maintaining TLB consistency. 1345 */ 1346 static void 1347 pmap_kenter_pt2tab(vm_offset_t va, pt2_entry_t npte2) 1348 { 1349 pmap_t pmap; 1350 pt2_entry_t *pte2p; 1351 1352 mtx_lock_spin(&allpmaps_lock); 1353 LIST_FOREACH(pmap, &allpmaps, pm_list) { 1354 pte2p = pmap_pt2tab_entry(pmap, va); 1355 pt2tab_store(pte2p, npte2); 1356 } 1357 mtx_unlock_spin(&allpmaps_lock); 1358 } 1359 1360 /* 1361 * Share new kernel PTE1 with all pmaps. 1362 * The caller is responsible for maintaining TLB consistency. 1363 */ 1364 static void 1365 pmap_kenter_pte1(vm_offset_t va, pt1_entry_t npte1) 1366 { 1367 pmap_t pmap; 1368 pt1_entry_t *pte1p; 1369 1370 mtx_lock_spin(&allpmaps_lock); 1371 LIST_FOREACH(pmap, &allpmaps, pm_list) { 1372 pte1p = pmap_pte1(pmap, va); 1373 pte1_store(pte1p, npte1); 1374 } 1375 mtx_unlock_spin(&allpmaps_lock); 1376 } 1377 1378 /* 1379 * Used to map a range of physical addresses into kernel 1380 * virtual address space. 1381 * 1382 * The value passed in '*virt' is a suggested virtual address for 1383 * the mapping. Architectures which can support a direct-mapped 1384 * physical to virtual region can return the appropriate address 1385 * within that region, leaving '*virt' unchanged. Other 1386 * architectures should map the pages starting at '*virt' and 1387 * update '*virt' with the first usable address after the mapped 1388 * region. 1389 * 1390 * NOTE: Read the comments above pmap_kenter_prot_attr() as 1391 * the function is used herein! 1392 */ 1393 vm_offset_t 1394 pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot) 1395 { 1396 vm_offset_t va, sva; 1397 vm_paddr_t pte1_offset; 1398 pt1_entry_t npte1; 1399 uint32_t l1prot, l2prot; 1400 uint32_t l1attr, l2attr; 1401 1402 PDEBUG(1, printf("%s: virt = %#x, start = %#x, end = %#x (size = %#x)," 1403 " prot = %d\n", __func__, *virt, start, end, end - start, prot)); 1404 1405 l2prot = (prot & VM_PROT_WRITE) ? 
PTE2_AP_KRW : PTE2_AP_KR; 1406 l2prot |= (prot & VM_PROT_EXECUTE) ? PTE2_X : PTE2_NX; 1407 l1prot = ATTR_TO_L1(l2prot); 1408 1409 l2attr = PTE2_ATTR_DEFAULT; 1410 l1attr = ATTR_TO_L1(l2attr); 1411 1412 va = *virt; 1413 /* 1414 * Does the physical address range's size and alignment permit at 1415 * least one section mapping to be created? 1416 */ 1417 pte1_offset = start & PTE1_OFFSET; 1418 if ((end - start) - ((PTE1_SIZE - pte1_offset) & PTE1_OFFSET) >= 1419 PTE1_SIZE) { 1420 /* 1421 * Increase the starting virtual address so that its alignment 1422 * does not preclude the use of section mappings. 1423 */ 1424 if ((va & PTE1_OFFSET) < pte1_offset) 1425 va = pte1_trunc(va) + pte1_offset; 1426 else if ((va & PTE1_OFFSET) > pte1_offset) 1427 va = pte1_roundup(va) + pte1_offset; 1428 } 1429 sva = va; 1430 while (start < end) { 1431 if ((start & PTE1_OFFSET) == 0 && end - start >= PTE1_SIZE) { 1432 KASSERT((va & PTE1_OFFSET) == 0, 1433 ("%s: misaligned va %#x", __func__, va)); 1434 npte1 = PTE1_KERN(start, l1prot, l1attr); 1435 pmap_kenter_pte1(va, npte1); 1436 va += PTE1_SIZE; 1437 start += PTE1_SIZE; 1438 } else { 1439 pmap_kenter_prot_attr(va, start, l2prot, l2attr); 1440 va += PAGE_SIZE; 1441 start += PAGE_SIZE; 1442 } 1443 } 1444 tlb_flush_range(sva, va - sva); 1445 *virt = va; 1446 return (sva); 1447 } 1448 1449 /* 1450 * Make a temporary mapping for a physical address. 1451 * This is only intended to be used for panic dumps. 1452 */ 1453 void * 1454 pmap_kenter_temporary(vm_paddr_t pa, int i) 1455 { 1456 vm_offset_t va; 1457 1458 /* QQQ: 'i' should be less or equal to MAXDUMPPGS. */ 1459 1460 va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE); 1461 pmap_kenter(va, pa); 1462 tlb_flush_local(va); 1463 return ((void *)crashdumpmap); 1464 } 1465 1466 1467 /************************************* 1468 * 1469 * TLB & cache maintenance routines. 1470 * 1471 *************************************/ 1472 1473 /* 1474 * We inline these within pmap.c for speed. 1475 */ 1476 PMAP_INLINE void 1477 pmap_tlb_flush(pmap_t pmap, vm_offset_t va) 1478 { 1479 1480 if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active)) 1481 tlb_flush(va); 1482 } 1483 1484 PMAP_INLINE void 1485 pmap_tlb_flush_range(pmap_t pmap, vm_offset_t sva, vm_size_t size) 1486 { 1487 1488 if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active)) 1489 tlb_flush_range(sva, size); 1490 } 1491 1492 /* 1493 * Abuse the pte2 nodes for unmapped kva to thread a kva freelist through. 1494 * Requirements: 1495 * - Must deal with pages in order to ensure that none of the PTE2_* bits 1496 * are ever set, PTE2_V in particular. 1497 * - Assumes we can write to pte2s without pte2_store() atomic ops. 1498 * - Assumes nothing will ever test these addresses for 0 to indicate 1499 * no mapping instead of correctly checking PTE2_V. 1500 * - Assumes a vm_offset_t will fit in a pte2 (true for arm). 1501 * Because PTE2_V is never set, there can be no mappings to invalidate. 
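 *
 * For illustration: pmap_pte2list_init(&head, base, 3) leaves head == base
 * and threads the freelist through the pte2 slots as
 * base -> base + PAGE_SIZE -> base + 2 * PAGE_SIZE -> 0, so the first
 * pmap_pte2list_alloc() returns base.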
1502 */ 1503 static vm_offset_t 1504 pmap_pte2list_alloc(vm_offset_t *head) 1505 { 1506 pt2_entry_t *pte2p; 1507 vm_offset_t va; 1508 1509 va = *head; 1510 if (va == 0) 1511 panic("pmap_ptelist_alloc: exhausted ptelist KVA"); 1512 pte2p = pt2map_entry(va); 1513 *head = *pte2p; 1514 if (*head & PTE2_V) 1515 panic("%s: va with PTE2_V set!", __func__); 1516 *pte2p = 0; 1517 return (va); 1518 } 1519 1520 static void 1521 pmap_pte2list_free(vm_offset_t *head, vm_offset_t va) 1522 { 1523 pt2_entry_t *pte2p; 1524 1525 if (va & PTE2_V) 1526 panic("%s: freeing va with PTE2_V set!", __func__); 1527 pte2p = pt2map_entry(va); 1528 *pte2p = *head; /* virtual! PTE2_V is 0 though */ 1529 *head = va; 1530 } 1531 1532 static void 1533 pmap_pte2list_init(vm_offset_t *head, void *base, int npages) 1534 { 1535 int i; 1536 vm_offset_t va; 1537 1538 *head = 0; 1539 for (i = npages - 1; i >= 0; i--) { 1540 va = (vm_offset_t)base + i * PAGE_SIZE; 1541 pmap_pte2list_free(head, va); 1542 } 1543 } 1544 1545 /***************************************************************************** 1546 * 1547 * PMAP third and final stage initialization. 1548 * 1549 * After pmap_init() is called, PMAP subsystem is fully initialized. 1550 * 1551 *****************************************************************************/ 1552 1553 SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 1554 "VM/pmap parameters"); 1555 1556 SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_max, CTLFLAG_RD, &pv_entry_max, 0, 1557 "Max number of PV entries"); 1558 SYSCTL_INT(_vm_pmap, OID_AUTO, shpgperproc, CTLFLAG_RD, &shpgperproc, 0, 1559 "Page share factor per proc"); 1560 1561 static u_long nkpt2pg = NKPT2PG; 1562 SYSCTL_ULONG(_vm_pmap, OID_AUTO, nkpt2pg, CTLFLAG_RD, 1563 &nkpt2pg, 0, "Pre-allocated pages for kernel PT2s"); 1564 1565 static int sp_enabled = 1; 1566 SYSCTL_INT(_vm_pmap, OID_AUTO, sp_enabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, 1567 &sp_enabled, 0, "Are large page mappings enabled?"); 1568 1569 bool 1570 pmap_ps_enabled(pmap_t pmap __unused) 1571 { 1572 1573 return (sp_enabled != 0); 1574 } 1575 1576 static SYSCTL_NODE(_vm_pmap, OID_AUTO, pte1, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 1577 "1MB page mapping counters"); 1578 1579 static u_long pmap_pte1_demotions; 1580 SYSCTL_ULONG(_vm_pmap_pte1, OID_AUTO, demotions, CTLFLAG_RD, 1581 &pmap_pte1_demotions, 0, "1MB page demotions"); 1582 1583 static u_long pmap_pte1_mappings; 1584 SYSCTL_ULONG(_vm_pmap_pte1, OID_AUTO, mappings, CTLFLAG_RD, 1585 &pmap_pte1_mappings, 0, "1MB page mappings"); 1586 1587 static u_long pmap_pte1_p_failures; 1588 SYSCTL_ULONG(_vm_pmap_pte1, OID_AUTO, p_failures, CTLFLAG_RD, 1589 &pmap_pte1_p_failures, 0, "1MB page promotion failures"); 1590 1591 static u_long pmap_pte1_promotions; 1592 SYSCTL_ULONG(_vm_pmap_pte1, OID_AUTO, promotions, CTLFLAG_RD, 1593 &pmap_pte1_promotions, 0, "1MB page promotions"); 1594 1595 static u_long pmap_pte1_kern_demotions; 1596 SYSCTL_ULONG(_vm_pmap_pte1, OID_AUTO, kern_demotions, CTLFLAG_RD, 1597 &pmap_pte1_kern_demotions, 0, "1MB page kernel demotions"); 1598 1599 static u_long pmap_pte1_kern_promotions; 1600 SYSCTL_ULONG(_vm_pmap_pte1, OID_AUTO, kern_promotions, CTLFLAG_RD, 1601 &pmap_pte1_kern_promotions, 0, "1MB page kernel promotions"); 1602 1603 static __inline ttb_entry_t 1604 pmap_ttb_get(pmap_t pmap) 1605 { 1606 1607 return (vtophys(pmap->pm_pt1) | ttb_flags); 1608 } 1609 1610 /* 1611 * Initialize a vm_page's machine-dependent fields. 1612 * 1613 * Variations: 1614 * 1. Pages for L2 page tables are always not managed. 
So, pv_list and 1615 * pt2_wirecount can share same physical space. However, proper 1616 * initialization on a page alloc for page tables and reinitialization 1617 * on the page free must be ensured. 1618 */ 1619 void 1620 pmap_page_init(vm_page_t m) 1621 { 1622 1623 TAILQ_INIT(&m->md.pv_list); 1624 pt2_wirecount_init(m); 1625 m->md.pat_mode = VM_MEMATTR_DEFAULT; 1626 } 1627 1628 /* 1629 * Virtualization for faster way how to zero whole page. 1630 */ 1631 static __inline void 1632 pagezero(void *page) 1633 { 1634 1635 bzero(page, PAGE_SIZE); 1636 } 1637 1638 /* 1639 * Zero L2 page table page. 1640 * Use same KVA as in pmap_zero_page(). 1641 */ 1642 static __inline vm_paddr_t 1643 pmap_pt2pg_zero(vm_page_t m) 1644 { 1645 pt2_entry_t *cmap2_pte2p; 1646 vm_paddr_t pa; 1647 struct pcpu *pc; 1648 1649 pa = VM_PAGE_TO_PHYS(m); 1650 1651 /* 1652 * XXX: For now, we map whole page even if it's already zero, 1653 * to sync it even if the sync is only DSB. 1654 */ 1655 sched_pin(); 1656 pc = get_pcpu(); 1657 cmap2_pte2p = pc->pc_cmap2_pte2p; 1658 mtx_lock(&pc->pc_cmap_lock); 1659 if (pte2_load(cmap2_pte2p) != 0) 1660 panic("%s: CMAP2 busy", __func__); 1661 pte2_store(cmap2_pte2p, PTE2_KERN_NG(pa, PTE2_AP_KRW, 1662 vm_page_pte2_attr(m))); 1663 /* Even VM_ALLOC_ZERO request is only advisory. */ 1664 if ((m->flags & PG_ZERO) == 0) 1665 pagezero(pc->pc_cmap2_addr); 1666 pte2_sync_range((pt2_entry_t *)pc->pc_cmap2_addr, PAGE_SIZE); 1667 pte2_clear(cmap2_pte2p); 1668 tlb_flush((vm_offset_t)pc->pc_cmap2_addr); 1669 1670 /* 1671 * Unpin the thread before releasing the lock. Otherwise the thread 1672 * could be rescheduled while still bound to the current CPU, only 1673 * to unpin itself immediately upon resuming execution. 1674 */ 1675 sched_unpin(); 1676 mtx_unlock(&pc->pc_cmap_lock); 1677 1678 return (pa); 1679 } 1680 1681 /* 1682 * Init just allocated page as L2 page table(s) holder 1683 * and return its physical address. 1684 */ 1685 static __inline vm_paddr_t 1686 pmap_pt2pg_init(pmap_t pmap, vm_offset_t va, vm_page_t m) 1687 { 1688 vm_paddr_t pa; 1689 pt2_entry_t *pte2p; 1690 1691 /* Check page attributes. */ 1692 if (m->md.pat_mode != pt_memattr) 1693 pmap_page_set_memattr(m, pt_memattr); 1694 1695 /* Zero page and init wire counts. */ 1696 pa = pmap_pt2pg_zero(m); 1697 pt2_wirecount_init(m); 1698 1699 /* 1700 * Map page to PT2MAP address space for given pmap. 1701 * Note that PT2MAP space is shared with all pmaps. 1702 */ 1703 if (pmap == kernel_pmap) 1704 pmap_kenter_pt2tab(va, PTE2_KPT(pa)); 1705 else { 1706 pte2p = pmap_pt2tab_entry(pmap, va); 1707 pt2tab_store(pte2p, PTE2_KPT_NG(pa)); 1708 } 1709 1710 return (pa); 1711 } 1712 1713 /* 1714 * Initialize the pmap module. 1715 * Called by vm_init, to initialize any structures that the pmap 1716 * system needs to map virtual memory. 1717 */ 1718 void 1719 pmap_init(void) 1720 { 1721 vm_size_t s; 1722 pt2_entry_t *pte2p, pte2; 1723 u_int i, pte1_idx, pv_npg; 1724 1725 PDEBUG(1, printf("%s: phys_start = %#x\n", __func__, PHYSADDR)); 1726 1727 /* 1728 * Initialize the vm page array entries for kernel pmap's 1729 * L2 page table pages allocated in advance. 
1730 */ 1731 pte1_idx = pte1_index(KERNBASE - PT2MAP_SIZE); 1732 pte2p = kern_pt2tab_entry(KERNBASE - PT2MAP_SIZE); 1733 for (i = 0; i < nkpt2pg + NPG_IN_PT2TAB; i++, pte2p++) { 1734 vm_paddr_t pa; 1735 vm_page_t m; 1736 1737 pte2 = pte2_load(pte2p); 1738 KASSERT(pte2_is_valid(pte2), ("%s: no valid entry", __func__)); 1739 1740 pa = pte2_pa(pte2); 1741 m = PHYS_TO_VM_PAGE(pa); 1742 KASSERT(m >= vm_page_array && 1743 m < &vm_page_array[vm_page_array_size], 1744 ("%s: L2 page table page is out of range", __func__)); 1745 1746 m->pindex = pte1_idx; 1747 m->phys_addr = pa; 1748 pte1_idx += NPT2_IN_PG; 1749 } 1750 1751 /* 1752 * Initialize the address space (zone) for the pv entries. Set a 1753 * high water mark so that the system can recover from excessive 1754 * numbers of pv entries. 1755 */ 1756 TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc); 1757 pv_entry_max = shpgperproc * maxproc + vm_cnt.v_page_count; 1758 TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max); 1759 pv_entry_max = roundup(pv_entry_max, _NPCPV); 1760 pv_entry_high_water = 9 * (pv_entry_max / 10); 1761 1762 /* 1763 * Are large page mappings enabled? 1764 */ 1765 TUNABLE_INT_FETCH("vm.pmap.sp_enabled", &sp_enabled); 1766 if (sp_enabled) { 1767 KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0, 1768 ("%s: can't assign to pagesizes[1]", __func__)); 1769 pagesizes[1] = PTE1_SIZE; 1770 } 1771 1772 /* 1773 * Calculate the size of the pv head table for sections. 1774 * Handle the possibility that "vm_phys_segs[...].end" is zero. 1775 * Note that the table is only for sections which could be promoted. 1776 */ 1777 first_managed_pa = pte1_trunc(vm_phys_segs[0].start); 1778 pv_npg = (pte1_trunc(vm_phys_segs[vm_phys_nsegs - 1].end - PAGE_SIZE) 1779 - first_managed_pa) / PTE1_SIZE + 1; 1780 1781 /* 1782 * Allocate memory for the pv head table for sections. 1783 */ 1784 s = (vm_size_t)(pv_npg * sizeof(struct md_page)); 1785 s = round_page(s); 1786 pv_table = (struct md_page *)kmem_malloc(s, M_WAITOK | M_ZERO); 1787 for (i = 0; i < pv_npg; i++) 1788 TAILQ_INIT(&pv_table[i].pv_list); 1789 1790 pv_maxchunks = MAX(pv_entry_max / _NPCPV, maxproc); 1791 pv_chunkbase = (struct pv_chunk *)kva_alloc(PAGE_SIZE * pv_maxchunks); 1792 if (pv_chunkbase == NULL) 1793 panic("%s: not enough kvm for pv chunks", __func__); 1794 pmap_pte2list_init(&pv_vafree, pv_chunkbase, pv_maxchunks); 1795 } 1796 1797 /* 1798 * Add a list of wired pages to the kva 1799 * this routine is only used for temporary 1800 * kernel mappings that do not need to have 1801 * page modification or references recorded. 1802 * Note that old mappings are simply written 1803 * over. The page *must* be wired. 1804 * Note: SMP coherent. Uses a ranged shootdown IPI. 1805 */ 1806 void 1807 pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count) 1808 { 1809 u_int anychanged; 1810 pt2_entry_t *epte2p, *pte2p, pte2; 1811 vm_page_t m; 1812 vm_paddr_t pa; 1813 1814 anychanged = 0; 1815 pte2p = pt2map_entry(sva); 1816 epte2p = pte2p + count; 1817 while (pte2p < epte2p) { 1818 m = *ma++; 1819 pa = VM_PAGE_TO_PHYS(m); 1820 pte2 = pte2_load(pte2p); 1821 if ((pte2_pa(pte2) != pa) || 1822 (pte2_attr(pte2) != vm_page_pte2_attr(m))) { 1823 anychanged++; 1824 pte2_store(pte2p, PTE2_KERN(pa, PTE2_AP_KRW, 1825 vm_page_pte2_attr(m))); 1826 } 1827 pte2p++; 1828 } 1829 if (__predict_false(anychanged)) 1830 tlb_flush_range(sva, count * PAGE_SIZE); 1831 } 1832 1833 /* 1834 * This routine tears out page mappings from the 1835 * kernel -- it is meant only for temporary mappings. 1836 * Note: SMP coherent. 
Uses a ranged shootdown IPI. 1837 */ 1838 void 1839 pmap_qremove(vm_offset_t sva, int count) 1840 { 1841 vm_offset_t va; 1842 1843 va = sva; 1844 while (count-- > 0) { 1845 pmap_kremove(va); 1846 va += PAGE_SIZE; 1847 } 1848 tlb_flush_range(sva, va - sva); 1849 } 1850 1851 /* 1852 * Are we current address space or kernel? 1853 */ 1854 static __inline int 1855 pmap_is_current(pmap_t pmap) 1856 { 1857 1858 return (pmap == kernel_pmap || 1859 (pmap == vmspace_pmap(curthread->td_proc->p_vmspace))); 1860 } 1861 1862 /* 1863 * If the given pmap is not the current or kernel pmap, the returned 1864 * pte2 must be released by passing it to pmap_pte2_release(). 1865 */ 1866 static pt2_entry_t * 1867 pmap_pte2(pmap_t pmap, vm_offset_t va) 1868 { 1869 pt1_entry_t pte1; 1870 vm_paddr_t pt2pg_pa; 1871 1872 pte1 = pte1_load(pmap_pte1(pmap, va)); 1873 if (pte1_is_section(pte1)) 1874 panic("%s: attempt to map PTE1", __func__); 1875 if (pte1_is_link(pte1)) { 1876 /* Are we current address space or kernel? */ 1877 if (pmap_is_current(pmap)) 1878 return (pt2map_entry(va)); 1879 /* Note that L2 page table size is not equal to PAGE_SIZE. */ 1880 pt2pg_pa = trunc_page(pte1_link_pa(pte1)); 1881 mtx_lock(&PMAP2mutex); 1882 if (pte2_pa(pte2_load(PMAP2)) != pt2pg_pa) { 1883 pte2_store(PMAP2, PTE2_KPT(pt2pg_pa)); 1884 tlb_flush((vm_offset_t)PADDR2); 1885 } 1886 return (PADDR2 + (arm32_btop(va) & (NPTE2_IN_PG - 1))); 1887 } 1888 return (NULL); 1889 } 1890 1891 /* 1892 * Releases a pte2 that was obtained from pmap_pte2(). 1893 * Be prepared for the pte2p being NULL. 1894 */ 1895 static __inline void 1896 pmap_pte2_release(pt2_entry_t *pte2p) 1897 { 1898 1899 if ((pt2_entry_t *)(trunc_page((vm_offset_t)pte2p)) == PADDR2) { 1900 mtx_unlock(&PMAP2mutex); 1901 } 1902 } 1903 1904 /* 1905 * Super fast pmap_pte2 routine best used when scanning 1906 * the pv lists. This eliminates many coarse-grained 1907 * invltlb calls. Note that many of the pv list 1908 * scans are across different pmaps. It is very wasteful 1909 * to do an entire tlb flush for checking a single mapping. 1910 * 1911 * If the given pmap is not the current pmap, pvh_global_lock 1912 * must be held and curthread pinned to a CPU. 1913 */ 1914 static pt2_entry_t * 1915 pmap_pte2_quick(pmap_t pmap, vm_offset_t va) 1916 { 1917 pt1_entry_t pte1; 1918 vm_paddr_t pt2pg_pa; 1919 1920 pte1 = pte1_load(pmap_pte1(pmap, va)); 1921 if (pte1_is_section(pte1)) 1922 panic("%s: attempt to map PTE1", __func__); 1923 if (pte1_is_link(pte1)) { 1924 /* Are we current address space or kernel? */ 1925 if (pmap_is_current(pmap)) 1926 return (pt2map_entry(va)); 1927 rw_assert(&pvh_global_lock, RA_WLOCKED); 1928 KASSERT(curthread->td_pinned > 0, 1929 ("%s: curthread not pinned", __func__)); 1930 /* Note that L2 page table size is not equal to PAGE_SIZE. */ 1931 pt2pg_pa = trunc_page(pte1_link_pa(pte1)); 1932 if (pte2_pa(pte2_load(PMAP1)) != pt2pg_pa) { 1933 pte2_store(PMAP1, PTE2_KPT(pt2pg_pa)); 1934 #ifdef SMP 1935 PMAP1cpu = PCPU_GET(cpuid); 1936 #endif 1937 tlb_flush_local((vm_offset_t)PADDR1); 1938 PMAP1changed++; 1939 } else 1940 #ifdef SMP 1941 if (PMAP1cpu != PCPU_GET(cpuid)) { 1942 PMAP1cpu = PCPU_GET(cpuid); 1943 tlb_flush_local((vm_offset_t)PADDR1); 1944 PMAP1changedcpu++; 1945 } else 1946 #endif 1947 PMAP1unchanged++; 1948 return (PADDR1 + (arm32_btop(va) & (NPTE2_IN_PG - 1))); 1949 } 1950 return (NULL); 1951 } 1952 1953 /* 1954 * Routine: pmap_extract 1955 * Function: 1956 * Extract the physical page address associated 1957 * with the given map/virtual_address pair. 
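A typical use is pa = pmap_extract(pmap, va); the routine takes the pmap lock internally and returns zero when no valid mapping exists for the given address.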
1958 */ 1959 vm_paddr_t 1960 pmap_extract(pmap_t pmap, vm_offset_t va) 1961 { 1962 vm_paddr_t pa; 1963 pt1_entry_t pte1; 1964 pt2_entry_t *pte2p; 1965 1966 PMAP_LOCK(pmap); 1967 pte1 = pte1_load(pmap_pte1(pmap, va)); 1968 if (pte1_is_section(pte1)) 1969 pa = pte1_pa(pte1) | (va & PTE1_OFFSET); 1970 else if (pte1_is_link(pte1)) { 1971 pte2p = pmap_pte2(pmap, va); 1972 pa = pte2_pa(pte2_load(pte2p)) | (va & PTE2_OFFSET); 1973 pmap_pte2_release(pte2p); 1974 } else 1975 pa = 0; 1976 PMAP_UNLOCK(pmap); 1977 return (pa); 1978 } 1979 1980 /* 1981 * Routine: pmap_extract_and_hold 1982 * Function: 1983 * Atomically extract and hold the physical page 1984 * with the given pmap and virtual address pair 1985 * if that mapping permits the given protection. 1986 */ 1987 vm_page_t 1988 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot) 1989 { 1990 vm_paddr_t pa; 1991 pt1_entry_t pte1; 1992 pt2_entry_t pte2, *pte2p; 1993 vm_page_t m; 1994 1995 m = NULL; 1996 PMAP_LOCK(pmap); 1997 pte1 = pte1_load(pmap_pte1(pmap, va)); 1998 if (pte1_is_section(pte1)) { 1999 if (!(pte1 & PTE1_RO) || !(prot & VM_PROT_WRITE)) { 2000 pa = pte1_pa(pte1) | (va & PTE1_OFFSET); 2001 m = PHYS_TO_VM_PAGE(pa); 2002 if (!vm_page_wire_mapped(m)) 2003 m = NULL; 2004 } 2005 } else if (pte1_is_link(pte1)) { 2006 pte2p = pmap_pte2(pmap, va); 2007 pte2 = pte2_load(pte2p); 2008 pmap_pte2_release(pte2p); 2009 if (pte2_is_valid(pte2) && 2010 (!(pte2 & PTE2_RO) || !(prot & VM_PROT_WRITE))) { 2011 pa = pte2_pa(pte2); 2012 m = PHYS_TO_VM_PAGE(pa); 2013 if (!vm_page_wire_mapped(m)) 2014 m = NULL; 2015 } 2016 } 2017 PMAP_UNLOCK(pmap); 2018 return (m); 2019 } 2020 2021 /* 2022 * Grow the number of kernel L2 page table entries, if needed. 2023 */ 2024 void 2025 pmap_growkernel(vm_offset_t addr) 2026 { 2027 vm_page_t m; 2028 vm_paddr_t pt2pg_pa, pt2_pa; 2029 pt1_entry_t pte1; 2030 pt2_entry_t pte2; 2031 2032 PDEBUG(1, printf("%s: addr = %#x\n", __func__, addr)); 2033 /* 2034 * All the time kernel_vm_end is first KVA for which underlying 2035 * L2 page table is either not allocated or linked from L1 page table 2036 * (not considering sections). Except for two possible cases: 2037 * 2038 * (1) in the very beginning as long as pmap_growkernel() was 2039 * not called, it could be first unused KVA (which is not 2040 * rounded up to PTE1_SIZE), 2041 * 2042 * (2) when all KVA space is mapped and vm_map_max(kernel_map) 2043 * address is not rounded up to PTE1_SIZE. (For example, 2044 * it could be 0xFFFFFFFF.) 2045 */ 2046 kernel_vm_end = pte1_roundup(kernel_vm_end); 2047 mtx_assert(&kernel_map->system_mtx, MA_OWNED); 2048 addr = roundup2(addr, PTE1_SIZE); 2049 if (addr - 1 >= vm_map_max(kernel_map)) 2050 addr = vm_map_max(kernel_map); 2051 while (kernel_vm_end < addr) { 2052 pte1 = pte1_load(kern_pte1(kernel_vm_end)); 2053 if (pte1_is_valid(pte1)) { 2054 kernel_vm_end += PTE1_SIZE; 2055 if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) { 2056 kernel_vm_end = vm_map_max(kernel_map); 2057 break; 2058 } 2059 continue; 2060 } 2061 2062 /* 2063 * kernel_vm_end_new is used in pmap_pinit() when kernel 2064 * mappings are entered to new pmap all at once to avoid race 2065 * between pmap_kenter_pte1() and kernel_vm_end increase. 2066 * The same aplies to pmap_kenter_pt2tab(). 2067 */ 2068 kernel_vm_end_new = kernel_vm_end + PTE1_SIZE; 2069 2070 pte2 = pt2tab_load(kern_pt2tab_entry(kernel_vm_end)); 2071 if (!pte2_is_valid(pte2)) { 2072 /* 2073 * Install new PT2s page into kernel PT2TAB. 
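The page is allocated wired, and pmap_pt2pg_init() below zeroes it, initializes its wire counts and enters it into the kernel PT2TAB; its pindex encodes which group of L2 page tables it will hold.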
2074 */ 2075 m = vm_page_alloc(NULL, 2076 pte1_index(kernel_vm_end) & ~PT2PG_MASK, 2077 VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | 2078 VM_ALLOC_WIRED | VM_ALLOC_ZERO); 2079 if (m == NULL) 2080 panic("%s: no memory to grow kernel", __func__); 2081 /* 2082 * QQQ: To link all new L2 page tables from L1 page 2083 * table now and so pmap_kenter_pte1() them 2084 * at once together with pmap_kenter_pt2tab() 2085 * could be nice speed up. However, 2086 * pmap_growkernel() does not happen so often... 2087 * QQQ: The other TTBR is another option. 2088 */ 2089 pt2pg_pa = pmap_pt2pg_init(kernel_pmap, kernel_vm_end, 2090 m); 2091 } else 2092 pt2pg_pa = pte2_pa(pte2); 2093 2094 pt2_pa = page_pt2pa(pt2pg_pa, pte1_index(kernel_vm_end)); 2095 pmap_kenter_pte1(kernel_vm_end, PTE1_LINK(pt2_pa)); 2096 2097 kernel_vm_end = kernel_vm_end_new; 2098 if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) { 2099 kernel_vm_end = vm_map_max(kernel_map); 2100 break; 2101 } 2102 } 2103 } 2104 2105 static int 2106 kvm_size(SYSCTL_HANDLER_ARGS) 2107 { 2108 unsigned long ksize = vm_max_kernel_address - KERNBASE; 2109 2110 return (sysctl_handle_long(oidp, &ksize, 0, req)); 2111 } 2112 SYSCTL_PROC(_vm, OID_AUTO, kvm_size, 2113 CTLTYPE_LONG | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 0, 0, kvm_size, "IU", 2114 "Size of KVM"); 2115 2116 static int 2117 kvm_free(SYSCTL_HANDLER_ARGS) 2118 { 2119 unsigned long kfree = vm_max_kernel_address - kernel_vm_end; 2120 2121 return (sysctl_handle_long(oidp, &kfree, 0, req)); 2122 } 2123 SYSCTL_PROC(_vm, OID_AUTO, kvm_free, 2124 CTLTYPE_LONG | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 0, 0, kvm_free, "IU", 2125 "Amount of KVM free"); 2126 2127 /*********************************************** 2128 * 2129 * Pmap allocation/deallocation routines. 2130 * 2131 ***********************************************/ 2132 2133 /* 2134 * Initialize the pmap for the swapper process. 2135 */ 2136 void 2137 pmap_pinit0(pmap_t pmap) 2138 { 2139 PDEBUG(1, printf("%s: pmap = %p\n", __func__, pmap)); 2140 2141 PMAP_LOCK_INIT(pmap); 2142 2143 /* 2144 * Kernel page table directory and pmap stuff around is already 2145 * initialized, we are using it right now and here. So, finish 2146 * only PMAP structures initialization for process0 ... 2147 * 2148 * Since the L1 page table and PT2TAB is shared with the kernel pmap, 2149 * which is already included in the list "allpmaps", this pmap does 2150 * not need to be inserted into that list. 2151 */ 2152 pmap->pm_pt1 = kern_pt1; 2153 pmap->pm_pt2tab = kern_pt2tab; 2154 CPU_ZERO(&pmap->pm_active); 2155 PCPU_SET(curpmap, pmap); 2156 TAILQ_INIT(&pmap->pm_pvchunk); 2157 bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 2158 CPU_SET(0, &pmap->pm_active); 2159 } 2160 2161 static __inline void 2162 pte1_copy_nosync(pt1_entry_t *spte1p, pt1_entry_t *dpte1p, vm_offset_t sva, 2163 vm_offset_t eva) 2164 { 2165 u_int idx, count; 2166 2167 idx = pte1_index(sva); 2168 count = (pte1_index(eva) - idx + 1) * sizeof(pt1_entry_t); 2169 bcopy(spte1p + idx, dpte1p + idx, count); 2170 } 2171 2172 static __inline void 2173 pt2tab_copy_nosync(pt2_entry_t *spte2p, pt2_entry_t *dpte2p, vm_offset_t sva, 2174 vm_offset_t eva) 2175 { 2176 u_int idx, count; 2177 2178 idx = pt2tab_index(sva); 2179 count = (pt2tab_index(eva) - idx + 1) * sizeof(pt2_entry_t); 2180 bcopy(spte2p + idx, dpte2p + idx, count); 2181 } 2182 2183 /* 2184 * Initialize a preallocated and zeroed pmap structure, 2185 * such as one in a vmspace structure. 
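The function returns 1 on success and 0 if the L1 page table or the PT2TAB cannot be allocated; on failure nothing is left allocated in the pmap.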
2186 */ 2187 int 2188 pmap_pinit(pmap_t pmap) 2189 { 2190 pt1_entry_t *pte1p; 2191 pt2_entry_t *pte2p; 2192 vm_paddr_t pa, pt2tab_pa; 2193 u_int i; 2194 2195 PDEBUG(6, printf("%s: pmap = %p, pm_pt1 = %p\n", __func__, pmap, 2196 pmap->pm_pt1)); 2197 2198 /* 2199 * No need to allocate L2 page table space yet but we do need 2200 * a valid L1 page table and PT2TAB table. 2201 * 2202 * Install shared kernel mappings to these tables. It's a little 2203 * tricky as some parts of KVA are reserved for vectors, devices, 2204 * and whatever else. These parts are supposed to be above 2205 * vm_max_kernel_address. Thus two regions should be installed: 2206 * 2207 * (1) <KERNBASE, kernel_vm_end), 2208 * (2) <vm_max_kernel_address, 0xFFFFFFFF>. 2209 * 2210 * QQQ: The second region should be stable enough to be installed 2211 * only once in time when the tables are allocated. 2212 * QQQ: Maybe copy of both regions at once could be faster ... 2213 * QQQ: Maybe the other TTBR is an option. 2214 * 2215 * Finally, install own PT2TAB table to these tables. 2216 */ 2217 2218 if (pmap->pm_pt1 == NULL) { 2219 pmap->pm_pt1 = (pt1_entry_t *)kmem_alloc_contig(NB_IN_PT1, 2220 M_NOWAIT | M_ZERO, 0, -1UL, NB_IN_PT1, 0, pt_memattr); 2221 if (pmap->pm_pt1 == NULL) 2222 return (0); 2223 } 2224 if (pmap->pm_pt2tab == NULL) { 2225 /* 2226 * QQQ: (1) PT2TAB must be contiguous. If PT2TAB is one page 2227 * only, what should be the only size for 32 bit systems, 2228 * then we could allocate it with vm_page_alloc() and all 2229 * the stuff needed as other L2 page table pages. 2230 * (2) Note that a process PT2TAB is special L2 page table 2231 * page. Its mapping in kernel_arena is permanent and can 2232 * be used no matter which process is current. Its mapping 2233 * in PT2MAP can be used only for current process. 2234 */ 2235 pmap->pm_pt2tab = (pt2_entry_t *)kmem_alloc_attr(NB_IN_PT2TAB, 2236 M_NOWAIT | M_ZERO, 0, -1UL, pt_memattr); 2237 if (pmap->pm_pt2tab == NULL) { 2238 /* 2239 * QQQ: As struct pmap is allocated from UMA with 2240 * UMA_ZONE_NOFREE flag, it's important to leave 2241 * no allocation in pmap if initialization failed. 2242 */ 2243 kmem_free((vm_offset_t)pmap->pm_pt1, NB_IN_PT1); 2244 pmap->pm_pt1 = NULL; 2245 return (0); 2246 } 2247 /* 2248 * QQQ: Each L2 page table page vm_page_t has pindex set to 2249 * pte1 index of virtual address mapped by this page. 2250 * It's not valid for non kernel PT2TABs themselves. 2251 * The pindex of these pages can not be altered because 2252 * of the way how they are allocated now. However, it 2253 * should not be a problem. 2254 */ 2255 } 2256 2257 mtx_lock_spin(&allpmaps_lock); 2258 /* 2259 * To avoid race with pmap_kenter_pte1() and pmap_kenter_pt2tab(), 2260 * kernel_vm_end_new is used here instead of kernel_vm_end. 2261 */ 2262 pte1_copy_nosync(kern_pt1, pmap->pm_pt1, KERNBASE, 2263 kernel_vm_end_new - 1); 2264 pte1_copy_nosync(kern_pt1, pmap->pm_pt1, vm_max_kernel_address, 2265 0xFFFFFFFF); 2266 pt2tab_copy_nosync(kern_pt2tab, pmap->pm_pt2tab, KERNBASE, 2267 kernel_vm_end_new - 1); 2268 pt2tab_copy_nosync(kern_pt2tab, pmap->pm_pt2tab, vm_max_kernel_address, 2269 0xFFFFFFFF); 2270 LIST_INSERT_HEAD(&allpmaps, pmap, pm_list); 2271 mtx_unlock_spin(&allpmaps_lock); 2272 2273 /* 2274 * Store PT2MAP PT2 pages (a.k.a. PT2TAB) in PT2TAB itself. 2275 * I.e. self reference mapping. The PT2TAB is private, however mapped 2276 * into shared PT2MAP space, so the mapping should be not global. 
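Concretely, each physical page of the PT2TAB is entered into the PT2TAB itself with a non-global PTE2 (PTE2_KPT_NG), and matching PTE1_LINK entries are stored into the pmap's L1 table, so that PT2MAP covers the PT2TAB as well.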
2277 */ 2278 pt2tab_pa = vtophys(pmap->pm_pt2tab); 2279 pte2p = pmap_pt2tab_entry(pmap, (vm_offset_t)PT2MAP); 2280 for (pa = pt2tab_pa, i = 0; i < NPG_IN_PT2TAB; i++, pa += PTE2_SIZE) { 2281 pt2tab_store(pte2p++, PTE2_KPT_NG(pa)); 2282 } 2283 2284 /* Insert PT2MAP PT2s into pmap PT1. */ 2285 pte1p = pmap_pte1(pmap, (vm_offset_t)PT2MAP); 2286 for (pa = pt2tab_pa, i = 0; i < NPT2_IN_PT2TAB; i++, pa += NB_IN_PT2) { 2287 pte1_store(pte1p++, PTE1_LINK(pa)); 2288 } 2289 2290 /* 2291 * Now synchronize new mapping which was made above. 2292 */ 2293 pte1_sync_range(pmap->pm_pt1, NB_IN_PT1); 2294 pte2_sync_range(pmap->pm_pt2tab, NB_IN_PT2TAB); 2295 2296 CPU_ZERO(&pmap->pm_active); 2297 TAILQ_INIT(&pmap->pm_pvchunk); 2298 bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 2299 2300 return (1); 2301 } 2302 2303 #ifdef INVARIANTS 2304 static boolean_t 2305 pt2tab_user_is_empty(pt2_entry_t *tab) 2306 { 2307 u_int i, end; 2308 2309 end = pt2tab_index(VM_MAXUSER_ADDRESS); 2310 for (i = 0; i < end; i++) 2311 if (tab[i] != 0) return (FALSE); 2312 return (TRUE); 2313 } 2314 #endif 2315 /* 2316 * Release any resources held by the given physical map. 2317 * Called when a pmap initialized by pmap_pinit is being released. 2318 * Should only be called if the map contains no valid mappings. 2319 */ 2320 void 2321 pmap_release(pmap_t pmap) 2322 { 2323 #ifdef INVARIANTS 2324 vm_offset_t start, end; 2325 #endif 2326 KASSERT(pmap->pm_stats.resident_count == 0, 2327 ("%s: pmap resident count %ld != 0", __func__, 2328 pmap->pm_stats.resident_count)); 2329 KASSERT(pt2tab_user_is_empty(pmap->pm_pt2tab), 2330 ("%s: has allocated user PT2(s)", __func__)); 2331 KASSERT(CPU_EMPTY(&pmap->pm_active), 2332 ("%s: pmap %p is active on some CPU(s)", __func__, pmap)); 2333 2334 mtx_lock_spin(&allpmaps_lock); 2335 LIST_REMOVE(pmap, pm_list); 2336 mtx_unlock_spin(&allpmaps_lock); 2337 2338 #ifdef INVARIANTS 2339 start = pte1_index(KERNBASE) * sizeof(pt1_entry_t); 2340 end = (pte1_index(0xFFFFFFFF) + 1) * sizeof(pt1_entry_t); 2341 bzero((char *)pmap->pm_pt1 + start, end - start); 2342 2343 start = pt2tab_index(KERNBASE) * sizeof(pt2_entry_t); 2344 end = (pt2tab_index(0xFFFFFFFF) + 1) * sizeof(pt2_entry_t); 2345 bzero((char *)pmap->pm_pt2tab + start, end - start); 2346 #endif 2347 /* 2348 * We are leaving PT1 and PT2TAB allocated on released pmap, 2349 * so hopefully UMA vmspace_zone will always be inited with 2350 * UMA_ZONE_NOFREE flag. 2351 */ 2352 } 2353 2354 /********************************************************* 2355 * 2356 * L2 table pages and their pages management routines. 2357 * 2358 *********************************************************/ 2359 2360 /* 2361 * Virtual interface for L2 page table wire counting. 2362 * 2363 * Each L2 page table in a page has own counter which counts a number of 2364 * valid mappings in a table. Global page counter counts mappings in all 2365 * tables in a page plus a single itself mapping in PT2TAB. 2366 * 2367 * During a promotion we leave the associated L2 page table counter 2368 * untouched, so the table (strictly speaking a page which holds it) 2369 * is never freed if promoted. 2370 * 2371 * If a page m->ref_count == 1 then no valid mappings exist in any L2 page 2372 * table in the page and the page itself is only mapped in PT2TAB. 2373 */ 2374 2375 static __inline void 2376 pt2_wirecount_init(vm_page_t m) 2377 { 2378 u_int i; 2379 2380 /* 2381 * Note: A page m is allocated with VM_ALLOC_WIRED flag and 2382 * m->ref_count should be already set correctly. 
2383 * So, there is no need to set it again herein. 2384 */ 2385 for (i = 0; i < NPT2_IN_PG; i++) 2386 m->md.pt2_wirecount[i] = 0; 2387 } 2388 2389 static __inline void 2390 pt2_wirecount_inc(vm_page_t m, uint32_t pte1_idx) 2391 { 2392 2393 /* 2394 * Note: A just modificated pte2 (i.e. already allocated) 2395 * is acquiring one extra reference which must be 2396 * explicitly cleared. It influences the KASSERTs herein. 2397 * All L2 page tables in a page always belong to the same 2398 * pmap, so we allow only one extra reference for the page. 2399 */ 2400 KASSERT(m->md.pt2_wirecount[pte1_idx & PT2PG_MASK] < (NPTE2_IN_PT2 + 1), 2401 ("%s: PT2 is overflowing ...", __func__)); 2402 KASSERT(m->ref_count <= (NPTE2_IN_PG + 1), 2403 ("%s: PT2PG is overflowing ...", __func__)); 2404 2405 m->ref_count++; 2406 m->md.pt2_wirecount[pte1_idx & PT2PG_MASK]++; 2407 } 2408 2409 static __inline void 2410 pt2_wirecount_dec(vm_page_t m, uint32_t pte1_idx) 2411 { 2412 2413 KASSERT(m->md.pt2_wirecount[pte1_idx & PT2PG_MASK] != 0, 2414 ("%s: PT2 is underflowing ...", __func__)); 2415 KASSERT(m->ref_count > 1, 2416 ("%s: PT2PG is underflowing ...", __func__)); 2417 2418 m->ref_count--; 2419 m->md.pt2_wirecount[pte1_idx & PT2PG_MASK]--; 2420 } 2421 2422 static __inline void 2423 pt2_wirecount_set(vm_page_t m, uint32_t pte1_idx, uint16_t count) 2424 { 2425 2426 KASSERT(count <= NPTE2_IN_PT2, 2427 ("%s: invalid count %u", __func__, count)); 2428 KASSERT(m->ref_count > m->md.pt2_wirecount[pte1_idx & PT2PG_MASK], 2429 ("%s: PT2PG corrupting (%u, %u) ...", __func__, m->ref_count, 2430 m->md.pt2_wirecount[pte1_idx & PT2PG_MASK])); 2431 2432 m->ref_count -= m->md.pt2_wirecount[pte1_idx & PT2PG_MASK]; 2433 m->ref_count += count; 2434 m->md.pt2_wirecount[pte1_idx & PT2PG_MASK] = count; 2435 2436 KASSERT(m->ref_count <= (NPTE2_IN_PG + 1), 2437 ("%s: PT2PG is overflowed (%u) ...", __func__, m->ref_count)); 2438 } 2439 2440 static __inline uint32_t 2441 pt2_wirecount_get(vm_page_t m, uint32_t pte1_idx) 2442 { 2443 2444 return (m->md.pt2_wirecount[pte1_idx & PT2PG_MASK]); 2445 } 2446 2447 static __inline boolean_t 2448 pt2_is_empty(vm_page_t m, vm_offset_t va) 2449 { 2450 2451 return (m->md.pt2_wirecount[pte1_index(va) & PT2PG_MASK] == 0); 2452 } 2453 2454 static __inline boolean_t 2455 pt2_is_full(vm_page_t m, vm_offset_t va) 2456 { 2457 2458 return (m->md.pt2_wirecount[pte1_index(va) & PT2PG_MASK] == 2459 NPTE2_IN_PT2); 2460 } 2461 2462 static __inline boolean_t 2463 pt2pg_is_empty(vm_page_t m) 2464 { 2465 2466 return (m->ref_count == 1); 2467 } 2468 2469 /* 2470 * This routine is called if the L2 page table 2471 * is not mapped correctly. 2472 */ 2473 static vm_page_t 2474 _pmap_allocpte2(pmap_t pmap, vm_offset_t va, u_int flags) 2475 { 2476 uint32_t pte1_idx; 2477 pt1_entry_t *pte1p; 2478 pt2_entry_t pte2; 2479 vm_page_t m; 2480 vm_paddr_t pt2pg_pa, pt2_pa; 2481 2482 pte1_idx = pte1_index(va); 2483 pte1p = pmap->pm_pt1 + pte1_idx; 2484 2485 KASSERT(pte1_load(pte1p) == 0, 2486 ("%s: pm_pt1[%#x] is not zero: %#x", __func__, pte1_idx, 2487 pte1_load(pte1p))); 2488 2489 pte2 = pt2tab_load(pmap_pt2tab_entry(pmap, va)); 2490 if (!pte2_is_valid(pte2)) { 2491 /* 2492 * Install new PT2s page into pmap PT2TAB. 
2493 */ 2494 m = vm_page_alloc(NULL, pte1_idx & ~PT2PG_MASK, 2495 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO); 2496 if (m == NULL) { 2497 if ((flags & PMAP_ENTER_NOSLEEP) == 0) { 2498 PMAP_UNLOCK(pmap); 2499 rw_wunlock(&pvh_global_lock); 2500 vm_wait(NULL); 2501 rw_wlock(&pvh_global_lock); 2502 PMAP_LOCK(pmap); 2503 } 2504 2505 /* 2506 * Indicate the need to retry. While waiting, 2507 * the L2 page table page may have been allocated. 2508 */ 2509 return (NULL); 2510 } 2511 pmap->pm_stats.resident_count++; 2512 pt2pg_pa = pmap_pt2pg_init(pmap, va, m); 2513 } else { 2514 pt2pg_pa = pte2_pa(pte2); 2515 m = PHYS_TO_VM_PAGE(pt2pg_pa); 2516 } 2517 2518 pt2_wirecount_inc(m, pte1_idx); 2519 pt2_pa = page_pt2pa(pt2pg_pa, pte1_idx); 2520 pte1_store(pte1p, PTE1_LINK(pt2_pa)); 2521 2522 return (m); 2523 } 2524 2525 static vm_page_t 2526 pmap_allocpte2(pmap_t pmap, vm_offset_t va, u_int flags) 2527 { 2528 u_int pte1_idx; 2529 pt1_entry_t *pte1p, pte1; 2530 vm_page_t m; 2531 2532 pte1_idx = pte1_index(va); 2533 retry: 2534 pte1p = pmap->pm_pt1 + pte1_idx; 2535 pte1 = pte1_load(pte1p); 2536 2537 /* 2538 * This supports switching from a 1MB page to a 2539 * normal 4K page. 2540 */ 2541 if (pte1_is_section(pte1)) { 2542 (void)pmap_demote_pte1(pmap, pte1p, va); 2543 /* 2544 * Reload pte1 after demotion. 2545 * 2546 * Note: Demotion can even fail as either PT2 is not find for 2547 * the virtual address or PT2PG can not be allocated. 2548 */ 2549 pte1 = pte1_load(pte1p); 2550 } 2551 2552 /* 2553 * If the L2 page table page is mapped, we just increment the 2554 * hold count, and activate it. 2555 */ 2556 if (pte1_is_link(pte1)) { 2557 m = PHYS_TO_VM_PAGE(pte1_link_pa(pte1)); 2558 pt2_wirecount_inc(m, pte1_idx); 2559 } else { 2560 /* 2561 * Here if the PT2 isn't mapped, or if it has 2562 * been deallocated. 2563 */ 2564 m = _pmap_allocpte2(pmap, va, flags); 2565 if (m == NULL && (flags & PMAP_ENTER_NOSLEEP) == 0) 2566 goto retry; 2567 } 2568 2569 return (m); 2570 } 2571 2572 /* 2573 * Schedule the specified unused L2 page table page to be freed. Specifically, 2574 * add the page to the specified list of pages that will be released to the 2575 * physical memory manager after the TLB has been updated. 2576 */ 2577 static __inline void 2578 pmap_add_delayed_free_list(vm_page_t m, struct spglist *free) 2579 { 2580 2581 /* 2582 * Put page on a list so that it is released after 2583 * *ALL* TLB shootdown is done 2584 */ 2585 #ifdef PMAP_DEBUG 2586 pmap_zero_page_check(m); 2587 #endif 2588 m->flags |= PG_ZERO; 2589 SLIST_INSERT_HEAD(free, m, plinks.s.ss); 2590 } 2591 2592 /* 2593 * Unwire L2 page tables page. 2594 */ 2595 static void 2596 pmap_unwire_pt2pg(pmap_t pmap, vm_offset_t va, vm_page_t m) 2597 { 2598 pt1_entry_t *pte1p, opte1 __unused; 2599 pt2_entry_t *pte2p; 2600 uint32_t i; 2601 2602 KASSERT(pt2pg_is_empty(m), 2603 ("%s: pmap %p PT2PG %p wired", __func__, pmap, m)); 2604 2605 /* 2606 * Unmap all L2 page tables in the page from L1 page table. 2607 * 2608 * QQQ: Individual L2 page tables (except the last one) can be unmapped 2609 * earlier. However, we are doing that this way. 
2610 */ 2611 KASSERT(m->pindex == (pte1_index(va) & ~PT2PG_MASK), 2612 ("%s: pmap %p va %#x PT2PG %p bad index", __func__, pmap, va, m)); 2613 pte1p = pmap->pm_pt1 + m->pindex; 2614 for (i = 0; i < NPT2_IN_PG; i++, pte1p++) { 2615 KASSERT(m->md.pt2_wirecount[i] == 0, 2616 ("%s: pmap %p PT2 %u (PG %p) wired", __func__, pmap, i, m)); 2617 opte1 = pte1_load(pte1p); 2618 if (pte1_is_link(opte1)) { 2619 pte1_clear(pte1p); 2620 /* 2621 * Flush intermediate TLB cache. 2622 */ 2623 pmap_tlb_flush(pmap, (m->pindex + i) << PTE1_SHIFT); 2624 } 2625 #ifdef INVARIANTS 2626 else 2627 KASSERT((opte1 == 0) || pte1_is_section(opte1), 2628 ("%s: pmap %p va %#x bad pte1 %x at %u", __func__, 2629 pmap, va, opte1, i)); 2630 #endif 2631 } 2632 2633 /* 2634 * Unmap the page from PT2TAB. 2635 */ 2636 pte2p = pmap_pt2tab_entry(pmap, va); 2637 (void)pt2tab_load_clear(pte2p); 2638 pmap_tlb_flush(pmap, pt2map_pt2pg(va)); 2639 2640 m->ref_count = 0; 2641 pmap->pm_stats.resident_count--; 2642 2643 /* 2644 * This barrier is so that the ordinary store unmapping 2645 * the L2 page table page is globally performed before TLB shoot- 2646 * down is begun. 2647 */ 2648 wmb(); 2649 vm_wire_sub(1); 2650 } 2651 2652 /* 2653 * Decrements an L2 page table page's wire count, which is used to record the 2654 * number of valid page table entries within the page. If the wire count 2655 * drops to zero, then the page table page is unmapped. Returns TRUE if the 2656 * page table page was unmapped and FALSE otherwise. 2657 */ 2658 static __inline boolean_t 2659 pmap_unwire_pt2(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free) 2660 { 2661 pt2_wirecount_dec(m, pte1_index(va)); 2662 if (pt2pg_is_empty(m)) { 2663 /* 2664 * QQQ: Wire count is zero, so whole page should be zero and 2665 * we can set the PG_ZERO flag on it. 2666 * Note that when promotion is enabled, it takes some 2667 * more effort. See pmap_unwire_pt2_all() below. 2668 */ 2669 pmap_unwire_pt2pg(pmap, va, m); 2670 pmap_add_delayed_free_list(m, free); 2671 return (TRUE); 2672 } else 2673 return (FALSE); 2674 } 2675 2676 /* 2677 * Drop an L2 page table page's wire count at once, which is used to record 2678 * the number of valid L2 page table entries within the page. If the wire 2679 * count drops to zero, then the L2 page table page is unmapped. 2680 */ 2681 static __inline void 2682 pmap_unwire_pt2_all(pmap_t pmap, vm_offset_t va, vm_page_t m, 2683 struct spglist *free) 2684 { 2685 u_int pte1_idx = pte1_index(va); 2686 2687 KASSERT(m->pindex == (pte1_idx & ~PT2PG_MASK), 2688 ("%s: PT2 page's pindex is wrong", __func__)); 2689 KASSERT(m->ref_count > pt2_wirecount_get(m, pte1_idx), 2690 ("%s: bad pt2 wire count %u > %u", __func__, m->ref_count, 2691 pt2_wirecount_get(m, pte1_idx))); 2692 2693 /* 2694 * It's possible that the L2 page table was never used. 2695 * This happens when a section was created without promotion. 2696 */ 2697 if (pt2_is_full(m, va)) { 2698 pt2_wirecount_set(m, pte1_idx, 0); 2699 2700 /* 2701 * QQQ: We clear L2 page table now, so when L2 page table page 2702 * is going to be freed, we can set its PG_ZERO flag ... 2703 * This function is called only on section mappings, so 2704 * hopefully it's not too big an overhead. 2705 * 2706 * XXX: If pmap is current, existing PT2MAP mapping could be 2707 * used for zeroing.
2708 */ 2709 pmap_zero_page_area(m, page_pt2off(pte1_idx), NB_IN_PT2); 2710 } 2711 #ifdef INVARIANTS 2712 else 2713 KASSERT(pt2_is_empty(m, va), ("%s: PT2 is not empty (%u)", 2714 __func__, pt2_wirecount_get(m, pte1_idx))); 2715 #endif 2716 if (pt2pg_is_empty(m)) { 2717 pmap_unwire_pt2pg(pmap, va, m); 2718 pmap_add_delayed_free_list(m, free); 2719 } 2720 } 2721 2722 /* 2723 * After removing a L2 page table entry, this routine is used to 2724 * conditionally free the page, and manage the hold/wire counts. 2725 */ 2726 static boolean_t 2727 pmap_unuse_pt2(pmap_t pmap, vm_offset_t va, struct spglist *free) 2728 { 2729 pt1_entry_t pte1; 2730 vm_page_t mpte; 2731 2732 if (va >= VM_MAXUSER_ADDRESS) 2733 return (FALSE); 2734 pte1 = pte1_load(pmap_pte1(pmap, va)); 2735 mpte = PHYS_TO_VM_PAGE(pte1_link_pa(pte1)); 2736 return (pmap_unwire_pt2(pmap, va, mpte, free)); 2737 } 2738 2739 /************************************* 2740 * 2741 * Page management routines. 2742 * 2743 *************************************/ 2744 2745 CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE); 2746 CTASSERT(_NPCM == 11); 2747 CTASSERT(_NPCPV == 336); 2748 2749 static __inline struct pv_chunk * 2750 pv_to_chunk(pv_entry_t pv) 2751 { 2752 2753 return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK)); 2754 } 2755 2756 #define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap) 2757 2758 #define PC_FREE0_9 0xfffffffful /* Free values for index 0 through 9 */ 2759 #define PC_FREE10 0x0000fffful /* Free values for index 10 */ 2760 2761 static const uint32_t pc_freemask[_NPCM] = { 2762 PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, 2763 PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, 2764 PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, 2765 PC_FREE0_9, PC_FREE10 2766 }; 2767 2768 SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0, 2769 "Current number of pv entries"); 2770 2771 #ifdef PV_STATS 2772 static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail; 2773 2774 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0, 2775 "Current number of pv entry chunks"); 2776 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0, 2777 "Current number of pv entry chunks allocated"); 2778 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0, 2779 "Current number of pv entry chunks frees"); 2780 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 2781 0, "Number of times tried to get a chunk page but failed."); 2782 2783 static long pv_entry_frees, pv_entry_allocs; 2784 static int pv_entry_spare; 2785 2786 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0, 2787 "Current number of pv entry frees"); 2788 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 2789 0, "Current number of pv entry allocs"); 2790 SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0, 2791 "Current number of spare pv entries"); 2792 #endif 2793 2794 /* 2795 * Is given page managed? 
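That is, is the physical address backed by a vm_page that is not marked VPO_UNMANAGED.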
2796 */ 2797 static __inline bool 2798 is_managed(vm_paddr_t pa) 2799 { 2800 vm_page_t m; 2801 2802 m = PHYS_TO_VM_PAGE(pa); 2803 if (m == NULL) 2804 return (false); 2805 return ((m->oflags & VPO_UNMANAGED) == 0); 2806 } 2807 2808 static __inline bool 2809 pte1_is_managed(pt1_entry_t pte1) 2810 { 2811 2812 return (is_managed(pte1_pa(pte1))); 2813 } 2814 2815 static __inline bool 2816 pte2_is_managed(pt2_entry_t pte2) 2817 { 2818 2819 return (is_managed(pte2_pa(pte2))); 2820 } 2821 2822 /* 2823 * We are in a serious low memory condition. Resort to 2824 * drastic measures to free some pages so we can allocate 2825 * another pv entry chunk. 2826 */ 2827 static vm_page_t 2828 pmap_pv_reclaim(pmap_t locked_pmap) 2829 { 2830 struct pch newtail; 2831 struct pv_chunk *pc; 2832 struct md_page *pvh; 2833 pt1_entry_t *pte1p; 2834 pmap_t pmap; 2835 pt2_entry_t *pte2p, tpte2; 2836 pv_entry_t pv; 2837 vm_offset_t va; 2838 vm_page_t m, m_pc; 2839 struct spglist free; 2840 uint32_t inuse; 2841 int bit, field, freed; 2842 2843 PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED); 2844 pmap = NULL; 2845 m_pc = NULL; 2846 SLIST_INIT(&free); 2847 TAILQ_INIT(&newtail); 2848 while ((pc = TAILQ_FIRST(&pv_chunks)) != NULL && (pv_vafree == 0 || 2849 SLIST_EMPTY(&free))) { 2850 TAILQ_REMOVE(&pv_chunks, pc, pc_lru); 2851 if (pmap != pc->pc_pmap) { 2852 if (pmap != NULL) { 2853 if (pmap != locked_pmap) 2854 PMAP_UNLOCK(pmap); 2855 } 2856 pmap = pc->pc_pmap; 2857 /* Avoid deadlock and lock recursion. */ 2858 if (pmap > locked_pmap) 2859 PMAP_LOCK(pmap); 2860 else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap)) { 2861 pmap = NULL; 2862 TAILQ_INSERT_TAIL(&newtail, pc, pc_lru); 2863 continue; 2864 } 2865 } 2866 2867 /* 2868 * Destroy every non-wired, 4 KB page mapping in the chunk. 2869 */ 2870 freed = 0; 2871 for (field = 0; field < _NPCM; field++) { 2872 for (inuse = ~pc->pc_map[field] & pc_freemask[field]; 2873 inuse != 0; inuse &= ~(1UL << bit)) { 2874 bit = ffs(inuse) - 1; 2875 pv = &pc->pc_pventry[field * 32 + bit]; 2876 va = pv->pv_va; 2877 pte1p = pmap_pte1(pmap, va); 2878 if (pte1_is_section(pte1_load(pte1p))) 2879 continue; 2880 pte2p = pmap_pte2(pmap, va); 2881 tpte2 = pte2_load(pte2p); 2882 if ((tpte2 & PTE2_W) == 0) 2883 tpte2 = pte2_load_clear(pte2p); 2884 pmap_pte2_release(pte2p); 2885 if ((tpte2 & PTE2_W) != 0) 2886 continue; 2887 KASSERT(tpte2 != 0, 2888 ("pmap_pv_reclaim: pmap %p va %#x zero pte", 2889 pmap, va)); 2890 pmap_tlb_flush(pmap, va); 2891 m = PHYS_TO_VM_PAGE(pte2_pa(tpte2)); 2892 if (pte2_is_dirty(tpte2)) 2893 vm_page_dirty(m); 2894 if ((tpte2 & PTE2_A) != 0) 2895 vm_page_aflag_set(m, PGA_REFERENCED); 2896 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); 2897 if (TAILQ_EMPTY(&m->md.pv_list) && 2898 (m->flags & PG_FICTITIOUS) == 0) { 2899 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 2900 if (TAILQ_EMPTY(&pvh->pv_list)) { 2901 vm_page_aflag_clear(m, 2902 PGA_WRITEABLE); 2903 } 2904 } 2905 pc->pc_map[field] |= 1UL << bit; 2906 pmap_unuse_pt2(pmap, va, &free); 2907 freed++; 2908 } 2909 } 2910 if (freed == 0) { 2911 TAILQ_INSERT_TAIL(&newtail, pc, pc_lru); 2912 continue; 2913 } 2914 /* Every freed mapping is for a 4 KB page. 
*/ 2915 pmap->pm_stats.resident_count -= freed; 2916 PV_STAT(pv_entry_frees += freed); 2917 PV_STAT(pv_entry_spare += freed); 2918 pv_entry_count -= freed; 2919 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2920 for (field = 0; field < _NPCM; field++) 2921 if (pc->pc_map[field] != pc_freemask[field]) { 2922 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, 2923 pc_list); 2924 TAILQ_INSERT_TAIL(&newtail, pc, pc_lru); 2925 2926 /* 2927 * One freed pv entry in locked_pmap is 2928 * sufficient. 2929 */ 2930 if (pmap == locked_pmap) 2931 goto out; 2932 break; 2933 } 2934 if (field == _NPCM) { 2935 PV_STAT(pv_entry_spare -= _NPCPV); 2936 PV_STAT(pc_chunk_count--); 2937 PV_STAT(pc_chunk_frees++); 2938 /* Entire chunk is free; return it. */ 2939 m_pc = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc)); 2940 pmap_qremove((vm_offset_t)pc, 1); 2941 pmap_pte2list_free(&pv_vafree, (vm_offset_t)pc); 2942 break; 2943 } 2944 } 2945 out: 2946 TAILQ_CONCAT(&pv_chunks, &newtail, pc_lru); 2947 if (pmap != NULL) { 2948 if (pmap != locked_pmap) 2949 PMAP_UNLOCK(pmap); 2950 } 2951 if (m_pc == NULL && pv_vafree != 0 && SLIST_EMPTY(&free)) { 2952 m_pc = SLIST_FIRST(&free); 2953 SLIST_REMOVE_HEAD(&free, plinks.s.ss); 2954 /* Recycle a freed page table page. */ 2955 m_pc->ref_count = 1; 2956 vm_wire_add(1); 2957 } 2958 vm_page_free_pages_toq(&free, false); 2959 return (m_pc); 2960 } 2961 2962 static void 2963 free_pv_chunk(struct pv_chunk *pc) 2964 { 2965 vm_page_t m; 2966 2967 TAILQ_REMOVE(&pv_chunks, pc, pc_lru); 2968 PV_STAT(pv_entry_spare -= _NPCPV); 2969 PV_STAT(pc_chunk_count--); 2970 PV_STAT(pc_chunk_frees++); 2971 /* entire chunk is free, return it */ 2972 m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc)); 2973 pmap_qremove((vm_offset_t)pc, 1); 2974 vm_page_unwire_noq(m); 2975 vm_page_free(m); 2976 pmap_pte2list_free(&pv_vafree, (vm_offset_t)pc); 2977 } 2978 2979 /* 2980 * Free the pv_entry back to the free list. 2981 */ 2982 static void 2983 free_pv_entry(pmap_t pmap, pv_entry_t pv) 2984 { 2985 struct pv_chunk *pc; 2986 int idx, field, bit; 2987 2988 rw_assert(&pvh_global_lock, RA_WLOCKED); 2989 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2990 PV_STAT(pv_entry_frees++); 2991 PV_STAT(pv_entry_spare++); 2992 pv_entry_count--; 2993 pc = pv_to_chunk(pv); 2994 idx = pv - &pc->pc_pventry[0]; 2995 field = idx / 32; 2996 bit = idx % 32; 2997 pc->pc_map[field] |= 1ul << bit; 2998 for (idx = 0; idx < _NPCM; idx++) 2999 if (pc->pc_map[idx] != pc_freemask[idx]) { 3000 /* 3001 * 98% of the time, pc is already at the head of the 3002 * list. If it isn't already, move it to the head. 3003 */ 3004 if (__predict_false(TAILQ_FIRST(&pmap->pm_pvchunk) != 3005 pc)) { 3006 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 3007 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, 3008 pc_list); 3009 } 3010 return; 3011 } 3012 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 3013 free_pv_chunk(pc); 3014 } 3015 3016 /* 3017 * Get a new pv_entry, allocating a block from the system 3018 * when needed. 
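Entries are carved out of page-sized pv chunks; free slots are tracked by the pc_map[] bitmap (a set bit means the slot is free), and a fresh chunk page is allocated and mapped with pmap_qenter() only when the pmap has no chunk with a free slot.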
3019 */ 3020 static pv_entry_t 3021 get_pv_entry(pmap_t pmap, boolean_t try) 3022 { 3023 static const struct timeval printinterval = { 60, 0 }; 3024 static struct timeval lastprint; 3025 int bit, field; 3026 pv_entry_t pv; 3027 struct pv_chunk *pc; 3028 vm_page_t m; 3029 3030 rw_assert(&pvh_global_lock, RA_WLOCKED); 3031 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3032 PV_STAT(pv_entry_allocs++); 3033 pv_entry_count++; 3034 if (pv_entry_count > pv_entry_high_water) 3035 if (ratecheck(&lastprint, &printinterval)) 3036 printf("Approaching the limit on PV entries, consider " 3037 "increasing either the vm.pmap.shpgperproc or the " 3038 "vm.pmap.pv_entries tunable.\n"); 3039 retry: 3040 pc = TAILQ_FIRST(&pmap->pm_pvchunk); 3041 if (pc != NULL) { 3042 for (field = 0; field < _NPCM; field++) { 3043 if (pc->pc_map[field]) { 3044 bit = ffs(pc->pc_map[field]) - 1; 3045 break; 3046 } 3047 } 3048 if (field < _NPCM) { 3049 pv = &pc->pc_pventry[field * 32 + bit]; 3050 pc->pc_map[field] &= ~(1ul << bit); 3051 /* If this was the last item, move it to tail */ 3052 for (field = 0; field < _NPCM; field++) 3053 if (pc->pc_map[field] != 0) { 3054 PV_STAT(pv_entry_spare--); 3055 return (pv); /* not full, return */ 3056 } 3057 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 3058 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list); 3059 PV_STAT(pv_entry_spare--); 3060 return (pv); 3061 } 3062 } 3063 /* 3064 * Access to the pte2list "pv_vafree" is synchronized by the pvh 3065 * global lock. If "pv_vafree" is currently non-empty, it will 3066 * remain non-empty until pmap_pte2list_alloc() completes. 3067 */ 3068 if (pv_vafree == 0 || (m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | 3069 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) { 3070 if (try) { 3071 pv_entry_count--; 3072 PV_STAT(pc_chunk_tryfail++); 3073 return (NULL); 3074 } 3075 m = pmap_pv_reclaim(pmap); 3076 if (m == NULL) 3077 goto retry; 3078 } 3079 PV_STAT(pc_chunk_count++); 3080 PV_STAT(pc_chunk_allocs++); 3081 pc = (struct pv_chunk *)pmap_pte2list_alloc(&pv_vafree); 3082 pmap_qenter((vm_offset_t)pc, &m, 1); 3083 pc->pc_pmap = pmap; 3084 pc->pc_map[0] = pc_freemask[0] & ~1ul; /* preallocated bit 0 */ 3085 for (field = 1; field < _NPCM; field++) 3086 pc->pc_map[field] = pc_freemask[field]; 3087 TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru); 3088 pv = &pc->pc_pventry[0]; 3089 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); 3090 PV_STAT(pv_entry_spare += _NPCPV - 1); 3091 return (pv); 3092 } 3093 3094 /* 3095 * Create a pv entry for page at pa for 3096 * (pmap, va). 
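The caller must hold the pvh global lock and the pmap lock; the new entry is appended to the page's md.pv_list.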
3097 */ 3098 static void 3099 pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m) 3100 { 3101 pv_entry_t pv; 3102 3103 rw_assert(&pvh_global_lock, RA_WLOCKED); 3104 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3105 pv = get_pv_entry(pmap, FALSE); 3106 pv->pv_va = va; 3107 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); 3108 } 3109 3110 static __inline pv_entry_t 3111 pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va) 3112 { 3113 pv_entry_t pv; 3114 3115 rw_assert(&pvh_global_lock, RA_WLOCKED); 3116 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { 3117 if (pmap == PV_PMAP(pv) && va == pv->pv_va) { 3118 TAILQ_REMOVE(&pvh->pv_list, pv, pv_next); 3119 break; 3120 } 3121 } 3122 return (pv); 3123 } 3124 3125 static void 3126 pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va) 3127 { 3128 pv_entry_t pv; 3129 3130 pv = pmap_pvh_remove(pvh, pmap, va); 3131 KASSERT(pv != NULL, ("pmap_pvh_free: pv not found")); 3132 free_pv_entry(pmap, pv); 3133 } 3134 3135 static void 3136 pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va) 3137 { 3138 struct md_page *pvh; 3139 3140 rw_assert(&pvh_global_lock, RA_WLOCKED); 3141 pmap_pvh_free(&m->md, pmap, va); 3142 if (TAILQ_EMPTY(&m->md.pv_list) && (m->flags & PG_FICTITIOUS) == 0) { 3143 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 3144 if (TAILQ_EMPTY(&pvh->pv_list)) 3145 vm_page_aflag_clear(m, PGA_WRITEABLE); 3146 } 3147 } 3148 3149 static void 3150 pmap_pv_demote_pte1(pmap_t pmap, vm_offset_t va, vm_paddr_t pa) 3151 { 3152 struct md_page *pvh; 3153 pv_entry_t pv; 3154 vm_offset_t va_last; 3155 vm_page_t m; 3156 3157 rw_assert(&pvh_global_lock, RA_WLOCKED); 3158 KASSERT((pa & PTE1_OFFSET) == 0, 3159 ("pmap_pv_demote_pte1: pa is not 1mpage aligned")); 3160 3161 /* 3162 * Transfer the 1mpage's pv entry for this mapping to the first 3163 * page's pv list. 3164 */ 3165 pvh = pa_to_pvh(pa); 3166 va = pte1_trunc(va); 3167 pv = pmap_pvh_remove(pvh, pmap, va); 3168 KASSERT(pv != NULL, ("pmap_pv_demote_pte1: pv not found")); 3169 m = PHYS_TO_VM_PAGE(pa); 3170 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); 3171 /* Instantiate the remaining NPTE2_IN_PT2 - 1 pv entries. */ 3172 va_last = va + PTE1_SIZE - PAGE_SIZE; 3173 do { 3174 m++; 3175 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3176 ("pmap_pv_demote_pte1: page %p is not managed", m)); 3177 va += PAGE_SIZE; 3178 pmap_insert_entry(pmap, va, m); 3179 } while (va < va_last); 3180 } 3181 3182 #if VM_NRESERVLEVEL > 0 3183 static void 3184 pmap_pv_promote_pte1(pmap_t pmap, vm_offset_t va, vm_paddr_t pa) 3185 { 3186 struct md_page *pvh; 3187 pv_entry_t pv; 3188 vm_offset_t va_last; 3189 vm_page_t m; 3190 3191 rw_assert(&pvh_global_lock, RA_WLOCKED); 3192 KASSERT((pa & PTE1_OFFSET) == 0, 3193 ("pmap_pv_promote_pte1: pa is not 1mpage aligned")); 3194 3195 /* 3196 * Transfer the first page's pv entry for this mapping to the 3197 * 1mpage's pv list. Aside from avoiding the cost of a call 3198 * to get_pv_entry(), a transfer avoids the possibility that 3199 * get_pv_entry() calls pmap_pv_reclaim() and that pmap_pv_reclaim() 3200 * removes one of the mappings that is being promoted. 3201 */ 3202 m = PHYS_TO_VM_PAGE(pa); 3203 va = pte1_trunc(va); 3204 pv = pmap_pvh_remove(&m->md, pmap, va); 3205 KASSERT(pv != NULL, ("pmap_pv_promote_pte1: pv not found")); 3206 pvh = pa_to_pvh(pa); 3207 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next); 3208 /* Free the remaining NPTE2_IN_PT2 - 1 pv entries. 
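Each small page covered by the new 1MB mapping gives up its per-page pv entry, as the whole range is now tracked by the single entry moved to the section's pv list above.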
*/ 3209 va_last = va + PTE1_SIZE - PAGE_SIZE; 3210 do { 3211 m++; 3212 va += PAGE_SIZE; 3213 pmap_pvh_free(&m->md, pmap, va); 3214 } while (va < va_last); 3215 } 3216 #endif 3217 3218 /* 3219 * Conditionally create a pv entry. 3220 */ 3221 static boolean_t 3222 pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m) 3223 { 3224 pv_entry_t pv; 3225 3226 rw_assert(&pvh_global_lock, RA_WLOCKED); 3227 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3228 if (pv_entry_count < pv_entry_high_water && 3229 (pv = get_pv_entry(pmap, TRUE)) != NULL) { 3230 pv->pv_va = va; 3231 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); 3232 return (TRUE); 3233 } else 3234 return (FALSE); 3235 } 3236 3237 /* 3238 * Create the pv entries for each of the pages within a section. 3239 */ 3240 static bool 3241 pmap_pv_insert_pte1(pmap_t pmap, vm_offset_t va, pt1_entry_t pte1, u_int flags) 3242 { 3243 struct md_page *pvh; 3244 pv_entry_t pv; 3245 bool noreclaim; 3246 3247 rw_assert(&pvh_global_lock, RA_WLOCKED); 3248 noreclaim = (flags & PMAP_ENTER_NORECLAIM) != 0; 3249 if ((noreclaim && pv_entry_count >= pv_entry_high_water) || 3250 (pv = get_pv_entry(pmap, noreclaim)) == NULL) 3251 return (false); 3252 pv->pv_va = va; 3253 pvh = pa_to_pvh(pte1_pa(pte1)); 3254 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next); 3255 return (true); 3256 } 3257 3258 static inline void 3259 pmap_tlb_flush_pte1(pmap_t pmap, vm_offset_t va, pt1_entry_t npte1) 3260 { 3261 3262 /* Kill all the small mappings or the big one only. */ 3263 if (pte1_is_section(npte1)) 3264 pmap_tlb_flush_range(pmap, pte1_trunc(va), PTE1_SIZE); 3265 else 3266 pmap_tlb_flush(pmap, pte1_trunc(va)); 3267 } 3268 3269 /* 3270 * Update kernel pte1 on all pmaps. 3271 * 3272 * The following function is called on only one cpu with interrupts disabled. 3273 * In SMP case, smp_rendezvous_cpus() is used to stop other cpus. This way 3274 * nobody can invoke explicit hardware table walk during the update of pte1. 3275 * Unsolicited hardware table walk can still happen, invoked by speculative 3276 * data or instruction prefetch or even by speculative hardware table walk. 3277 * 3278 * The break-before-make approach should be implemented here. However, it's 3279 * not so easy to do that for kernel mappings, as the kernel would then 3280 * voluntarily, if briefly, unmap translations that it may itself be using. 3281 */ 3282 static void 3283 pmap_update_pte1_kernel(vm_offset_t va, pt1_entry_t npte1) 3284 { 3285 pmap_t pmap; 3286 pt1_entry_t *pte1p; 3287 3288 /* 3289 * Get current pmap. Interrupts should be disabled here 3290 * so PCPU_GET() is done atomically. 3291 */ 3292 pmap = PCPU_GET(curpmap); 3293 if (pmap == NULL) 3294 pmap = kernel_pmap; 3295 3296 /* 3297 * (1) Change pte1 on current pmap. 3298 * (2) Flush all obsolete TLB entries on current CPU. 3299 * (3) Change pte1 on all pmaps. 3300 * (4) Flush all obsolete TLB entries on all CPUs in SMP case. 3301 */ 3302 3303 pte1p = pmap_pte1(pmap, va); 3304 pte1_store(pte1p, npte1); 3305 3306 /* Kill all the small mappings or the big one only. */ 3307 if (pte1_is_section(npte1)) { 3308 pmap_pte1_kern_promotions++; 3309 tlb_flush_range_local(pte1_trunc(va), PTE1_SIZE); 3310 } else { 3311 pmap_pte1_kern_demotions++; 3312 tlb_flush_local(pte1_trunc(va)); 3313 } 3314 3315 /* 3316 * In SMP case, this function is called when all cpus are at smp 3317 * rendezvous, so there is no need to use 'allpmaps_lock' lock here. 3318 * In UP case, the function is called with this lock locked.
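The new pte1 is then stored into every pmap on the allpmaps list, because kernel L1 entries are replicated into each process's L1 page table.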
3319 */ 3320 LIST_FOREACH(pmap, &allpmaps, pm_list) { 3321 pte1p = pmap_pte1(pmap, va); 3322 pte1_store(pte1p, npte1); 3323 } 3324 3325 #ifdef SMP 3326 /* Kill all the small mappings or the big one only. */ 3327 if (pte1_is_section(npte1)) 3328 tlb_flush_range(pte1_trunc(va), PTE1_SIZE); 3329 else 3330 tlb_flush(pte1_trunc(va)); 3331 #endif 3332 } 3333 3334 #ifdef SMP 3335 struct pte1_action { 3336 vm_offset_t va; 3337 pt1_entry_t npte1; 3338 u_int update; /* CPU that updates the PTE1 */ 3339 }; 3340 3341 static void 3342 pmap_update_pte1_action(void *arg) 3343 { 3344 struct pte1_action *act = arg; 3345 3346 if (act->update == PCPU_GET(cpuid)) 3347 pmap_update_pte1_kernel(act->va, act->npte1); 3348 } 3349 3350 /* 3351 * Change pte1 on current pmap. 3352 * Note that kernel pte1 must be changed on all pmaps. 3353 * 3354 * According to the architecture reference manual published by ARM, 3355 * the behaviour is UNPREDICTABLE when two or more TLB entries map the same VA. 3356 * According to this manual, UNPREDICTABLE behaviours must never happen in 3357 * a viable system. In contrast, on x86 processors, it is not specified which 3358 * TLB entry mapping the virtual address will be used, but the MMU doesn't 3359 * generate a bogus translation the way it does on Cortex-A8 rev 2 (Beaglebone 3360 * Black). 3361 * 3362 * It's a problem when either promotion or demotion is being done. The pte1 3363 * update and appropriate TLB flush must be done atomically in general. 3364 */ 3365 static void 3366 pmap_change_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t va, 3367 pt1_entry_t npte1) 3368 { 3369 3370 if (pmap == kernel_pmap) { 3371 struct pte1_action act; 3372 3373 sched_pin(); 3374 act.va = va; 3375 act.npte1 = npte1; 3376 act.update = PCPU_GET(cpuid); 3377 smp_rendezvous_cpus(all_cpus, smp_no_rendezvous_barrier, 3378 pmap_update_pte1_action, NULL, &act); 3379 sched_unpin(); 3380 } else { 3381 register_t cspr; 3382 3383 /* 3384 * Use break-before-make approach for changing userland 3385 * mappings. It can cause L1 translation aborts on other 3386 * cores in SMP case. So, special treatment is implemented 3387 * in pmap_fault(). To reduce the likelihood that another core 3388 * will be affected by the broken mapping, disable interrupts 3389 * until the mapping change is completed. 3390 */ 3391 cspr = disable_interrupts(PSR_I | PSR_F); 3392 pte1_clear(pte1p); 3393 pmap_tlb_flush_pte1(pmap, va, npte1); 3394 pte1_store(pte1p, npte1); 3395 restore_interrupts(cspr); 3396 } 3397 } 3398 #else 3399 static void 3400 pmap_change_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t va, 3401 pt1_entry_t npte1) 3402 { 3403 3404 if (pmap == kernel_pmap) { 3405 mtx_lock_spin(&allpmaps_lock); 3406 pmap_update_pte1_kernel(va, npte1); 3407 mtx_unlock_spin(&allpmaps_lock); 3408 } else { 3409 register_t cspr; 3410 3411 /* 3412 * Use break-before-make approach for changing userland 3413 * mappings. It's absolutely safe in UP case when interrupts 3414 * are disabled. 3415 */ 3416 cspr = disable_interrupts(PSR_I | PSR_F); 3417 pte1_clear(pte1p); 3418 pmap_tlb_flush_pte1(pmap, va, npte1); 3419 pte1_store(pte1p, npte1); 3420 restore_interrupts(cspr); 3421 } 3422 } 3423 #endif 3424 3425 #if VM_NRESERVLEVEL > 0 3426 /* 3427 * Tries to promote the NPTE2_IN_PT2, contiguous 4KB page mappings that are 3428 * within a single page table page (PT2) to a single 1MB page mapping. 
3429 * For promotion to occur, two conditions must be met: (1) the 4KB page 3430 * mappings must map aligned, contiguous physical memory and (2) the 4KB page 3431 * mappings must have identical characteristics. 3432 * 3433 * Managed (PG_MANAGED) mappings within the kernel address space are not 3434 * promoted. The reason is that kernel PTE1s are replicated in each pmap but 3435 * pmap_remove_write(), pmap_clear_modify(), and pmap_clear_reference() only 3436 * read the PTE1 from the kernel pmap. 3437 */ 3438 static void 3439 pmap_promote_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t va) 3440 { 3441 pt1_entry_t npte1; 3442 pt2_entry_t *fpte2p, fpte2, fpte2_fav; 3443 pt2_entry_t *pte2p, pte2; 3444 vm_offset_t pteva __unused; 3445 vm_page_t m __unused; 3446 3447 PDEBUG(6, printf("%s(%p): try for va %#x pte1 %#x at %p\n", __func__, 3448 pmap, va, pte1_load(pte1p), pte1p)); 3449 3450 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3451 3452 /* 3453 * Examine the first PTE2 in the specified PT2. Abort if this PTE2 is 3454 * either invalid, unused, or does not map the first 4KB physical page 3455 * within a 1MB page. 3456 */ 3457 fpte2p = pmap_pte2_quick(pmap, pte1_trunc(va)); 3458 fpte2 = pte2_load(fpte2p); 3459 if ((fpte2 & ((PTE2_FRAME & PTE1_OFFSET) | PTE2_A | PTE2_V)) != 3460 (PTE2_A | PTE2_V)) { 3461 pmap_pte1_p_failures++; 3462 CTR3(KTR_PMAP, "%s: failure(1) for va %#x in pmap %p", 3463 __func__, va, pmap); 3464 return; 3465 } 3466 if (pte2_is_managed(fpte2) && pmap == kernel_pmap) { 3467 pmap_pte1_p_failures++; 3468 CTR3(KTR_PMAP, "%s: failure(2) for va %#x in pmap %p", 3469 __func__, va, pmap); 3470 return; 3471 } 3472 if ((fpte2 & (PTE2_NM | PTE2_RO)) == PTE2_NM) { 3473 /* 3474 * When page is not modified, PTE2_RO can be set without 3475 * a TLB invalidation. 3476 */ 3477 fpte2 |= PTE2_RO; 3478 pte2_store(fpte2p, fpte2); 3479 } 3480 3481 /* 3482 * Examine each of the other PTE2s in the specified PT2. Abort if this 3483 * PTE2 maps an unexpected 4KB physical page or does not have identical 3484 * characteristics to the first PTE2. 3485 */ 3486 fpte2_fav = (fpte2 & (PTE2_FRAME | PTE2_A | PTE2_V)); 3487 fpte2_fav += PTE1_SIZE - PTE2_SIZE; /* examine from the end */ 3488 for (pte2p = fpte2p + NPTE2_IN_PT2 - 1; pte2p > fpte2p; pte2p--) { 3489 pte2 = pte2_load(pte2p); 3490 if ((pte2 & (PTE2_FRAME | PTE2_A | PTE2_V)) != fpte2_fav) { 3491 pmap_pte1_p_failures++; 3492 CTR3(KTR_PMAP, "%s: failure(3) for va %#x in pmap %p", 3493 __func__, va, pmap); 3494 return; 3495 } 3496 if ((pte2 & (PTE2_NM | PTE2_RO)) == PTE2_NM) { 3497 /* 3498 * When page is not modified, PTE2_RO can be set 3499 * without a TLB invalidation. See note above. 3500 */ 3501 pte2 |= PTE2_RO; 3502 pte2_store(pte2p, pte2); 3503 pteva = pte1_trunc(va) | (pte2 & PTE1_OFFSET & 3504 PTE2_FRAME); 3505 CTR3(KTR_PMAP, "%s: protect for va %#x in pmap %p", 3506 __func__, pteva, pmap); 3507 } 3508 if ((pte2 & PTE2_PROMOTE) != (fpte2 & PTE2_PROMOTE)) { 3509 pmap_pte1_p_failures++; 3510 CTR3(KTR_PMAP, "%s: failure(4) for va %#x in pmap %p", 3511 __func__, va, pmap); 3512 return; 3513 } 3514 3515 fpte2_fav -= PTE2_SIZE; 3516 } 3517 /* 3518 * The page table page in its current state will stay in PT2TAB 3519 * until the PTE1 mapping the section is demoted by pmap_demote_pte1() 3520 * or destroyed by pmap_remove_pte1(). 3521 * 3522 * Note that L2 page table size is not equal to PAGE_SIZE. 
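Several L2 page tables (NPT2_IN_PG of them) share one physical page, which is why the vm_page is looked up below via trunc_page() of the link address.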
3523 */ 3524 m = PHYS_TO_VM_PAGE(trunc_page(pte1_link_pa(pte1_load(pte1p)))); 3525 KASSERT(m >= vm_page_array && m < &vm_page_array[vm_page_array_size], 3526 ("%s: PT2 page is out of range", __func__)); 3527 KASSERT(m->pindex == (pte1_index(va) & ~PT2PG_MASK), 3528 ("%s: PT2 page's pindex is wrong", __func__)); 3529 3530 /* 3531 * Get pte1 from pte2 format. 3532 */ 3533 npte1 = (fpte2 & PTE1_FRAME) | ATTR_TO_L1(fpte2) | PTE1_V; 3534 3535 /* 3536 * Promote the pv entries. 3537 */ 3538 if (pte2_is_managed(fpte2)) 3539 pmap_pv_promote_pte1(pmap, va, pte1_pa(npte1)); 3540 3541 /* 3542 * Promote the mappings. 3543 */ 3544 pmap_change_pte1(pmap, pte1p, va, npte1); 3545 3546 pmap_pte1_promotions++; 3547 CTR3(KTR_PMAP, "%s: success for va %#x in pmap %p", 3548 __func__, va, pmap); 3549 3550 PDEBUG(6, printf("%s(%p): success for va %#x pte1 %#x(%#x) at %p\n", 3551 __func__, pmap, va, npte1, pte1_load(pte1p), pte1p)); 3552 } 3553 #endif /* VM_NRESERVLEVEL > 0 */ 3554 3555 /* 3556 * Zero L2 page table page. 3557 */ 3558 static __inline void 3559 pmap_clear_pt2(pt2_entry_t *fpte2p) 3560 { 3561 pt2_entry_t *pte2p; 3562 3563 for (pte2p = fpte2p; pte2p < fpte2p + NPTE2_IN_PT2; pte2p++) 3564 pte2_clear(pte2p); 3565 3566 } 3567 3568 /* 3569 * Removes a 1MB page mapping from the kernel pmap. 3570 */ 3571 static void 3572 pmap_remove_kernel_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t va) 3573 { 3574 vm_page_t m; 3575 uint32_t pte1_idx; 3576 pt2_entry_t *fpte2p; 3577 vm_paddr_t pt2_pa; 3578 3579 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3580 m = pmap_pt2_page(pmap, va); 3581 if (m == NULL) 3582 /* 3583 * QQQ: Is this function called only on promoted pte1? 3584 * We certainly do section mappings directly 3585 * (without promotion) in kernel !!! 3586 */ 3587 panic("%s: missing pt2 page", __func__); 3588 3589 pte1_idx = pte1_index(va); 3590 3591 /* 3592 * Initialize the L2 page table. 3593 */ 3594 fpte2p = page_pt2(pt2map_pt2pg(va), pte1_idx); 3595 pmap_clear_pt2(fpte2p); 3596 3597 /* 3598 * Remove the mapping. 3599 */ 3600 pt2_pa = page_pt2pa(VM_PAGE_TO_PHYS(m), pte1_idx); 3601 pmap_kenter_pte1(va, PTE1_LINK(pt2_pa)); 3602 3603 /* 3604 * QQQ: We do not need to invalidate PT2MAP mapping 3605 * as we did not change it. I.e. the L2 page table page 3606 * was and still is mapped the same way. 3607 */ 3608 } 3609 3610 /* 3611 * Do the things to unmap a section in a process 3612 */ 3613 static void 3614 pmap_remove_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t sva, 3615 struct spglist *free) 3616 { 3617 pt1_entry_t opte1; 3618 struct md_page *pvh; 3619 vm_offset_t eva, va; 3620 vm_page_t m; 3621 3622 PDEBUG(6, printf("%s(%p): va %#x pte1 %#x at %p\n", __func__, pmap, sva, 3623 pte1_load(pte1p), pte1p)); 3624 3625 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3626 KASSERT((sva & PTE1_OFFSET) == 0, 3627 ("%s: sva is not 1mpage aligned", __func__)); 3628 3629 /* 3630 * Clear and invalidate the mapping. It should occupy one and only TLB 3631 * entry. So, pmap_tlb_flush() called with aligned address should be 3632 * sufficient. 
3633 */ 3634 opte1 = pte1_load_clear(pte1p); 3635 pmap_tlb_flush(pmap, sva); 3636 3637 if (pte1_is_wired(opte1)) 3638 pmap->pm_stats.wired_count -= PTE1_SIZE / PAGE_SIZE; 3639 pmap->pm_stats.resident_count -= PTE1_SIZE / PAGE_SIZE; 3640 if (pte1_is_managed(opte1)) { 3641 pvh = pa_to_pvh(pte1_pa(opte1)); 3642 pmap_pvh_free(pvh, pmap, sva); 3643 eva = sva + PTE1_SIZE; 3644 for (va = sva, m = PHYS_TO_VM_PAGE(pte1_pa(opte1)); 3645 va < eva; va += PAGE_SIZE, m++) { 3646 if (pte1_is_dirty(opte1)) 3647 vm_page_dirty(m); 3648 if (opte1 & PTE1_A) 3649 vm_page_aflag_set(m, PGA_REFERENCED); 3650 if (TAILQ_EMPTY(&m->md.pv_list) && 3651 TAILQ_EMPTY(&pvh->pv_list)) 3652 vm_page_aflag_clear(m, PGA_WRITEABLE); 3653 } 3654 } 3655 if (pmap == kernel_pmap) { 3656 /* 3657 * L2 page table(s) can't be removed from kernel map as 3658 * kernel counts on it (stuff around pmap_growkernel()). 3659 */ 3660 pmap_remove_kernel_pte1(pmap, pte1p, sva); 3661 } else { 3662 /* 3663 * Get associated L2 page table page. 3664 * It's possible that the page was never allocated. 3665 */ 3666 m = pmap_pt2_page(pmap, sva); 3667 if (m != NULL) 3668 pmap_unwire_pt2_all(pmap, sva, m, free); 3669 } 3670 } 3671 3672 /* 3673 * Fills L2 page table page with mappings to consecutive physical pages. 3674 */ 3675 static __inline void 3676 pmap_fill_pt2(pt2_entry_t *fpte2p, pt2_entry_t npte2) 3677 { 3678 pt2_entry_t *pte2p; 3679 3680 for (pte2p = fpte2p; pte2p < fpte2p + NPTE2_IN_PT2; pte2p++) { 3681 pte2_store(pte2p, npte2); 3682 npte2 += PTE2_SIZE; 3683 } 3684 } 3685 3686 /* 3687 * Tries to demote a 1MB page mapping. If demotion fails, the 3688 * 1MB page mapping is invalidated. 3689 */ 3690 static boolean_t 3691 pmap_demote_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t va) 3692 { 3693 pt1_entry_t opte1, npte1; 3694 pt2_entry_t *fpte2p, npte2; 3695 vm_paddr_t pt2pg_pa, pt2_pa; 3696 vm_page_t m; 3697 struct spglist free; 3698 uint32_t pte1_idx, isnew = 0; 3699 3700 PDEBUG(6, printf("%s(%p): try for va %#x pte1 %#x at %p\n", __func__, 3701 pmap, va, pte1_load(pte1p), pte1p)); 3702 3703 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3704 3705 opte1 = pte1_load(pte1p); 3706 KASSERT(pte1_is_section(opte1), ("%s: opte1 not a section", __func__)); 3707 3708 if ((opte1 & PTE1_A) == 0 || (m = pmap_pt2_page(pmap, va)) == NULL) { 3709 KASSERT(!pte1_is_wired(opte1), 3710 ("%s: PT2 page for a wired mapping is missing", __func__)); 3711 3712 /* 3713 * Invalidate the 1MB page mapping and return 3714 * "failure" if the mapping was never accessed or the 3715 * allocation of the new page table page fails. 3716 */ 3717 if ((opte1 & PTE1_A) == 0 || (m = vm_page_alloc(NULL, 3718 pte1_index(va) & ~PT2PG_MASK, VM_ALLOC_NOOBJ | 3719 VM_ALLOC_NORMAL | VM_ALLOC_WIRED)) == NULL) { 3720 SLIST_INIT(&free); 3721 pmap_remove_pte1(pmap, pte1p, pte1_trunc(va), &free); 3722 vm_page_free_pages_toq(&free, false); 3723 CTR3(KTR_PMAP, "%s: failure for va %#x in pmap %p", 3724 __func__, va, pmap); 3725 return (FALSE); 3726 } 3727 if (va < VM_MAXUSER_ADDRESS) 3728 pmap->pm_stats.resident_count++; 3729 3730 isnew = 1; 3731 3732 /* 3733 * We init all L2 page tables in the page even if 3734 * we are going to change everything for one L2 page 3735 * table in a while. 3736 */ 3737 pt2pg_pa = pmap_pt2pg_init(pmap, va, m); 3738 } else { 3739 if (va < VM_MAXUSER_ADDRESS) { 3740 if (pt2_is_empty(m, va)) 3741 isnew = 1; /* Demoting section w/o promotion. 
*/ 3742 #ifdef INVARIANTS 3743 else 3744 KASSERT(pt2_is_full(m, va), ("%s: bad PT2 wire" 3745 " count %u", __func__, 3746 pt2_wirecount_get(m, pte1_index(va)))); 3747 #endif 3748 } 3749 } 3750 3751 pt2pg_pa = VM_PAGE_TO_PHYS(m); 3752 pte1_idx = pte1_index(va); 3753 /* 3754 * If the pmap is current, then the PT2MAP can provide access to 3755 * the page table page (promoted L2 page tables are not unmapped). 3756 * Otherwise, temporarily map the L2 page table page (m) into 3757 * the kernel's address space at either PADDR1 or PADDR2. 3758 * 3759 * Note that L2 page table size is not equal to PAGE_SIZE. 3760 */ 3761 if (pmap_is_current(pmap)) 3762 fpte2p = page_pt2(pt2map_pt2pg(va), pte1_idx); 3763 else if (curthread->td_pinned > 0 && rw_wowned(&pvh_global_lock)) { 3764 if (pte2_pa(pte2_load(PMAP1)) != pt2pg_pa) { 3765 pte2_store(PMAP1, PTE2_KPT(pt2pg_pa)); 3766 #ifdef SMP 3767 PMAP1cpu = PCPU_GET(cpuid); 3768 #endif 3769 tlb_flush_local((vm_offset_t)PADDR1); 3770 PMAP1changed++; 3771 } else 3772 #ifdef SMP 3773 if (PMAP1cpu != PCPU_GET(cpuid)) { 3774 PMAP1cpu = PCPU_GET(cpuid); 3775 tlb_flush_local((vm_offset_t)PADDR1); 3776 PMAP1changedcpu++; 3777 } else 3778 #endif 3779 PMAP1unchanged++; 3780 fpte2p = page_pt2((vm_offset_t)PADDR1, pte1_idx); 3781 } else { 3782 mtx_lock(&PMAP2mutex); 3783 if (pte2_pa(pte2_load(PMAP2)) != pt2pg_pa) { 3784 pte2_store(PMAP2, PTE2_KPT(pt2pg_pa)); 3785 tlb_flush((vm_offset_t)PADDR2); 3786 } 3787 fpte2p = page_pt2((vm_offset_t)PADDR2, pte1_idx); 3788 } 3789 pt2_pa = page_pt2pa(pt2pg_pa, pte1_idx); 3790 npte1 = PTE1_LINK(pt2_pa); 3791 3792 KASSERT((opte1 & PTE1_A) != 0, 3793 ("%s: opte1 is missing PTE1_A", __func__)); 3794 KASSERT((opte1 & (PTE1_NM | PTE1_RO)) != PTE1_NM, 3795 ("%s: opte1 has PTE1_NM", __func__)); 3796 3797 /* 3798 * Get pte2 from pte1 format. 3799 */ 3800 npte2 = pte1_pa(opte1) | ATTR_TO_L2(opte1) | PTE2_V; 3801 3802 /* 3803 * If the L2 page table page is new, initialize it. If the mapping 3804 * has changed attributes, update the page table entries. 3805 */ 3806 if (isnew != 0) { 3807 pt2_wirecount_set(m, pte1_idx, NPTE2_IN_PT2); 3808 pmap_fill_pt2(fpte2p, npte2); 3809 } else if ((pte2_load(fpte2p) & PTE2_PROMOTE) != 3810 (npte2 & PTE2_PROMOTE)) 3811 pmap_fill_pt2(fpte2p, npte2); 3812 3813 KASSERT(pte2_pa(pte2_load(fpte2p)) == pte2_pa(npte2), 3814 ("%s: fpte2p and npte2 map different physical addresses", 3815 __func__)); 3816 3817 if (fpte2p == PADDR2) 3818 mtx_unlock(&PMAP2mutex); 3819 3820 /* 3821 * Demote the mapping. This pmap is locked. The old PTE1 has 3822 * PTE1_A set. If the old PTE1 has not PTE1_RO set, it also 3823 * has not PTE1_NM set. Thus, there is no danger of a race with 3824 * another processor changing the setting of PTE1_A and/or PTE1_NM 3825 * between the read above and the store below. 3826 */ 3827 pmap_change_pte1(pmap, pte1p, va, npte1); 3828 3829 /* 3830 * Demote the pv entry. This depends on the earlier demotion 3831 * of the mapping. Specifically, the (re)creation of a per- 3832 * page pv entry might trigger the execution of pmap_pv_reclaim(), 3833 * which might reclaim a newly (re)created per-page pv entry 3834 * and destroy the associated mapping. In order to destroy 3835 * the mapping, the PTE1 must have already changed from mapping 3836 * the 1mpage to referencing the page table page. 
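 * Put differently, the ordering above is deliberate: pmap_change_pte1()
 * installs the PTE1_LINK entry before pmap_pv_demote_pte1() below may
 * allocate pv entries, so that any pmap_pv_reclaim() triggered by that
 * allocation tears down 4 KB mappings instead of the stale 1 MB section.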
3837 */ 3838 if (pte1_is_managed(opte1)) 3839 pmap_pv_demote_pte1(pmap, va, pte1_pa(opte1)); 3840 3841 pmap_pte1_demotions++; 3842 CTR3(KTR_PMAP, "%s: success for va %#x in pmap %p", 3843 __func__, va, pmap); 3844 3845 PDEBUG(6, printf("%s(%p): success for va %#x pte1 %#x(%#x) at %p\n", 3846 __func__, pmap, va, npte1, pte1_load(pte1p), pte1p)); 3847 return (TRUE); 3848 } 3849 3850 /* 3851 * Insert the given physical page (p) at 3852 * the specified virtual address (v) in the 3853 * target physical map with the protection requested. 3854 * 3855 * If specified, the page will be wired down, meaning 3856 * that the related pte can not be reclaimed. 3857 * 3858 * NB: This is the only routine which MAY NOT lazy-evaluate 3859 * or lose information. That is, this routine must actually 3860 * insert this page into the given map NOW. 3861 */ 3862 int 3863 pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, 3864 u_int flags, int8_t psind) 3865 { 3866 pt1_entry_t *pte1p; 3867 pt2_entry_t *pte2p; 3868 pt2_entry_t npte2, opte2; 3869 pv_entry_t pv; 3870 vm_paddr_t opa, pa; 3871 vm_page_t mpte2, om; 3872 int rv; 3873 3874 va = trunc_page(va); 3875 KASSERT(va <= vm_max_kernel_address, ("%s: toobig", __func__)); 3876 KASSERT(va < UPT2V_MIN_ADDRESS || va >= UPT2V_MAX_ADDRESS, 3877 ("%s: invalid to pmap_enter page table pages (va: 0x%x)", __func__, 3878 va)); 3879 KASSERT((m->oflags & VPO_UNMANAGED) != 0 || va < kmi.clean_sva || 3880 va >= kmi.clean_eva, 3881 ("%s: managed mapping within the clean submap", __func__)); 3882 if ((m->oflags & VPO_UNMANAGED) == 0) 3883 VM_PAGE_OBJECT_BUSY_ASSERT(m); 3884 KASSERT((flags & PMAP_ENTER_RESERVED) == 0, 3885 ("%s: flags %u has reserved bits set", __func__, flags)); 3886 pa = VM_PAGE_TO_PHYS(m); 3887 npte2 = PTE2(pa, PTE2_A, vm_page_pte2_attr(m)); 3888 if ((flags & VM_PROT_WRITE) == 0) 3889 npte2 |= PTE2_NM; 3890 if ((prot & VM_PROT_WRITE) == 0) 3891 npte2 |= PTE2_RO; 3892 KASSERT((npte2 & (PTE2_NM | PTE2_RO)) != PTE2_RO, 3893 ("%s: flags includes VM_PROT_WRITE but prot doesn't", __func__)); 3894 if ((prot & VM_PROT_EXECUTE) == 0) 3895 npte2 |= PTE2_NX; 3896 if ((flags & PMAP_ENTER_WIRED) != 0) 3897 npte2 |= PTE2_W; 3898 if (va < VM_MAXUSER_ADDRESS) 3899 npte2 |= PTE2_U; 3900 if (pmap != kernel_pmap) 3901 npte2 |= PTE2_NG; 3902 3903 rw_wlock(&pvh_global_lock); 3904 PMAP_LOCK(pmap); 3905 sched_pin(); 3906 if (psind == 1) { 3907 /* Assert the required virtual and physical alignment. */ 3908 KASSERT((va & PTE1_OFFSET) == 0, 3909 ("%s: va unaligned", __func__)); 3910 KASSERT(m->psind > 0, ("%s: m->psind < psind", __func__)); 3911 rv = pmap_enter_pte1(pmap, va, PTE1_PA(pa) | ATTR_TO_L1(npte2) | 3912 PTE1_V, flags, m); 3913 goto out; 3914 } 3915 3916 /* 3917 * In the case that a page table page is not 3918 * resident, we are creating it here. 3919 */ 3920 if (va < VM_MAXUSER_ADDRESS) { 3921 mpte2 = pmap_allocpte2(pmap, va, flags); 3922 if (mpte2 == NULL) { 3923 KASSERT((flags & PMAP_ENTER_NOSLEEP) != 0, 3924 ("pmap_allocpte2 failed with sleep allowed")); 3925 rv = KERN_RESOURCE_SHORTAGE; 3926 goto out; 3927 } 3928 } else 3929 mpte2 = NULL; 3930 pte1p = pmap_pte1(pmap, va); 3931 if (pte1_is_section(pte1_load(pte1p))) 3932 panic("%s: attempted on 1MB page", __func__); 3933 pte2p = pmap_pte2_quick(pmap, va); 3934 if (pte2p == NULL) 3935 panic("%s: invalid L1 page table entry va=%#x", __func__, va); 3936 3937 om = NULL; 3938 opte2 = pte2_load(pte2p); 3939 opa = pte2_pa(opte2); 3940 /* 3941 * Mapping has not changed, must be protection or wiring change. 
3942 */ 3943 if (pte2_is_valid(opte2) && (opa == pa)) { 3944 /* 3945 * Wiring change, just update stats. We don't worry about 3946 * wiring PT2 pages as they remain resident as long as there 3947 * are valid mappings in them. Hence, if a user page is wired, 3948 * the PT2 page will be also. 3949 */ 3950 if (pte2_is_wired(npte2) && !pte2_is_wired(opte2)) 3951 pmap->pm_stats.wired_count++; 3952 else if (!pte2_is_wired(npte2) && pte2_is_wired(opte2)) 3953 pmap->pm_stats.wired_count--; 3954 3955 /* 3956 * Remove extra pte2 reference 3957 */ 3958 if (mpte2) 3959 pt2_wirecount_dec(mpte2, pte1_index(va)); 3960 if ((m->oflags & VPO_UNMANAGED) == 0) 3961 om = m; 3962 goto validate; 3963 } 3964 3965 /* 3966 * QQQ: We think that changing physical address on writeable mapping 3967 * is not safe. Well, maybe on kernel address space with correct 3968 * locking, it can make a sense. However, we have no idea why 3969 * anyone should do that on user address space. Are we wrong? 3970 */ 3971 KASSERT((opa == 0) || (opa == pa) || 3972 !pte2_is_valid(opte2) || ((opte2 & PTE2_RO) != 0), 3973 ("%s: pmap %p va %#x(%#x) opa %#x pa %#x - gotcha %#x %#x!", 3974 __func__, pmap, va, opte2, opa, pa, flags, prot)); 3975 3976 pv = NULL; 3977 3978 /* 3979 * Mapping has changed, invalidate old range and fall through to 3980 * handle validating new mapping. 3981 */ 3982 if (opa) { 3983 if (pte2_is_wired(opte2)) 3984 pmap->pm_stats.wired_count--; 3985 om = PHYS_TO_VM_PAGE(opa); 3986 if (om != NULL && (om->oflags & VPO_UNMANAGED) != 0) 3987 om = NULL; 3988 if (om != NULL) 3989 pv = pmap_pvh_remove(&om->md, pmap, va); 3990 3991 /* 3992 * Remove extra pte2 reference 3993 */ 3994 if (mpte2 != NULL) 3995 pt2_wirecount_dec(mpte2, va >> PTE1_SHIFT); 3996 } else 3997 pmap->pm_stats.resident_count++; 3998 3999 /* 4000 * Enter on the PV list if part of our managed memory. 4001 */ 4002 if ((m->oflags & VPO_UNMANAGED) == 0) { 4003 if (pv == NULL) { 4004 pv = get_pv_entry(pmap, FALSE); 4005 pv->pv_va = va; 4006 } 4007 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); 4008 } else if (pv != NULL) 4009 free_pv_entry(pmap, pv); 4010 4011 /* 4012 * Increment counters 4013 */ 4014 if (pte2_is_wired(npte2)) 4015 pmap->pm_stats.wired_count++; 4016 4017 validate: 4018 /* 4019 * Now validate mapping with desired protection/wiring. 4020 */ 4021 if (prot & VM_PROT_WRITE) { 4022 if ((m->oflags & VPO_UNMANAGED) == 0) 4023 vm_page_aflag_set(m, PGA_WRITEABLE); 4024 } 4025 4026 /* 4027 * If the mapping or permission bits are different, we need 4028 * to update the pte2. 4029 * 4030 * QQQ: Think again and again what to do 4031 * if the mapping is going to be changed! 4032 */ 4033 if ((opte2 & ~(PTE2_NM | PTE2_A)) != (npte2 & ~(PTE2_NM | PTE2_A))) { 4034 /* 4035 * Sync icache if exec permission and attribute VM_MEMATTR_WB_WA 4036 * is set. Do it now, before the mapping is stored and made 4037 * valid for hardware table walk. If done later, there is a race 4038 * for other threads of current process in lazy loading case. 4039 * Don't do it for kernel memory which is mapped with exec 4040 * permission even if the memory isn't going to hold executable 4041 * code. The only time when icache sync is needed is after 4042 * kernel module is loaded and the relocation info is processed. 4043 * And it's done in elf_cpu_load_file(). 4044 * 4045 * QQQ: (1) Does it exist any better way where 4046 * or how to sync icache? 4047 * (2) Now, we do it on a page basis. 
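 *
 * Concretely, the test below only syncs when an executable user
 * mapping is being created over a different physical page (opa != pa)
 * or over one that was mapped no-execute so far (PTE2_NX set in
 * opte2), i.e. exactly the cases where stale instructions might still
 * sit in the instruction cache.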
4048 */ 4049 if ((prot & VM_PROT_EXECUTE) && pmap != kernel_pmap && 4050 m->md.pat_mode == VM_MEMATTR_WB_WA && 4051 (opa != pa || (opte2 & PTE2_NX))) 4052 cache_icache_sync_fresh(va, pa, PAGE_SIZE); 4053 4054 if (opte2 & PTE2_V) { 4055 /* Change mapping with break-before-make approach. */ 4056 opte2 = pte2_load_clear(pte2p); 4057 pmap_tlb_flush(pmap, va); 4058 pte2_store(pte2p, npte2); 4059 if (om != NULL) { 4060 KASSERT((om->oflags & VPO_UNMANAGED) == 0, 4061 ("%s: om %p unmanaged", __func__, om)); 4062 if ((opte2 & PTE2_A) != 0) 4063 vm_page_aflag_set(om, PGA_REFERENCED); 4064 if (pte2_is_dirty(opte2)) 4065 vm_page_dirty(om); 4066 if (TAILQ_EMPTY(&om->md.pv_list) && 4067 ((om->flags & PG_FICTITIOUS) != 0 || 4068 TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list))) 4069 vm_page_aflag_clear(om, PGA_WRITEABLE); 4070 } 4071 } else 4072 pte2_store(pte2p, npte2); 4073 } 4074 #if 0 4075 else { 4076 /* 4077 * QQQ: In time when both access and not mofified bits are 4078 * emulated by software, this should not happen. Some 4079 * analysis is need, if this really happen. Missing 4080 * tlb flush somewhere could be the reason. 4081 */ 4082 panic("%s: pmap %p va %#x opte2 %x npte2 %x !!", __func__, pmap, 4083 va, opte2, npte2); 4084 } 4085 #endif 4086 4087 #if VM_NRESERVLEVEL > 0 4088 /* 4089 * If both the L2 page table page and the reservation are fully 4090 * populated, then attempt promotion. 4091 */ 4092 if ((mpte2 == NULL || pt2_is_full(mpte2, va)) && 4093 sp_enabled && (m->flags & PG_FICTITIOUS) == 0 && 4094 vm_reserv_level_iffullpop(m) == 0) 4095 pmap_promote_pte1(pmap, pte1p, va); 4096 #endif 4097 4098 rv = KERN_SUCCESS; 4099 out: 4100 sched_unpin(); 4101 rw_wunlock(&pvh_global_lock); 4102 PMAP_UNLOCK(pmap); 4103 return (rv); 4104 } 4105 4106 /* 4107 * Do the things to unmap a page in a process. 4108 */ 4109 static int 4110 pmap_remove_pte2(pmap_t pmap, pt2_entry_t *pte2p, vm_offset_t va, 4111 struct spglist *free) 4112 { 4113 pt2_entry_t opte2; 4114 vm_page_t m; 4115 4116 rw_assert(&pvh_global_lock, RA_WLOCKED); 4117 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 4118 4119 /* Clear and invalidate the mapping. */ 4120 opte2 = pte2_load_clear(pte2p); 4121 pmap_tlb_flush(pmap, va); 4122 4123 KASSERT(pte2_is_valid(opte2), ("%s: pmap %p va %#x not link pte2 %#x", 4124 __func__, pmap, va, opte2)); 4125 4126 if (opte2 & PTE2_W) 4127 pmap->pm_stats.wired_count -= 1; 4128 pmap->pm_stats.resident_count -= 1; 4129 if (pte2_is_managed(opte2)) { 4130 m = PHYS_TO_VM_PAGE(pte2_pa(opte2)); 4131 if (pte2_is_dirty(opte2)) 4132 vm_page_dirty(m); 4133 if (opte2 & PTE2_A) 4134 vm_page_aflag_set(m, PGA_REFERENCED); 4135 pmap_remove_entry(pmap, m, va); 4136 } 4137 return (pmap_unuse_pt2(pmap, va, free)); 4138 } 4139 4140 /* 4141 * Remove a single page from a process address space. 4142 */ 4143 static void 4144 pmap_remove_page(pmap_t pmap, vm_offset_t va, struct spglist *free) 4145 { 4146 pt2_entry_t *pte2p; 4147 4148 rw_assert(&pvh_global_lock, RA_WLOCKED); 4149 KASSERT(curthread->td_pinned > 0, 4150 ("%s: curthread not pinned", __func__)); 4151 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 4152 if ((pte2p = pmap_pte2_quick(pmap, va)) == NULL || 4153 !pte2_is_valid(pte2_load(pte2p))) 4154 return; 4155 pmap_remove_pte2(pmap, pte2p, va, free); 4156 } 4157 4158 /* 4159 * Remove the given range of addresses from the specified map. 4160 * 4161 * It is assumed that the start and end are properly 4162 * rounded to the page size. 
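 *
 * A minimal usage sketch (hypothetical caller; "va" and "size" are
 * assumed to describe the region being torn down):
 *
 *	pmap_remove(pmap, trunc_page(va), round_page(va + size));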
4163 */ 4164 void 4165 pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 4166 { 4167 vm_offset_t nextva; 4168 pt1_entry_t *pte1p, pte1; 4169 pt2_entry_t *pte2p, pte2; 4170 struct spglist free; 4171 4172 /* 4173 * Perform an unsynchronized read. This is, however, safe. 4174 */ 4175 if (pmap->pm_stats.resident_count == 0) 4176 return; 4177 4178 SLIST_INIT(&free); 4179 4180 rw_wlock(&pvh_global_lock); 4181 sched_pin(); 4182 PMAP_LOCK(pmap); 4183 4184 /* 4185 * Special handling of removing one page. A very common 4186 * operation and easy to short circuit some code. 4187 */ 4188 if (sva + PAGE_SIZE == eva) { 4189 pte1 = pte1_load(pmap_pte1(pmap, sva)); 4190 if (pte1_is_link(pte1)) { 4191 pmap_remove_page(pmap, sva, &free); 4192 goto out; 4193 } 4194 } 4195 4196 for (; sva < eva; sva = nextva) { 4197 /* 4198 * Calculate address for next L2 page table. 4199 */ 4200 nextva = pte1_trunc(sva + PTE1_SIZE); 4201 if (nextva < sva) 4202 nextva = eva; 4203 if (pmap->pm_stats.resident_count == 0) 4204 break; 4205 4206 pte1p = pmap_pte1(pmap, sva); 4207 pte1 = pte1_load(pte1p); 4208 4209 /* 4210 * Weed out invalid mappings. Note: we assume that the L1 page 4211 * table is always allocated, and in kernel virtual. 4212 */ 4213 if (pte1 == 0) 4214 continue; 4215 4216 if (pte1_is_section(pte1)) { 4217 /* 4218 * Are we removing the entire large page? If not, 4219 * demote the mapping and fall through. 4220 */ 4221 if (sva + PTE1_SIZE == nextva && eva >= nextva) { 4222 pmap_remove_pte1(pmap, pte1p, sva, &free); 4223 continue; 4224 } else if (!pmap_demote_pte1(pmap, pte1p, sva)) { 4225 /* The large page mapping was destroyed. */ 4226 continue; 4227 } 4228 #ifdef INVARIANTS 4229 else { 4230 /* Update pte1 after demotion. */ 4231 pte1 = pte1_load(pte1p); 4232 } 4233 #endif 4234 } 4235 4236 KASSERT(pte1_is_link(pte1), ("%s: pmap %p va %#x pte1 %#x at %p" 4237 " is not link", __func__, pmap, sva, pte1, pte1p)); 4238 4239 /* 4240 * Limit our scan to either the end of the va represented 4241 * by the current L2 page table page, or to the end of the 4242 * range being removed. 4243 */ 4244 if (nextva > eva) 4245 nextva = eva; 4246 4247 for (pte2p = pmap_pte2_quick(pmap, sva); sva != nextva; 4248 pte2p++, sva += PAGE_SIZE) { 4249 pte2 = pte2_load(pte2p); 4250 if (!pte2_is_valid(pte2)) 4251 continue; 4252 if (pmap_remove_pte2(pmap, pte2p, sva, &free)) 4253 break; 4254 } 4255 } 4256 out: 4257 sched_unpin(); 4258 rw_wunlock(&pvh_global_lock); 4259 PMAP_UNLOCK(pmap); 4260 vm_page_free_pages_toq(&free, false); 4261 } 4262 4263 /* 4264 * Routine: pmap_remove_all 4265 * Function: 4266 * Removes this physical page from 4267 * all physical maps in which it resides. 4268 * Reflects back modify bits to the pager. 4269 * 4270 * Notes: 4271 * Original versions of this routine were very 4272 * inefficient because they iteratively called 4273 * pmap_remove (slow...) 
4274 */ 4275 4276 void 4277 pmap_remove_all(vm_page_t m) 4278 { 4279 struct md_page *pvh; 4280 pv_entry_t pv; 4281 pmap_t pmap; 4282 pt2_entry_t *pte2p, opte2; 4283 pt1_entry_t *pte1p; 4284 vm_offset_t va; 4285 struct spglist free; 4286 4287 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 4288 ("%s: page %p is not managed", __func__, m)); 4289 SLIST_INIT(&free); 4290 rw_wlock(&pvh_global_lock); 4291 sched_pin(); 4292 if ((m->flags & PG_FICTITIOUS) != 0) 4293 goto small_mappings; 4294 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 4295 while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) { 4296 va = pv->pv_va; 4297 pmap = PV_PMAP(pv); 4298 PMAP_LOCK(pmap); 4299 pte1p = pmap_pte1(pmap, va); 4300 (void)pmap_demote_pte1(pmap, pte1p, va); 4301 PMAP_UNLOCK(pmap); 4302 } 4303 small_mappings: 4304 while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { 4305 pmap = PV_PMAP(pv); 4306 PMAP_LOCK(pmap); 4307 pmap->pm_stats.resident_count--; 4308 pte1p = pmap_pte1(pmap, pv->pv_va); 4309 KASSERT(!pte1_is_section(pte1_load(pte1p)), ("%s: found " 4310 "a 1mpage in page %p's pv list", __func__, m)); 4311 pte2p = pmap_pte2_quick(pmap, pv->pv_va); 4312 opte2 = pte2_load_clear(pte2p); 4313 pmap_tlb_flush(pmap, pv->pv_va); 4314 KASSERT(pte2_is_valid(opte2), ("%s: pmap %p va %x zero pte2", 4315 __func__, pmap, pv->pv_va)); 4316 if (pte2_is_wired(opte2)) 4317 pmap->pm_stats.wired_count--; 4318 if (opte2 & PTE2_A) 4319 vm_page_aflag_set(m, PGA_REFERENCED); 4320 4321 /* 4322 * Update the vm_page_t clean and reference bits. 4323 */ 4324 if (pte2_is_dirty(opte2)) 4325 vm_page_dirty(m); 4326 pmap_unuse_pt2(pmap, pv->pv_va, &free); 4327 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); 4328 free_pv_entry(pmap, pv); 4329 PMAP_UNLOCK(pmap); 4330 } 4331 vm_page_aflag_clear(m, PGA_WRITEABLE); 4332 sched_unpin(); 4333 rw_wunlock(&pvh_global_lock); 4334 vm_page_free_pages_toq(&free, false); 4335 } 4336 4337 /* 4338 * Just subroutine for pmap_remove_pages() to reasonably satisfy 4339 * good coding style, a.k.a. 80 character line width limit hell. 4340 */ 4341 static __inline void 4342 pmap_remove_pte1_quick(pmap_t pmap, pt1_entry_t pte1, pv_entry_t pv, 4343 struct spglist *free) 4344 { 4345 vm_paddr_t pa; 4346 vm_page_t m, mt, mpt2pg; 4347 struct md_page *pvh; 4348 4349 pa = pte1_pa(pte1); 4350 m = PHYS_TO_VM_PAGE(pa); 4351 4352 KASSERT(m->phys_addr == pa, ("%s: vm_page_t %p addr mismatch %#x %#x", 4353 __func__, m, m->phys_addr, pa)); 4354 KASSERT((m->flags & PG_FICTITIOUS) != 0 || 4355 m < &vm_page_array[vm_page_array_size], 4356 ("%s: bad pte1 %#x", __func__, pte1)); 4357 4358 if (pte1_is_dirty(pte1)) { 4359 for (mt = m; mt < &m[PTE1_SIZE / PAGE_SIZE]; mt++) 4360 vm_page_dirty(mt); 4361 } 4362 4363 pmap->pm_stats.resident_count -= PTE1_SIZE / PAGE_SIZE; 4364 pvh = pa_to_pvh(pa); 4365 TAILQ_REMOVE(&pvh->pv_list, pv, pv_next); 4366 if (TAILQ_EMPTY(&pvh->pv_list)) { 4367 for (mt = m; mt < &m[PTE1_SIZE / PAGE_SIZE]; mt++) 4368 if (TAILQ_EMPTY(&mt->md.pv_list)) 4369 vm_page_aflag_clear(mt, PGA_WRITEABLE); 4370 } 4371 mpt2pg = pmap_pt2_page(pmap, pv->pv_va); 4372 if (mpt2pg != NULL) 4373 pmap_unwire_pt2_all(pmap, pv->pv_va, mpt2pg, free); 4374 } 4375 4376 /* 4377 * Just subroutine for pmap_remove_pages() to reasonably satisfy 4378 * good coding style, a.k.a. 80 character line width limit hell. 
4379 */ 4380 static __inline void 4381 pmap_remove_pte2_quick(pmap_t pmap, pt2_entry_t pte2, pv_entry_t pv, 4382 struct spglist *free) 4383 { 4384 vm_paddr_t pa; 4385 vm_page_t m; 4386 struct md_page *pvh; 4387 4388 pa = pte2_pa(pte2); 4389 m = PHYS_TO_VM_PAGE(pa); 4390 4391 KASSERT(m->phys_addr == pa, ("%s: vm_page_t %p addr mismatch %#x %#x", 4392 __func__, m, m->phys_addr, pa)); 4393 KASSERT((m->flags & PG_FICTITIOUS) != 0 || 4394 m < &vm_page_array[vm_page_array_size], 4395 ("%s: bad pte2 %#x", __func__, pte2)); 4396 4397 if (pte2_is_dirty(pte2)) 4398 vm_page_dirty(m); 4399 4400 pmap->pm_stats.resident_count--; 4401 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); 4402 if (TAILQ_EMPTY(&m->md.pv_list) && (m->flags & PG_FICTITIOUS) == 0) { 4403 pvh = pa_to_pvh(pa); 4404 if (TAILQ_EMPTY(&pvh->pv_list)) 4405 vm_page_aflag_clear(m, PGA_WRITEABLE); 4406 } 4407 pmap_unuse_pt2(pmap, pv->pv_va, free); 4408 } 4409 4410 /* 4411 * Remove all pages from specified address space this aids process 4412 * exit speeds. Also, this code is special cased for current process 4413 * only, but can have the more generic (and slightly slower) mode enabled. 4414 * This is much faster than pmap_remove in the case of running down 4415 * an entire address space. 4416 */ 4417 void 4418 pmap_remove_pages(pmap_t pmap) 4419 { 4420 pt1_entry_t *pte1p, pte1; 4421 pt2_entry_t *pte2p, pte2; 4422 pv_entry_t pv; 4423 struct pv_chunk *pc, *npc; 4424 struct spglist free; 4425 int field, idx; 4426 int32_t bit; 4427 uint32_t inuse, bitmask; 4428 boolean_t allfree; 4429 4430 /* 4431 * Assert that the given pmap is only active on the current 4432 * CPU. Unfortunately, we cannot block another CPU from 4433 * activating the pmap while this function is executing. 4434 */ 4435 KASSERT(pmap == vmspace_pmap(curthread->td_proc->p_vmspace), 4436 ("%s: non-current pmap %p", __func__, pmap)); 4437 #if defined(SMP) && defined(INVARIANTS) 4438 { 4439 cpuset_t other_cpus; 4440 4441 sched_pin(); 4442 other_cpus = pmap->pm_active; 4443 CPU_CLR(PCPU_GET(cpuid), &other_cpus); 4444 sched_unpin(); 4445 KASSERT(CPU_EMPTY(&other_cpus), 4446 ("%s: pmap %p active on other cpus", __func__, pmap)); 4447 } 4448 #endif 4449 SLIST_INIT(&free); 4450 rw_wlock(&pvh_global_lock); 4451 PMAP_LOCK(pmap); 4452 sched_pin(); 4453 TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) { 4454 KASSERT(pc->pc_pmap == pmap, ("%s: wrong pmap %p %p", 4455 __func__, pmap, pc->pc_pmap)); 4456 allfree = TRUE; 4457 for (field = 0; field < _NPCM; field++) { 4458 inuse = (~(pc->pc_map[field])) & pc_freemask[field]; 4459 while (inuse != 0) { 4460 bit = ffs(inuse) - 1; 4461 bitmask = 1UL << bit; 4462 idx = field * 32 + bit; 4463 pv = &pc->pc_pventry[idx]; 4464 inuse &= ~bitmask; 4465 4466 /* 4467 * Note that we cannot remove wired pages 4468 * from a process' mapping at this time 4469 */ 4470 pte1p = pmap_pte1(pmap, pv->pv_va); 4471 pte1 = pte1_load(pte1p); 4472 if (pte1_is_section(pte1)) { 4473 if (pte1_is_wired(pte1)) { 4474 allfree = FALSE; 4475 continue; 4476 } 4477 pte1_clear(pte1p); 4478 pmap_remove_pte1_quick(pmap, pte1, pv, 4479 &free); 4480 } 4481 else if (pte1_is_link(pte1)) { 4482 pte2p = pt2map_entry(pv->pv_va); 4483 pte2 = pte2_load(pte2p); 4484 4485 if (!pte2_is_valid(pte2)) { 4486 printf("%s: pmap %p va %#x " 4487 "pte2 %#x\n", __func__, 4488 pmap, pv->pv_va, pte2); 4489 panic("bad pte2"); 4490 } 4491 4492 if (pte2_is_wired(pte2)) { 4493 allfree = FALSE; 4494 continue; 4495 } 4496 pte2_clear(pte2p); 4497 pmap_remove_pte2_quick(pmap, pte2, pv, 4498 &free); 4499 } else { 
4500 printf("%s: pmap %p va %#x pte1 %#x\n", 4501 __func__, pmap, pv->pv_va, pte1); 4502 panic("bad pte1"); 4503 } 4504 4505 /* Mark free */ 4506 PV_STAT(pv_entry_frees++); 4507 PV_STAT(pv_entry_spare++); 4508 pv_entry_count--; 4509 pc->pc_map[field] |= bitmask; 4510 } 4511 } 4512 if (allfree) { 4513 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 4514 free_pv_chunk(pc); 4515 } 4516 } 4517 tlb_flush_all_ng_local(); 4518 sched_unpin(); 4519 rw_wunlock(&pvh_global_lock); 4520 PMAP_UNLOCK(pmap); 4521 vm_page_free_pages_toq(&free, false); 4522 } 4523 4524 /* 4525 * This code makes some *MAJOR* assumptions: 4526 * 1. Current pmap & pmap exists. 4527 * 2. Not wired. 4528 * 3. Read access. 4529 * 4. No L2 page table pages. 4530 * but is *MUCH* faster than pmap_enter... 4531 */ 4532 static vm_page_t 4533 pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, 4534 vm_prot_t prot, vm_page_t mpt2pg) 4535 { 4536 pt2_entry_t *pte2p, pte2; 4537 vm_paddr_t pa; 4538 struct spglist free; 4539 uint32_t l2prot; 4540 4541 KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva || 4542 (m->oflags & VPO_UNMANAGED) != 0, 4543 ("%s: managed mapping within the clean submap", __func__)); 4544 rw_assert(&pvh_global_lock, RA_WLOCKED); 4545 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 4546 4547 /* 4548 * In the case that a L2 page table page is not 4549 * resident, we are creating it here. 4550 */ 4551 if (va < VM_MAXUSER_ADDRESS) { 4552 u_int pte1_idx; 4553 pt1_entry_t pte1, *pte1p; 4554 vm_paddr_t pt2_pa; 4555 4556 /* 4557 * Get L1 page table things. 4558 */ 4559 pte1_idx = pte1_index(va); 4560 pte1p = pmap_pte1(pmap, va); 4561 pte1 = pte1_load(pte1p); 4562 4563 if (mpt2pg && (mpt2pg->pindex == (pte1_idx & ~PT2PG_MASK))) { 4564 /* 4565 * Each of NPT2_IN_PG L2 page tables on the page can 4566 * come here. Make sure that associated L1 page table 4567 * link is established. 4568 * 4569 * QQQ: It comes that we don't establish all links to 4570 * L2 page tables for newly allocated L2 page 4571 * tables page. 4572 */ 4573 KASSERT(!pte1_is_section(pte1), 4574 ("%s: pte1 %#x is section", __func__, pte1)); 4575 if (!pte1_is_link(pte1)) { 4576 pt2_pa = page_pt2pa(VM_PAGE_TO_PHYS(mpt2pg), 4577 pte1_idx); 4578 pte1_store(pte1p, PTE1_LINK(pt2_pa)); 4579 } 4580 pt2_wirecount_inc(mpt2pg, pte1_idx); 4581 } else { 4582 /* 4583 * If the L2 page table page is mapped, we just 4584 * increment the hold count, and activate it. 4585 */ 4586 if (pte1_is_section(pte1)) { 4587 return (NULL); 4588 } else if (pte1_is_link(pte1)) { 4589 mpt2pg = PHYS_TO_VM_PAGE(pte1_link_pa(pte1)); 4590 pt2_wirecount_inc(mpt2pg, pte1_idx); 4591 } else { 4592 mpt2pg = _pmap_allocpte2(pmap, va, 4593 PMAP_ENTER_NOSLEEP); 4594 if (mpt2pg == NULL) 4595 return (NULL); 4596 } 4597 } 4598 } else { 4599 mpt2pg = NULL; 4600 } 4601 4602 /* 4603 * This call to pt2map_entry() makes the assumption that we are 4604 * entering the page into the current pmap. In order to support 4605 * quick entry into any pmap, one would likely use pmap_pte2_quick(). 4606 * But that isn't as quick as pt2map_entry(). 4607 */ 4608 pte2p = pt2map_entry(va); 4609 pte2 = pte2_load(pte2p); 4610 if (pte2_is_valid(pte2)) { 4611 if (mpt2pg != NULL) { 4612 /* 4613 * Remove extra pte2 reference 4614 */ 4615 pt2_wirecount_dec(mpt2pg, pte1_index(va)); 4616 mpt2pg = NULL; 4617 } 4618 return (NULL); 4619 } 4620 4621 /* 4622 * Enter on the PV list if part of our managed memory. 
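 * Unlike pmap_enter(), a failed pv entry allocation is not fatal here:
 * the speculative mapping is simply abandoned, the PT2 reference taken
 * above is dropped again, and NULL is returned to the caller.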
4623 */ 4624 if ((m->oflags & VPO_UNMANAGED) == 0 && 4625 !pmap_try_insert_pv_entry(pmap, va, m)) { 4626 if (mpt2pg != NULL) { 4627 SLIST_INIT(&free); 4628 if (pmap_unwire_pt2(pmap, va, mpt2pg, &free)) { 4629 pmap_tlb_flush(pmap, va); 4630 vm_page_free_pages_toq(&free, false); 4631 } 4632 4633 mpt2pg = NULL; 4634 } 4635 return (NULL); 4636 } 4637 4638 /* 4639 * Increment counters 4640 */ 4641 pmap->pm_stats.resident_count++; 4642 4643 /* 4644 * Now validate mapping with RO protection 4645 */ 4646 pa = VM_PAGE_TO_PHYS(m); 4647 l2prot = PTE2_RO | PTE2_NM; 4648 if (va < VM_MAXUSER_ADDRESS) 4649 l2prot |= PTE2_U | PTE2_NG; 4650 if ((prot & VM_PROT_EXECUTE) == 0) 4651 l2prot |= PTE2_NX; 4652 else if (m->md.pat_mode == VM_MEMATTR_WB_WA && pmap != kernel_pmap) { 4653 /* 4654 * Sync icache if exec permission and attribute VM_MEMATTR_WB_WA 4655 * is set. QQQ: For more info, see comments in pmap_enter(). 4656 */ 4657 cache_icache_sync_fresh(va, pa, PAGE_SIZE); 4658 } 4659 pte2_store(pte2p, PTE2(pa, l2prot, vm_page_pte2_attr(m))); 4660 4661 return (mpt2pg); 4662 } 4663 4664 void 4665 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot) 4666 { 4667 4668 rw_wlock(&pvh_global_lock); 4669 PMAP_LOCK(pmap); 4670 (void)pmap_enter_quick_locked(pmap, va, m, prot, NULL); 4671 rw_wunlock(&pvh_global_lock); 4672 PMAP_UNLOCK(pmap); 4673 } 4674 4675 /* 4676 * Tries to create a read- and/or execute-only 1 MB page mapping. Returns 4677 * true if successful. Returns false if (1) a mapping already exists at the 4678 * specified virtual address or (2) a PV entry cannot be allocated without 4679 * reclaiming another PV entry. 4680 */ 4681 static bool 4682 pmap_enter_1mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot) 4683 { 4684 pt1_entry_t pte1; 4685 vm_paddr_t pa; 4686 4687 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 4688 pa = VM_PAGE_TO_PHYS(m); 4689 pte1 = PTE1(pa, PTE1_NM | PTE1_RO, ATTR_TO_L1(vm_page_pte2_attr(m))); 4690 if ((prot & VM_PROT_EXECUTE) == 0) 4691 pte1 |= PTE1_NX; 4692 if (va < VM_MAXUSER_ADDRESS) 4693 pte1 |= PTE1_U; 4694 if (pmap != kernel_pmap) 4695 pte1 |= PTE1_NG; 4696 return (pmap_enter_pte1(pmap, va, pte1, PMAP_ENTER_NOSLEEP | 4697 PMAP_ENTER_NOREPLACE | PMAP_ENTER_NORECLAIM, m) == KERN_SUCCESS); 4698 } 4699 4700 /* 4701 * Tries to create the specified 1 MB page mapping. Returns KERN_SUCCESS if 4702 * the mapping was created, and either KERN_FAILURE or KERN_RESOURCE_SHORTAGE 4703 * otherwise. Returns KERN_FAILURE if PMAP_ENTER_NOREPLACE was specified and 4704 * a mapping already exists at the specified virtual address. Returns 4705 * KERN_RESOURCE_SHORTAGE if PMAP_ENTER_NORECLAIM was specified and PV entry 4706 * allocation failed. 4707 */ 4708 static int 4709 pmap_enter_pte1(pmap_t pmap, vm_offset_t va, pt1_entry_t pte1, u_int flags, 4710 vm_page_t m) 4711 { 4712 struct spglist free; 4713 pt1_entry_t opte1, *pte1p; 4714 pt2_entry_t pte2, *pte2p; 4715 vm_offset_t cur, end; 4716 vm_page_t mt; 4717 4718 rw_assert(&pvh_global_lock, RA_WLOCKED); 4719 KASSERT((pte1 & (PTE1_NM | PTE1_RO)) == 0 || 4720 (pte1 & (PTE1_NM | PTE1_RO)) == (PTE1_NM | PTE1_RO), 4721 ("%s: pte1 has inconsistent NM and RO attributes", __func__)); 4722 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 4723 pte1p = pmap_pte1(pmap, va); 4724 opte1 = pte1_load(pte1p); 4725 if (pte1_is_valid(opte1)) { 4726 if ((flags & PMAP_ENTER_NOREPLACE) != 0) { 4727 CTR3(KTR_PMAP, "%s: failure for va %#lx in pmap %p", 4728 __func__, va, pmap); 4729 return (KERN_FAILURE); 4730 } 4731 /* Break the existing mapping(s). 
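 * An existing 1 MB section is torn down as a whole by
 * pmap_remove_pte1(); otherwise the valid 4 KB mappings found under
 * the existing link are swept one at a time with pmap_remove_pte2().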
*/ 4732 SLIST_INIT(&free); 4733 if (pte1_is_section(opte1)) { 4734 /* 4735 * If the section resulted from a promotion, then a 4736 * reserved PT page could be freed. 4737 */ 4738 pmap_remove_pte1(pmap, pte1p, va, &free); 4739 } else { 4740 sched_pin(); 4741 end = va + PTE1_SIZE; 4742 for (cur = va, pte2p = pmap_pte2_quick(pmap, va); 4743 cur != end; cur += PAGE_SIZE, pte2p++) { 4744 pte2 = pte2_load(pte2p); 4745 if (!pte2_is_valid(pte2)) 4746 continue; 4747 if (pmap_remove_pte2(pmap, pte2p, cur, &free)) 4748 break; 4749 } 4750 sched_unpin(); 4751 } 4752 vm_page_free_pages_toq(&free, false); 4753 } 4754 if ((m->oflags & VPO_UNMANAGED) == 0) { 4755 /* 4756 * Abort this mapping if its PV entry could not be created. 4757 */ 4758 if (!pmap_pv_insert_pte1(pmap, va, pte1, flags)) { 4759 CTR3(KTR_PMAP, "%s: failure for va %#lx in pmap %p", 4760 __func__, va, pmap); 4761 return (KERN_RESOURCE_SHORTAGE); 4762 } 4763 if ((pte1 & PTE1_RO) == 0) { 4764 for (mt = m; mt < &m[PTE1_SIZE / PAGE_SIZE]; mt++) 4765 vm_page_aflag_set(mt, PGA_WRITEABLE); 4766 } 4767 } 4768 4769 /* 4770 * Increment counters. 4771 */ 4772 if (pte1_is_wired(pte1)) 4773 pmap->pm_stats.wired_count += PTE1_SIZE / PAGE_SIZE; 4774 pmap->pm_stats.resident_count += PTE1_SIZE / PAGE_SIZE; 4775 4776 /* 4777 * Sync icache if exec permission and attribute VM_MEMATTR_WB_WA 4778 * is set. QQQ: For more info, see comments in pmap_enter(). 4779 */ 4780 if ((pte1 & PTE1_NX) == 0 && m->md.pat_mode == VM_MEMATTR_WB_WA && 4781 pmap != kernel_pmap && (!pte1_is_section(opte1) || 4782 pte1_pa(opte1) != VM_PAGE_TO_PHYS(m) || (opte1 & PTE2_NX) != 0)) 4783 cache_icache_sync_fresh(va, VM_PAGE_TO_PHYS(m), PTE1_SIZE); 4784 4785 /* 4786 * Map the section. 4787 */ 4788 pte1_store(pte1p, pte1); 4789 4790 pmap_pte1_mappings++; 4791 CTR3(KTR_PMAP, "%s: success for va %#lx in pmap %p", __func__, va, 4792 pmap); 4793 return (KERN_SUCCESS); 4794 } 4795 4796 /* 4797 * Maps a sequence of resident pages belonging to the same object. 4798 * The sequence begins with the given page m_start. This page is 4799 * mapped at the given virtual address start. Each subsequent page is 4800 * mapped at a virtual address that is offset from start by the same 4801 * amount as the page is offset from m_start within the object. The 4802 * last page in the sequence is the page with the largest offset from 4803 * m_start that can be mapped at a virtual address less than the given 4804 * virtual address end. Not every virtual page between start and end 4805 * is mapped; only those for which a resident page exists with the 4806 * corresponding offset from m_start are mapped. 
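 *
 * Within the loop below, a run that is PTE1_SIZE aligned in virtual
 * address, fits before "end", and is backed by a superpage reservation
 * (m->psind == 1) is entered through pmap_enter_1mpage() when
 * superpages are enabled; everything else falls back to
 * pmap_enter_quick_locked() one page at a time.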
4807 */ 4808 void 4809 pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end, 4810 vm_page_t m_start, vm_prot_t prot) 4811 { 4812 vm_offset_t va; 4813 vm_page_t m, mpt2pg; 4814 vm_pindex_t diff, psize; 4815 4816 PDEBUG(6, printf("%s: pmap %p start %#x end %#x m %p prot %#x\n", 4817 __func__, pmap, start, end, m_start, prot)); 4818 4819 VM_OBJECT_ASSERT_LOCKED(m_start->object); 4820 psize = atop(end - start); 4821 mpt2pg = NULL; 4822 m = m_start; 4823 rw_wlock(&pvh_global_lock); 4824 PMAP_LOCK(pmap); 4825 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 4826 va = start + ptoa(diff); 4827 if ((va & PTE1_OFFSET) == 0 && va + PTE1_SIZE <= end && 4828 m->psind == 1 && sp_enabled && 4829 pmap_enter_1mpage(pmap, va, m, prot)) 4830 m = &m[PTE1_SIZE / PAGE_SIZE - 1]; 4831 else 4832 mpt2pg = pmap_enter_quick_locked(pmap, va, m, prot, 4833 mpt2pg); 4834 m = TAILQ_NEXT(m, listq); 4835 } 4836 rw_wunlock(&pvh_global_lock); 4837 PMAP_UNLOCK(pmap); 4838 } 4839 4840 /* 4841 * This code maps large physical mmap regions into the 4842 * processor address space. Note that some shortcuts 4843 * are taken, but the code works. 4844 */ 4845 void 4846 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object, 4847 vm_pindex_t pindex, vm_size_t size) 4848 { 4849 pt1_entry_t *pte1p; 4850 vm_paddr_t pa, pte2_pa; 4851 vm_page_t p; 4852 vm_memattr_t pat_mode; 4853 u_int l1attr, l1prot; 4854 4855 VM_OBJECT_ASSERT_WLOCKED(object); 4856 KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG, 4857 ("%s: non-device object", __func__)); 4858 if ((addr & PTE1_OFFSET) == 0 && (size & PTE1_OFFSET) == 0) { 4859 if (!vm_object_populate(object, pindex, pindex + atop(size))) 4860 return; 4861 p = vm_page_lookup(object, pindex); 4862 KASSERT(p->valid == VM_PAGE_BITS_ALL, 4863 ("%s: invalid page %p", __func__, p)); 4864 pat_mode = p->md.pat_mode; 4865 4866 /* 4867 * Abort the mapping if the first page is not physically 4868 * aligned to a 1MB page boundary. 4869 */ 4870 pte2_pa = VM_PAGE_TO_PHYS(p); 4871 if (pte2_pa & PTE1_OFFSET) 4872 return; 4873 4874 /* 4875 * Skip the first page. Abort the mapping if the rest of 4876 * the pages are not physically contiguous or have differing 4877 * memory attributes. 4878 */ 4879 p = TAILQ_NEXT(p, listq); 4880 for (pa = pte2_pa + PAGE_SIZE; pa < pte2_pa + size; 4881 pa += PAGE_SIZE) { 4882 KASSERT(p->valid == VM_PAGE_BITS_ALL, 4883 ("%s: invalid page %p", __func__, p)); 4884 if (pa != VM_PAGE_TO_PHYS(p) || 4885 pat_mode != p->md.pat_mode) 4886 return; 4887 p = TAILQ_NEXT(p, listq); 4888 } 4889 4890 /* 4891 * Map using 1MB pages. 4892 * 4893 * QQQ: Well, we are mapping a section, so same condition must 4894 * be hold like during promotion. It looks that only RW mapping 4895 * is done here, so readonly mapping must be done elsewhere. 4896 */ 4897 l1prot = PTE1_U | PTE1_NG | PTE1_RW | PTE1_M | PTE1_A; 4898 l1attr = ATTR_TO_L1(vm_memattr_to_pte2(pat_mode)); 4899 PMAP_LOCK(pmap); 4900 for (pa = pte2_pa; pa < pte2_pa + size; pa += PTE1_SIZE) { 4901 pte1p = pmap_pte1(pmap, addr); 4902 if (!pte1_is_valid(pte1_load(pte1p))) { 4903 pte1_store(pte1p, PTE1(pa, l1prot, l1attr)); 4904 pmap->pm_stats.resident_count += PTE1_SIZE / 4905 PAGE_SIZE; 4906 pmap_pte1_mappings++; 4907 } 4908 /* Else continue on if the PTE1 is already valid. */ 4909 addr += PTE1_SIZE; 4910 } 4911 PMAP_UNLOCK(pmap); 4912 } 4913 } 4914 4915 /* 4916 * Do the things to protect a 1mpage in a process. 
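 * Note that a managed, dirty section first pushes its modified state to
 * every 4 KB vm_page it covers, because setting PTE1_RO | PTE1_NM below
 * would otherwise hide that information from the VM system.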
4917 */ 4918 static void 4919 pmap_protect_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t sva, 4920 vm_prot_t prot) 4921 { 4922 pt1_entry_t npte1, opte1; 4923 vm_offset_t eva, va; 4924 vm_page_t m; 4925 4926 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 4927 KASSERT((sva & PTE1_OFFSET) == 0, 4928 ("%s: sva is not 1mpage aligned", __func__)); 4929 4930 opte1 = npte1 = pte1_load(pte1p); 4931 if (pte1_is_managed(opte1) && pte1_is_dirty(opte1)) { 4932 eva = sva + PTE1_SIZE; 4933 for (va = sva, m = PHYS_TO_VM_PAGE(pte1_pa(opte1)); 4934 va < eva; va += PAGE_SIZE, m++) 4935 vm_page_dirty(m); 4936 } 4937 if ((prot & VM_PROT_WRITE) == 0) 4938 npte1 |= PTE1_RO | PTE1_NM; 4939 if ((prot & VM_PROT_EXECUTE) == 0) 4940 npte1 |= PTE1_NX; 4941 4942 /* 4943 * QQQ: Herein, execute permission is never set. 4944 * It only can be cleared. So, no icache 4945 * syncing is needed. 4946 */ 4947 4948 if (npte1 != opte1) { 4949 pte1_store(pte1p, npte1); 4950 pmap_tlb_flush(pmap, sva); 4951 } 4952 } 4953 4954 /* 4955 * Set the physical protection on the 4956 * specified range of this map as requested. 4957 */ 4958 void 4959 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) 4960 { 4961 boolean_t pv_lists_locked; 4962 vm_offset_t nextva; 4963 pt1_entry_t *pte1p, pte1; 4964 pt2_entry_t *pte2p, opte2, npte2; 4965 4966 KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot)); 4967 if (prot == VM_PROT_NONE) { 4968 pmap_remove(pmap, sva, eva); 4969 return; 4970 } 4971 4972 if ((prot & (VM_PROT_WRITE | VM_PROT_EXECUTE)) == 4973 (VM_PROT_WRITE | VM_PROT_EXECUTE)) 4974 return; 4975 4976 if (pmap_is_current(pmap)) 4977 pv_lists_locked = FALSE; 4978 else { 4979 pv_lists_locked = TRUE; 4980 resume: 4981 rw_wlock(&pvh_global_lock); 4982 sched_pin(); 4983 } 4984 4985 PMAP_LOCK(pmap); 4986 for (; sva < eva; sva = nextva) { 4987 /* 4988 * Calculate address for next L2 page table. 4989 */ 4990 nextva = pte1_trunc(sva + PTE1_SIZE); 4991 if (nextva < sva) 4992 nextva = eva; 4993 4994 pte1p = pmap_pte1(pmap, sva); 4995 pte1 = pte1_load(pte1p); 4996 4997 /* 4998 * Weed out invalid mappings. Note: we assume that L1 page 4999 * page table is always allocated, and in kernel virtual. 5000 */ 5001 if (pte1 == 0) 5002 continue; 5003 5004 if (pte1_is_section(pte1)) { 5005 /* 5006 * Are we protecting the entire large page? If not, 5007 * demote the mapping and fall through. 5008 */ 5009 if (sva + PTE1_SIZE == nextva && eva >= nextva) { 5010 pmap_protect_pte1(pmap, pte1p, sva, prot); 5011 continue; 5012 } else { 5013 if (!pv_lists_locked) { 5014 pv_lists_locked = TRUE; 5015 if (!rw_try_wlock(&pvh_global_lock)) { 5016 PMAP_UNLOCK(pmap); 5017 goto resume; 5018 } 5019 sched_pin(); 5020 } 5021 if (!pmap_demote_pte1(pmap, pte1p, sva)) { 5022 /* 5023 * The large page mapping 5024 * was destroyed. 5025 */ 5026 continue; 5027 } 5028 #ifdef INVARIANTS 5029 else { 5030 /* Update pte1 after demotion */ 5031 pte1 = pte1_load(pte1p); 5032 } 5033 #endif 5034 } 5035 } 5036 5037 KASSERT(pte1_is_link(pte1), ("%s: pmap %p va %#x pte1 %#x at %p" 5038 " is not link", __func__, pmap, sva, pte1, pte1p)); 5039 5040 /* 5041 * Limit our scan to either the end of the va represented 5042 * by the current L2 page table page, or to the end of the 5043 * range being protected. 
5044 */ 5045 if (nextva > eva) 5046 nextva = eva; 5047 5048 for (pte2p = pmap_pte2_quick(pmap, sva); sva != nextva; pte2p++, 5049 sva += PAGE_SIZE) { 5050 vm_page_t m; 5051 5052 opte2 = npte2 = pte2_load(pte2p); 5053 if (!pte2_is_valid(opte2)) 5054 continue; 5055 5056 if ((prot & VM_PROT_WRITE) == 0) { 5057 if (pte2_is_managed(opte2) && 5058 pte2_is_dirty(opte2)) { 5059 m = PHYS_TO_VM_PAGE(pte2_pa(opte2)); 5060 vm_page_dirty(m); 5061 } 5062 npte2 |= PTE2_RO | PTE2_NM; 5063 } 5064 5065 if ((prot & VM_PROT_EXECUTE) == 0) 5066 npte2 |= PTE2_NX; 5067 5068 /* 5069 * QQQ: Herein, execute permission is never set. 5070 * It only can be cleared. So, no icache 5071 * syncing is needed. 5072 */ 5073 5074 if (npte2 != opte2) { 5075 pte2_store(pte2p, npte2); 5076 pmap_tlb_flush(pmap, sva); 5077 } 5078 } 5079 } 5080 if (pv_lists_locked) { 5081 sched_unpin(); 5082 rw_wunlock(&pvh_global_lock); 5083 } 5084 PMAP_UNLOCK(pmap); 5085 } 5086 5087 /* 5088 * pmap_pvh_wired_mappings: 5089 * 5090 * Return the updated number "count" of managed mappings that are wired. 5091 */ 5092 static int 5093 pmap_pvh_wired_mappings(struct md_page *pvh, int count) 5094 { 5095 pmap_t pmap; 5096 pt1_entry_t pte1; 5097 pt2_entry_t pte2; 5098 pv_entry_t pv; 5099 5100 rw_assert(&pvh_global_lock, RA_WLOCKED); 5101 sched_pin(); 5102 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { 5103 pmap = PV_PMAP(pv); 5104 PMAP_LOCK(pmap); 5105 pte1 = pte1_load(pmap_pte1(pmap, pv->pv_va)); 5106 if (pte1_is_section(pte1)) { 5107 if (pte1_is_wired(pte1)) 5108 count++; 5109 } else { 5110 KASSERT(pte1_is_link(pte1), 5111 ("%s: pte1 %#x is not link", __func__, pte1)); 5112 pte2 = pte2_load(pmap_pte2_quick(pmap, pv->pv_va)); 5113 if (pte2_is_wired(pte2)) 5114 count++; 5115 } 5116 PMAP_UNLOCK(pmap); 5117 } 5118 sched_unpin(); 5119 return (count); 5120 } 5121 5122 /* 5123 * pmap_page_wired_mappings: 5124 * 5125 * Return the number of managed mappings to the given physical page 5126 * that are wired. 5127 */ 5128 int 5129 pmap_page_wired_mappings(vm_page_t m) 5130 { 5131 int count; 5132 5133 count = 0; 5134 if ((m->oflags & VPO_UNMANAGED) != 0) 5135 return (count); 5136 rw_wlock(&pvh_global_lock); 5137 count = pmap_pvh_wired_mappings(&m->md, count); 5138 if ((m->flags & PG_FICTITIOUS) == 0) { 5139 count = pmap_pvh_wired_mappings(pa_to_pvh(VM_PAGE_TO_PHYS(m)), 5140 count); 5141 } 5142 rw_wunlock(&pvh_global_lock); 5143 return (count); 5144 } 5145 5146 /* 5147 * Returns TRUE if any of the given mappings were used to modify 5148 * physical memory. Otherwise, returns FALSE. Both page and 1mpage 5149 * mappings are supported. 5150 */ 5151 static boolean_t 5152 pmap_is_modified_pvh(struct md_page *pvh) 5153 { 5154 pv_entry_t pv; 5155 pt1_entry_t pte1; 5156 pt2_entry_t pte2; 5157 pmap_t pmap; 5158 boolean_t rv; 5159 5160 rw_assert(&pvh_global_lock, RA_WLOCKED); 5161 rv = FALSE; 5162 sched_pin(); 5163 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { 5164 pmap = PV_PMAP(pv); 5165 PMAP_LOCK(pmap); 5166 pte1 = pte1_load(pmap_pte1(pmap, pv->pv_va)); 5167 if (pte1_is_section(pte1)) { 5168 rv = pte1_is_dirty(pte1); 5169 } else { 5170 KASSERT(pte1_is_link(pte1), 5171 ("%s: pte1 %#x is not link", __func__, pte1)); 5172 pte2 = pte2_load(pmap_pte2_quick(pmap, pv->pv_va)); 5173 rv = pte2_is_dirty(pte2); 5174 } 5175 PMAP_UNLOCK(pmap); 5176 if (rv) 5177 break; 5178 } 5179 sched_unpin(); 5180 return (rv); 5181 } 5182 5183 /* 5184 * pmap_is_modified: 5185 * 5186 * Return whether or not the specified physical page was modified 5187 * in any physical maps. 
5188 */ 5189 boolean_t 5190 pmap_is_modified(vm_page_t m) 5191 { 5192 boolean_t rv; 5193 5194 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 5195 ("%s: page %p is not managed", __func__, m)); 5196 5197 /* 5198 * If the page is not busied then this check is racy. 5199 */ 5200 if (!pmap_page_is_write_mapped(m)) 5201 return (FALSE); 5202 rw_wlock(&pvh_global_lock); 5203 rv = pmap_is_modified_pvh(&m->md) || 5204 ((m->flags & PG_FICTITIOUS) == 0 && 5205 pmap_is_modified_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m)))); 5206 rw_wunlock(&pvh_global_lock); 5207 return (rv); 5208 } 5209 5210 /* 5211 * pmap_is_prefaultable: 5212 * 5213 * Return whether or not the specified virtual address is eligible 5214 * for prefault. 5215 */ 5216 boolean_t 5217 pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr) 5218 { 5219 pt1_entry_t pte1; 5220 pt2_entry_t pte2; 5221 boolean_t rv; 5222 5223 rv = FALSE; 5224 PMAP_LOCK(pmap); 5225 pte1 = pte1_load(pmap_pte1(pmap, addr)); 5226 if (pte1_is_link(pte1)) { 5227 pte2 = pte2_load(pt2map_entry(addr)); 5228 rv = !pte2_is_valid(pte2) ; 5229 } 5230 PMAP_UNLOCK(pmap); 5231 return (rv); 5232 } 5233 5234 /* 5235 * Returns TRUE if any of the given mappings were referenced and FALSE 5236 * otherwise. Both page and 1mpage mappings are supported. 5237 */ 5238 static boolean_t 5239 pmap_is_referenced_pvh(struct md_page *pvh) 5240 { 5241 5242 pv_entry_t pv; 5243 pt1_entry_t pte1; 5244 pt2_entry_t pte2; 5245 pmap_t pmap; 5246 boolean_t rv; 5247 5248 rw_assert(&pvh_global_lock, RA_WLOCKED); 5249 rv = FALSE; 5250 sched_pin(); 5251 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { 5252 pmap = PV_PMAP(pv); 5253 PMAP_LOCK(pmap); 5254 pte1 = pte1_load(pmap_pte1(pmap, pv->pv_va)); 5255 if (pte1_is_section(pte1)) { 5256 rv = (pte1 & (PTE1_A | PTE1_V)) == (PTE1_A | PTE1_V); 5257 } else { 5258 pte2 = pte2_load(pmap_pte2_quick(pmap, pv->pv_va)); 5259 rv = (pte2 & (PTE2_A | PTE2_V)) == (PTE2_A | PTE2_V); 5260 } 5261 PMAP_UNLOCK(pmap); 5262 if (rv) 5263 break; 5264 } 5265 sched_unpin(); 5266 return (rv); 5267 } 5268 5269 /* 5270 * pmap_is_referenced: 5271 * 5272 * Return whether or not the specified physical page was referenced 5273 * in any physical maps. 5274 */ 5275 boolean_t 5276 pmap_is_referenced(vm_page_t m) 5277 { 5278 boolean_t rv; 5279 5280 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 5281 ("%s: page %p is not managed", __func__, m)); 5282 rw_wlock(&pvh_global_lock); 5283 rv = pmap_is_referenced_pvh(&m->md) || 5284 ((m->flags & PG_FICTITIOUS) == 0 && 5285 pmap_is_referenced_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m)))); 5286 rw_wunlock(&pvh_global_lock); 5287 return (rv); 5288 } 5289 5290 /* 5291 * pmap_ts_referenced: 5292 * 5293 * Return a count of reference bits for a page, clearing those bits. 5294 * It is not necessary for every reference bit to be cleared, but it 5295 * is necessary that 0 only be returned when there are truly no 5296 * reference bits set. 5297 * 5298 * As an optimization, update the page's dirty field if a modified bit is 5299 * found while counting reference bits. This opportunistic update can be 5300 * performed at low cost and can eliminate the need for some future calls 5301 * to pmap_is_modified(). However, since this function stops after 5302 * finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some 5303 * dirty pages. Those dirty pages will only be detected by a future call 5304 * to pmap_is_modified(). 
5305 */ 5306 int 5307 pmap_ts_referenced(vm_page_t m) 5308 { 5309 struct md_page *pvh; 5310 pv_entry_t pv, pvf; 5311 pmap_t pmap; 5312 pt1_entry_t *pte1p, opte1; 5313 pt2_entry_t *pte2p, opte2; 5314 vm_paddr_t pa; 5315 int rtval = 0; 5316 5317 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 5318 ("%s: page %p is not managed", __func__, m)); 5319 pa = VM_PAGE_TO_PHYS(m); 5320 pvh = pa_to_pvh(pa); 5321 rw_wlock(&pvh_global_lock); 5322 sched_pin(); 5323 if ((m->flags & PG_FICTITIOUS) != 0 || 5324 (pvf = TAILQ_FIRST(&pvh->pv_list)) == NULL) 5325 goto small_mappings; 5326 pv = pvf; 5327 do { 5328 pmap = PV_PMAP(pv); 5329 PMAP_LOCK(pmap); 5330 pte1p = pmap_pte1(pmap, pv->pv_va); 5331 opte1 = pte1_load(pte1p); 5332 if (pte1_is_dirty(opte1)) { 5333 /* 5334 * Although "opte1" is mapping a 1MB page, because 5335 * this function is called at a 4KB page granularity, 5336 * we only update the 4KB page under test. 5337 */ 5338 vm_page_dirty(m); 5339 } 5340 if ((opte1 & PTE1_A) != 0) { 5341 /* 5342 * Since this reference bit is shared by 256 4KB pages, 5343 * it should not be cleared every time it is tested. 5344 * Apply a simple "hash" function on the physical page 5345 * number, the virtual section number, and the pmap 5346 * address to select one 4KB page out of the 256 5347 * on which testing the reference bit will result 5348 * in clearing that bit. This function is designed 5349 * to avoid the selection of the same 4KB page 5350 * for every 1MB page mapping. 5351 * 5352 * On demotion, a mapping that hasn't been referenced 5353 * is simply destroyed. To avoid the possibility of a 5354 * subsequent page fault on a demoted wired mapping, 5355 * always leave its reference bit set. Moreover, 5356 * since the section is wired, the current state of 5357 * its reference bit won't affect page replacement. 5358 */ 5359 if ((((pa >> PAGE_SHIFT) ^ (pv->pv_va >> PTE1_SHIFT) ^ 5360 (uintptr_t)pmap) & (NPTE2_IN_PG - 1)) == 0 && 5361 !pte1_is_wired(opte1)) { 5362 pte1_clear_bit(pte1p, PTE1_A); 5363 pmap_tlb_flush(pmap, pv->pv_va); 5364 } 5365 rtval++; 5366 } 5367 PMAP_UNLOCK(pmap); 5368 /* Rotate the PV list if it has more than one entry. */ 5369 if (TAILQ_NEXT(pv, pv_next) != NULL) { 5370 TAILQ_REMOVE(&pvh->pv_list, pv, pv_next); 5371 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next); 5372 } 5373 if (rtval >= PMAP_TS_REFERENCED_MAX) 5374 goto out; 5375 } while ((pv = TAILQ_FIRST(&pvh->pv_list)) != pvf); 5376 small_mappings: 5377 if ((pvf = TAILQ_FIRST(&m->md.pv_list)) == NULL) 5378 goto out; 5379 pv = pvf; 5380 do { 5381 pmap = PV_PMAP(pv); 5382 PMAP_LOCK(pmap); 5383 pte1p = pmap_pte1(pmap, pv->pv_va); 5384 KASSERT(pte1_is_link(pte1_load(pte1p)), 5385 ("%s: not found a link in page %p's pv list", __func__, m)); 5386 5387 pte2p = pmap_pte2_quick(pmap, pv->pv_va); 5388 opte2 = pte2_load(pte2p); 5389 if (pte2_is_dirty(opte2)) 5390 vm_page_dirty(m); 5391 if ((opte2 & PTE2_A) != 0) { 5392 pte2_clear_bit(pte2p, PTE2_A); 5393 pmap_tlb_flush(pmap, pv->pv_va); 5394 rtval++; 5395 } 5396 PMAP_UNLOCK(pmap); 5397 /* Rotate the PV list if it has more than one entry. */ 5398 if (TAILQ_NEXT(pv, pv_next) != NULL) { 5399 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); 5400 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); 5401 } 5402 } while ((pv = TAILQ_FIRST(&m->md.pv_list)) != pvf && rtval < 5403 PMAP_TS_REFERENCED_MAX); 5404 out: 5405 sched_unpin(); 5406 rw_wunlock(&pvh_global_lock); 5407 return (rtval); 5408 } 5409 5410 /* 5411 * Clear the wired attribute from the mappings for the specified range of 5412 * addresses in the given pmap. 
Every valid mapping within that range 5413 * must have the wired attribute set. In contrast, invalid mappings 5414 * cannot have the wired attribute set, so they are ignored. 5415 * 5416 * The wired attribute of the page table entry is not a hardware feature, 5417 * so there is no need to invalidate any TLB entries. 5418 */ 5419 void 5420 pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 5421 { 5422 vm_offset_t nextva; 5423 pt1_entry_t *pte1p, pte1; 5424 pt2_entry_t *pte2p, pte2; 5425 boolean_t pv_lists_locked; 5426 5427 if (pmap_is_current(pmap)) 5428 pv_lists_locked = FALSE; 5429 else { 5430 pv_lists_locked = TRUE; 5431 resume: 5432 rw_wlock(&pvh_global_lock); 5433 sched_pin(); 5434 } 5435 PMAP_LOCK(pmap); 5436 for (; sva < eva; sva = nextva) { 5437 nextva = pte1_trunc(sva + PTE1_SIZE); 5438 if (nextva < sva) 5439 nextva = eva; 5440 5441 pte1p = pmap_pte1(pmap, sva); 5442 pte1 = pte1_load(pte1p); 5443 5444 /* 5445 * Weed out invalid mappings. Note: we assume that L1 page 5446 * page table is always allocated, and in kernel virtual. 5447 */ 5448 if (pte1 == 0) 5449 continue; 5450 5451 if (pte1_is_section(pte1)) { 5452 if (!pte1_is_wired(pte1)) 5453 panic("%s: pte1 %#x not wired", __func__, pte1); 5454 5455 /* 5456 * Are we unwiring the entire large page? If not, 5457 * demote the mapping and fall through. 5458 */ 5459 if (sva + PTE1_SIZE == nextva && eva >= nextva) { 5460 pte1_clear_bit(pte1p, PTE1_W); 5461 pmap->pm_stats.wired_count -= PTE1_SIZE / 5462 PAGE_SIZE; 5463 continue; 5464 } else { 5465 if (!pv_lists_locked) { 5466 pv_lists_locked = TRUE; 5467 if (!rw_try_wlock(&pvh_global_lock)) { 5468 PMAP_UNLOCK(pmap); 5469 /* Repeat sva. */ 5470 goto resume; 5471 } 5472 sched_pin(); 5473 } 5474 if (!pmap_demote_pte1(pmap, pte1p, sva)) 5475 panic("%s: demotion failed", __func__); 5476 #ifdef INVARIANTS 5477 else { 5478 /* Update pte1 after demotion */ 5479 pte1 = pte1_load(pte1p); 5480 } 5481 #endif 5482 } 5483 } 5484 5485 KASSERT(pte1_is_link(pte1), ("%s: pmap %p va %#x pte1 %#x at %p" 5486 " is not link", __func__, pmap, sva, pte1, pte1p)); 5487 5488 /* 5489 * Limit our scan to either the end of the va represented 5490 * by the current L2 page table page, or to the end of the 5491 * range being protected. 5492 */ 5493 if (nextva > eva) 5494 nextva = eva; 5495 5496 for (pte2p = pmap_pte2_quick(pmap, sva); sva != nextva; pte2p++, 5497 sva += PAGE_SIZE) { 5498 pte2 = pte2_load(pte2p); 5499 if (!pte2_is_valid(pte2)) 5500 continue; 5501 if (!pte2_is_wired(pte2)) 5502 panic("%s: pte2 %#x is missing PTE2_W", 5503 __func__, pte2); 5504 5505 /* 5506 * PTE2_W must be cleared atomically. Although the pmap 5507 * lock synchronizes access to PTE2_W, another processor 5508 * could be changing PTE2_NM and/or PTE2_A concurrently. 5509 */ 5510 pte2_clear_bit(pte2p, PTE2_W); 5511 pmap->pm_stats.wired_count--; 5512 } 5513 } 5514 if (pv_lists_locked) { 5515 sched_unpin(); 5516 rw_wunlock(&pvh_global_lock); 5517 } 5518 PMAP_UNLOCK(pmap); 5519 } 5520 5521 /* 5522 * Clear the write and modified bits in each of the given page's mappings. 
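 *
 * Writable 1 MB sections on the pv list are demoted first, so that only
 * the 4 KB mappings of this particular page need to lose write access;
 * each small mapping is then marked PTE2_RO | PTE2_NM, any pending
 * dirty state is transferred to the vm_page, and its TLB entry is
 * flushed.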
5523 */ 5524 void 5525 pmap_remove_write(vm_page_t m) 5526 { 5527 struct md_page *pvh; 5528 pv_entry_t next_pv, pv; 5529 pmap_t pmap; 5530 pt1_entry_t *pte1p; 5531 pt2_entry_t *pte2p, opte2; 5532 vm_offset_t va; 5533 5534 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 5535 ("%s: page %p is not managed", __func__, m)); 5536 vm_page_assert_busied(m); 5537 5538 if (!pmap_page_is_write_mapped(m)) 5539 return; 5540 rw_wlock(&pvh_global_lock); 5541 sched_pin(); 5542 if ((m->flags & PG_FICTITIOUS) != 0) 5543 goto small_mappings; 5544 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 5545 TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) { 5546 va = pv->pv_va; 5547 pmap = PV_PMAP(pv); 5548 PMAP_LOCK(pmap); 5549 pte1p = pmap_pte1(pmap, va); 5550 if (!(pte1_load(pte1p) & PTE1_RO)) 5551 (void)pmap_demote_pte1(pmap, pte1p, va); 5552 PMAP_UNLOCK(pmap); 5553 } 5554 small_mappings: 5555 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { 5556 pmap = PV_PMAP(pv); 5557 PMAP_LOCK(pmap); 5558 pte1p = pmap_pte1(pmap, pv->pv_va); 5559 KASSERT(!pte1_is_section(pte1_load(pte1p)), ("%s: found" 5560 " a section in page %p's pv list", __func__, m)); 5561 pte2p = pmap_pte2_quick(pmap, pv->pv_va); 5562 opte2 = pte2_load(pte2p); 5563 if (!(opte2 & PTE2_RO)) { 5564 pte2_store(pte2p, opte2 | PTE2_RO | PTE2_NM); 5565 if (pte2_is_dirty(opte2)) 5566 vm_page_dirty(m); 5567 pmap_tlb_flush(pmap, pv->pv_va); 5568 } 5569 PMAP_UNLOCK(pmap); 5570 } 5571 vm_page_aflag_clear(m, PGA_WRITEABLE); 5572 sched_unpin(); 5573 rw_wunlock(&pvh_global_lock); 5574 } 5575 5576 /* 5577 * Apply the given advice to the specified range of addresses within the 5578 * given pmap. Depending on the advice, clear the referenced and/or 5579 * modified flags in each mapping and set the mapped page's dirty field. 5580 */ 5581 void 5582 pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice) 5583 { 5584 pt1_entry_t *pte1p, opte1; 5585 pt2_entry_t *pte2p, pte2; 5586 vm_offset_t pdnxt; 5587 vm_page_t m; 5588 boolean_t pv_lists_locked; 5589 5590 if (advice != MADV_DONTNEED && advice != MADV_FREE) 5591 return; 5592 if (pmap_is_current(pmap)) 5593 pv_lists_locked = FALSE; 5594 else { 5595 pv_lists_locked = TRUE; 5596 resume: 5597 rw_wlock(&pvh_global_lock); 5598 sched_pin(); 5599 } 5600 PMAP_LOCK(pmap); 5601 for (; sva < eva; sva = pdnxt) { 5602 pdnxt = pte1_trunc(sva + PTE1_SIZE); 5603 if (pdnxt < sva) 5604 pdnxt = eva; 5605 pte1p = pmap_pte1(pmap, sva); 5606 opte1 = pte1_load(pte1p); 5607 if (!pte1_is_valid(opte1)) /* XXX */ 5608 continue; 5609 else if (pte1_is_section(opte1)) { 5610 if (!pte1_is_managed(opte1)) 5611 continue; 5612 if (!pv_lists_locked) { 5613 pv_lists_locked = TRUE; 5614 if (!rw_try_wlock(&pvh_global_lock)) { 5615 PMAP_UNLOCK(pmap); 5616 goto resume; 5617 } 5618 sched_pin(); 5619 } 5620 if (!pmap_demote_pte1(pmap, pte1p, sva)) { 5621 /* 5622 * The large page mapping was destroyed. 5623 */ 5624 continue; 5625 } 5626 5627 /* 5628 * Unless the page mappings are wired, remove the 5629 * mapping to a single page so that a subsequent 5630 * access may repromote. Since the underlying L2 page 5631 * table is fully populated, this removal never 5632 * frees a L2 page table page. 
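 * That is also why pmap_remove_pte2() below can be called with a NULL
 * free list: with the L2 page table fully populated, pmap_unuse_pt2()
 * never gets to free a PT2 page here.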
5633 */ 5634 if (!pte1_is_wired(opte1)) { 5635 pte2p = pmap_pte2_quick(pmap, sva); 5636 KASSERT(pte2_is_valid(pte2_load(pte2p)), 5637 ("%s: invalid PTE2", __func__)); 5638 pmap_remove_pte2(pmap, pte2p, sva, NULL); 5639 } 5640 } 5641 if (pdnxt > eva) 5642 pdnxt = eva; 5643 for (pte2p = pmap_pte2_quick(pmap, sva); sva != pdnxt; pte2p++, 5644 sva += PAGE_SIZE) { 5645 pte2 = pte2_load(pte2p); 5646 if (!pte2_is_valid(pte2) || !pte2_is_managed(pte2)) 5647 continue; 5648 else if (pte2_is_dirty(pte2)) { 5649 if (advice == MADV_DONTNEED) { 5650 /* 5651 * Future calls to pmap_is_modified() 5652 * can be avoided by making the page 5653 * dirty now. 5654 */ 5655 m = PHYS_TO_VM_PAGE(pte2_pa(pte2)); 5656 vm_page_dirty(m); 5657 } 5658 pte2_set_bit(pte2p, PTE2_NM); 5659 pte2_clear_bit(pte2p, PTE2_A); 5660 } else if ((pte2 & PTE2_A) != 0) 5661 pte2_clear_bit(pte2p, PTE2_A); 5662 else 5663 continue; 5664 pmap_tlb_flush(pmap, sva); 5665 } 5666 } 5667 if (pv_lists_locked) { 5668 sched_unpin(); 5669 rw_wunlock(&pvh_global_lock); 5670 } 5671 PMAP_UNLOCK(pmap); 5672 } 5673 5674 /* 5675 * Clear the modify bits on the specified physical page. 5676 */ 5677 void 5678 pmap_clear_modify(vm_page_t m) 5679 { 5680 struct md_page *pvh; 5681 pv_entry_t next_pv, pv; 5682 pmap_t pmap; 5683 pt1_entry_t *pte1p, opte1; 5684 pt2_entry_t *pte2p, opte2; 5685 vm_offset_t va; 5686 5687 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 5688 ("%s: page %p is not managed", __func__, m)); 5689 vm_page_assert_busied(m); 5690 5691 if (!pmap_page_is_write_mapped(m)) 5692 return; 5693 rw_wlock(&pvh_global_lock); 5694 sched_pin(); 5695 if ((m->flags & PG_FICTITIOUS) != 0) 5696 goto small_mappings; 5697 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 5698 TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) { 5699 va = pv->pv_va; 5700 pmap = PV_PMAP(pv); 5701 PMAP_LOCK(pmap); 5702 pte1p = pmap_pte1(pmap, va); 5703 opte1 = pte1_load(pte1p); 5704 if (!(opte1 & PTE1_RO)) { 5705 if (pmap_demote_pte1(pmap, pte1p, va) && 5706 !pte1_is_wired(opte1)) { 5707 /* 5708 * Write protect the mapping to a 5709 * single page so that a subsequent 5710 * write access may repromote. 5711 */ 5712 va += VM_PAGE_TO_PHYS(m) - pte1_pa(opte1); 5713 pte2p = pmap_pte2_quick(pmap, va); 5714 opte2 = pte2_load(pte2p); 5715 if ((opte2 & PTE2_V)) { 5716 pte2_set_bit(pte2p, PTE2_NM | PTE2_RO); 5717 vm_page_dirty(m); 5718 pmap_tlb_flush(pmap, va); 5719 } 5720 } 5721 } 5722 PMAP_UNLOCK(pmap); 5723 } 5724 small_mappings: 5725 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { 5726 pmap = PV_PMAP(pv); 5727 PMAP_LOCK(pmap); 5728 pte1p = pmap_pte1(pmap, pv->pv_va); 5729 KASSERT(!pte1_is_section(pte1_load(pte1p)), ("%s: found" 5730 " a section in page %p's pv list", __func__, m)); 5731 pte2p = pmap_pte2_quick(pmap, pv->pv_va); 5732 if (pte2_is_dirty(pte2_load(pte2p))) { 5733 pte2_set_bit(pte2p, PTE2_NM); 5734 pmap_tlb_flush(pmap, pv->pv_va); 5735 } 5736 PMAP_UNLOCK(pmap); 5737 } 5738 sched_unpin(); 5739 rw_wunlock(&pvh_global_lock); 5740 } 5741 5742 5743 /* 5744 * Sets the memory attribute for the specified page. 5745 */ 5746 void 5747 pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma) 5748 { 5749 pt2_entry_t *cmap2_pte2p; 5750 vm_memattr_t oma; 5751 vm_paddr_t pa; 5752 struct pcpu *pc; 5753 5754 oma = m->md.pat_mode; 5755 m->md.pat_mode = ma; 5756 5757 CTR5(KTR_PMAP, "%s: page %p - 0x%08X oma: %d, ma: %d", __func__, m, 5758 VM_PAGE_TO_PHYS(m), oma, ma); 5759 if ((m->flags & PG_FICTITIOUS) != 0) 5760 return; 5761 #if 0 5762 /* 5763 * If "m" is a normal page, flush it from the cache. 
5764 * 5765 * First, try to find an existing mapping of the page by sf 5766 * buffer. sf_buf_invalidate_cache() modifies mapping and 5767 * flushes the cache. 5768 */ 5769 if (sf_buf_invalidate_cache(m, oma)) 5770 return; 5771 #endif 5772 /* 5773 * If page is not mapped by sf buffer, map the page 5774 * transient and do invalidation. 5775 */ 5776 if (ma != oma) { 5777 pa = VM_PAGE_TO_PHYS(m); 5778 sched_pin(); 5779 pc = get_pcpu(); 5780 cmap2_pte2p = pc->pc_cmap2_pte2p; 5781 mtx_lock(&pc->pc_cmap_lock); 5782 if (pte2_load(cmap2_pte2p) != 0) 5783 panic("%s: CMAP2 busy", __func__); 5784 pte2_store(cmap2_pte2p, PTE2_KERN_NG(pa, PTE2_AP_KRW, 5785 vm_memattr_to_pte2(ma))); 5786 dcache_wbinv_poc((vm_offset_t)pc->pc_cmap2_addr, pa, PAGE_SIZE); 5787 pte2_clear(cmap2_pte2p); 5788 tlb_flush((vm_offset_t)pc->pc_cmap2_addr); 5789 sched_unpin(); 5790 mtx_unlock(&pc->pc_cmap_lock); 5791 } 5792 } 5793 5794 /* 5795 * Miscellaneous support routines follow 5796 */ 5797 5798 /* 5799 * Returns TRUE if the given page is mapped individually or as part of 5800 * a 1mpage. Otherwise, returns FALSE. 5801 */ 5802 boolean_t 5803 pmap_page_is_mapped(vm_page_t m) 5804 { 5805 boolean_t rv; 5806 5807 if ((m->oflags & VPO_UNMANAGED) != 0) 5808 return (FALSE); 5809 rw_wlock(&pvh_global_lock); 5810 rv = !TAILQ_EMPTY(&m->md.pv_list) || 5811 ((m->flags & PG_FICTITIOUS) == 0 && 5812 !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list)); 5813 rw_wunlock(&pvh_global_lock); 5814 return (rv); 5815 } 5816 5817 /* 5818 * Returns true if the pmap's pv is one of the first 5819 * 16 pvs linked to from this page. This count may 5820 * be changed upwards or downwards in the future; it 5821 * is only necessary that true be returned for a small 5822 * subset of pmaps for proper page aging. 5823 */ 5824 boolean_t 5825 pmap_page_exists_quick(pmap_t pmap, vm_page_t m) 5826 { 5827 struct md_page *pvh; 5828 pv_entry_t pv; 5829 int loops = 0; 5830 boolean_t rv; 5831 5832 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 5833 ("%s: page %p is not managed", __func__, m)); 5834 rv = FALSE; 5835 rw_wlock(&pvh_global_lock); 5836 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { 5837 if (PV_PMAP(pv) == pmap) { 5838 rv = TRUE; 5839 break; 5840 } 5841 loops++; 5842 if (loops >= 16) 5843 break; 5844 } 5845 if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) { 5846 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 5847 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { 5848 if (PV_PMAP(pv) == pmap) { 5849 rv = TRUE; 5850 break; 5851 } 5852 loops++; 5853 if (loops >= 16) 5854 break; 5855 } 5856 } 5857 rw_wunlock(&pvh_global_lock); 5858 return (rv); 5859 } 5860 5861 /* 5862 * pmap_zero_page zeros the specified hardware page by mapping 5863 * the page into KVM and using bzero to clear its contents. 5864 */ 5865 void 5866 pmap_zero_page(vm_page_t m) 5867 { 5868 pt2_entry_t *cmap2_pte2p; 5869 struct pcpu *pc; 5870 5871 sched_pin(); 5872 pc = get_pcpu(); 5873 cmap2_pte2p = pc->pc_cmap2_pte2p; 5874 mtx_lock(&pc->pc_cmap_lock); 5875 if (pte2_load(cmap2_pte2p) != 0) 5876 panic("%s: CMAP2 busy", __func__); 5877 pte2_store(cmap2_pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(m), PTE2_AP_KRW, 5878 vm_page_pte2_attr(m))); 5879 pagezero(pc->pc_cmap2_addr); 5880 pte2_clear(cmap2_pte2p); 5881 tlb_flush((vm_offset_t)pc->pc_cmap2_addr); 5882 sched_unpin(); 5883 mtx_unlock(&pc->pc_cmap_lock); 5884 } 5885 5886 /* 5887 * pmap_zero_page_area zeros the specified hardware page by mapping 5888 * the page into KVM and using bzero to clear its contents. 
5889 * 5890 * off and size may not cover an area beyond a single hardware page. 5891 */ 5892 void 5893 pmap_zero_page_area(vm_page_t m, int off, int size) 5894 { 5895 pt2_entry_t *cmap2_pte2p; 5896 struct pcpu *pc; 5897 5898 sched_pin(); 5899 pc = get_pcpu(); 5900 cmap2_pte2p = pc->pc_cmap2_pte2p; 5901 mtx_lock(&pc->pc_cmap_lock); 5902 if (pte2_load(cmap2_pte2p) != 0) 5903 panic("%s: CMAP2 busy", __func__); 5904 pte2_store(cmap2_pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(m), PTE2_AP_KRW, 5905 vm_page_pte2_attr(m))); 5906 if (off == 0 && size == PAGE_SIZE) 5907 pagezero(pc->pc_cmap2_addr); 5908 else 5909 bzero(pc->pc_cmap2_addr + off, size); 5910 pte2_clear(cmap2_pte2p); 5911 tlb_flush((vm_offset_t)pc->pc_cmap2_addr); 5912 sched_unpin(); 5913 mtx_unlock(&pc->pc_cmap_lock); 5914 } 5915 5916 /* 5917 * pmap_copy_page copies the specified (machine independent) 5918 * page by mapping the page into virtual memory and using 5919 * bcopy to copy the page, one machine dependent page at a 5920 * time. 5921 */ 5922 void 5923 pmap_copy_page(vm_page_t src, vm_page_t dst) 5924 { 5925 pt2_entry_t *cmap1_pte2p, *cmap2_pte2p; 5926 struct pcpu *pc; 5927 5928 sched_pin(); 5929 pc = get_pcpu(); 5930 cmap1_pte2p = pc->pc_cmap1_pte2p; 5931 cmap2_pte2p = pc->pc_cmap2_pte2p; 5932 mtx_lock(&pc->pc_cmap_lock); 5933 if (pte2_load(cmap1_pte2p) != 0) 5934 panic("%s: CMAP1 busy", __func__); 5935 if (pte2_load(cmap2_pte2p) != 0) 5936 panic("%s: CMAP2 busy", __func__); 5937 pte2_store(cmap1_pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(src), 5938 PTE2_AP_KR | PTE2_NM, vm_page_pte2_attr(src))); 5939 pte2_store(cmap2_pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(dst), 5940 PTE2_AP_KRW, vm_page_pte2_attr(dst))); 5941 bcopy(pc->pc_cmap1_addr, pc->pc_cmap2_addr, PAGE_SIZE); 5942 pte2_clear(cmap1_pte2p); 5943 tlb_flush((vm_offset_t)pc->pc_cmap1_addr); 5944 pte2_clear(cmap2_pte2p); 5945 tlb_flush((vm_offset_t)pc->pc_cmap2_addr); 5946 sched_unpin(); 5947 mtx_unlock(&pc->pc_cmap_lock); 5948 } 5949 5950 int unmapped_buf_allowed = 1; 5951 5952 void 5953 pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[], 5954 vm_offset_t b_offset, int xfersize) 5955 { 5956 pt2_entry_t *cmap1_pte2p, *cmap2_pte2p; 5957 vm_page_t a_pg, b_pg; 5958 char *a_cp, *b_cp; 5959 vm_offset_t a_pg_offset, b_pg_offset; 5960 struct pcpu *pc; 5961 int cnt; 5962 5963 sched_pin(); 5964 pc = get_pcpu(); 5965 cmap1_pte2p = pc->pc_cmap1_pte2p; 5966 cmap2_pte2p = pc->pc_cmap2_pte2p; 5967 mtx_lock(&pc->pc_cmap_lock); 5968 if (pte2_load(cmap1_pte2p) != 0) 5969 panic("pmap_copy_pages: CMAP1 busy"); 5970 if (pte2_load(cmap2_pte2p) != 0) 5971 panic("pmap_copy_pages: CMAP2 busy"); 5972 while (xfersize > 0) { 5973 a_pg = ma[a_offset >> PAGE_SHIFT]; 5974 a_pg_offset = a_offset & PAGE_MASK; 5975 cnt = min(xfersize, PAGE_SIZE - a_pg_offset); 5976 b_pg = mb[b_offset >> PAGE_SHIFT]; 5977 b_pg_offset = b_offset & PAGE_MASK; 5978 cnt = min(cnt, PAGE_SIZE - b_pg_offset); 5979 pte2_store(cmap1_pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(a_pg), 5980 PTE2_AP_KR | PTE2_NM, vm_page_pte2_attr(a_pg))); 5981 tlb_flush_local((vm_offset_t)pc->pc_cmap1_addr); 5982 pte2_store(cmap2_pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(b_pg), 5983 PTE2_AP_KRW, vm_page_pte2_attr(b_pg))); 5984 tlb_flush_local((vm_offset_t)pc->pc_cmap2_addr); 5985 a_cp = pc->pc_cmap1_addr + a_pg_offset; 5986 b_cp = pc->pc_cmap2_addr + b_pg_offset; 5987 bcopy(a_cp, b_cp, cnt); 5988 a_offset += cnt; 5989 b_offset += cnt; 5990 xfersize -= cnt; 5991 } 5992 pte2_clear(cmap1_pte2p); 5993 tlb_flush((vm_offset_t)pc->pc_cmap1_addr); 5994 pte2_clear(cmap2_pte2p); 
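	/* Invalidate the stale CMAP2 translation before the per-CPU window is handed back. */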
5995 tlb_flush((vm_offset_t)pc->pc_cmap2_addr); 5996 sched_unpin(); 5997 mtx_unlock(&pc->pc_cmap_lock); 5998 } 5999 6000 vm_offset_t 6001 pmap_quick_enter_page(vm_page_t m) 6002 { 6003 struct pcpu *pc; 6004 pt2_entry_t *pte2p; 6005 6006 critical_enter(); 6007 pc = get_pcpu(); 6008 pte2p = pc->pc_qmap_pte2p; 6009 6010 KASSERT(pte2_load(pte2p) == 0, ("%s: PTE2 busy", __func__)); 6011 6012 pte2_store(pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(m), PTE2_AP_KRW, 6013 vm_page_pte2_attr(m))); 6014 return (pc->pc_qmap_addr); 6015 } 6016 6017 void 6018 pmap_quick_remove_page(vm_offset_t addr) 6019 { 6020 struct pcpu *pc; 6021 pt2_entry_t *pte2p; 6022 6023 pc = get_pcpu(); 6024 pte2p = pc->pc_qmap_pte2p; 6025 6026 KASSERT(addr == pc->pc_qmap_addr, ("%s: invalid address", __func__)); 6027 KASSERT(pte2_load(pte2p) != 0, ("%s: PTE2 not in use", __func__)); 6028 6029 pte2_clear(pte2p); 6030 tlb_flush(pc->pc_qmap_addr); 6031 critical_exit(); 6032 } 6033 6034 /* 6035 * Copy the range specified by src_addr/len 6036 * from the source map to the range dst_addr/len 6037 * in the destination map. 6038 * 6039 * This routine is only advisory and need not do anything. 6040 */ 6041 void 6042 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len, 6043 vm_offset_t src_addr) 6044 { 6045 struct spglist free; 6046 vm_offset_t addr; 6047 vm_offset_t end_addr = src_addr + len; 6048 vm_offset_t nextva; 6049 6050 if (dst_addr != src_addr) 6051 return; 6052 6053 if (!pmap_is_current(src_pmap)) 6054 return; 6055 6056 rw_wlock(&pvh_global_lock); 6057 if (dst_pmap < src_pmap) { 6058 PMAP_LOCK(dst_pmap); 6059 PMAP_LOCK(src_pmap); 6060 } else { 6061 PMAP_LOCK(src_pmap); 6062 PMAP_LOCK(dst_pmap); 6063 } 6064 sched_pin(); 6065 for (addr = src_addr; addr < end_addr; addr = nextva) { 6066 pt2_entry_t *src_pte2p, *dst_pte2p; 6067 vm_page_t dst_mpt2pg, src_mpt2pg; 6068 pt1_entry_t src_pte1; 6069 u_int pte1_idx; 6070 6071 KASSERT(addr < VM_MAXUSER_ADDRESS, 6072 ("%s: invalid to pmap_copy page tables", __func__)); 6073 6074 nextva = pte1_trunc(addr + PTE1_SIZE); 6075 if (nextva < addr) 6076 nextva = end_addr; 6077 6078 pte1_idx = pte1_index(addr); 6079 src_pte1 = src_pmap->pm_pt1[pte1_idx]; 6080 if (pte1_is_section(src_pte1)) { 6081 if ((addr & PTE1_OFFSET) != 0 || 6082 (addr + PTE1_SIZE) > end_addr) 6083 continue; 6084 if (dst_pmap->pm_pt1[pte1_idx] == 0 && 6085 (!pte1_is_managed(src_pte1) || 6086 pmap_pv_insert_pte1(dst_pmap, addr, src_pte1, 6087 PMAP_ENTER_NORECLAIM))) { 6088 dst_pmap->pm_pt1[pte1_idx] = src_pte1 & 6089 ~PTE1_W; 6090 dst_pmap->pm_stats.resident_count += 6091 PTE1_SIZE / PAGE_SIZE; 6092 pmap_pte1_mappings++; 6093 } 6094 continue; 6095 } else if (!pte1_is_link(src_pte1)) 6096 continue; 6097 6098 src_mpt2pg = PHYS_TO_VM_PAGE(pte1_link_pa(src_pte1)); 6099 6100 /* 6101 * We leave PT2s to be linked from PT1 even if they are not 6102 * referenced until all PT2s in a page are without reference. 6103 * 6104 * QQQ: It could be changed ... 
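 * (Presumably: a PT1 link could instead be cleared as soon as its PT2's
 * wire count drops to zero; the disabled KASSERT below assumes that
 * stricter policy, while the #else branch copes with the current one.)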
6105 */ 6106 #if 0 /* single_pt2_link_is_cleared */ 6107 KASSERT(pt2_wirecount_get(src_mpt2pg, pte1_idx) > 0, 6108 ("%s: source page table page is unused", __func__)); 6109 #else 6110 if (pt2_wirecount_get(src_mpt2pg, pte1_idx) == 0) 6111 continue; 6112 #endif 6113 if (nextva > end_addr) 6114 nextva = end_addr; 6115 6116 src_pte2p = pt2map_entry(addr); 6117 while (addr < nextva) { 6118 pt2_entry_t temp_pte2; 6119 temp_pte2 = pte2_load(src_pte2p); 6120 /* 6121 * we only virtual copy managed pages 6122 */ 6123 if (pte2_is_managed(temp_pte2)) { 6124 dst_mpt2pg = pmap_allocpte2(dst_pmap, addr, 6125 PMAP_ENTER_NOSLEEP); 6126 if (dst_mpt2pg == NULL) 6127 goto out; 6128 dst_pte2p = pmap_pte2_quick(dst_pmap, addr); 6129 if (!pte2_is_valid(pte2_load(dst_pte2p)) && 6130 pmap_try_insert_pv_entry(dst_pmap, addr, 6131 PHYS_TO_VM_PAGE(pte2_pa(temp_pte2)))) { 6132 /* 6133 * Clear the wired, modified, and 6134 * accessed (referenced) bits 6135 * during the copy. 6136 */ 6137 temp_pte2 &= ~(PTE2_W | PTE2_A); 6138 temp_pte2 |= PTE2_NM; 6139 pte2_store(dst_pte2p, temp_pte2); 6140 dst_pmap->pm_stats.resident_count++; 6141 } else { 6142 SLIST_INIT(&free); 6143 if (pmap_unwire_pt2(dst_pmap, addr, 6144 dst_mpt2pg, &free)) { 6145 pmap_tlb_flush(dst_pmap, addr); 6146 vm_page_free_pages_toq(&free, 6147 false); 6148 } 6149 goto out; 6150 } 6151 if (pt2_wirecount_get(dst_mpt2pg, pte1_idx) >= 6152 pt2_wirecount_get(src_mpt2pg, pte1_idx)) 6153 break; 6154 } 6155 addr += PAGE_SIZE; 6156 src_pte2p++; 6157 } 6158 } 6159 out: 6160 sched_unpin(); 6161 rw_wunlock(&pvh_global_lock); 6162 PMAP_UNLOCK(src_pmap); 6163 PMAP_UNLOCK(dst_pmap); 6164 } 6165 6166 /* 6167 * Increase the starting virtual address of the given mapping if a 6168 * different alignment might result in more section mappings. 6169 */ 6170 void 6171 pmap_align_superpage(vm_object_t object, vm_ooffset_t offset, 6172 vm_offset_t *addr, vm_size_t size) 6173 { 6174 vm_offset_t pte1_offset; 6175 6176 if (size < PTE1_SIZE) 6177 return; 6178 if (object != NULL && (object->flags & OBJ_COLORED) != 0) 6179 offset += ptoa(object->pg_color); 6180 pte1_offset = offset & PTE1_OFFSET; 6181 if (size - ((PTE1_SIZE - pte1_offset) & PTE1_OFFSET) < PTE1_SIZE || 6182 (*addr & PTE1_OFFSET) == pte1_offset) 6183 return; 6184 if ((*addr & PTE1_OFFSET) < pte1_offset) 6185 *addr = pte1_trunc(*addr) + pte1_offset; 6186 else 6187 *addr = pte1_roundup(*addr) + pte1_offset; 6188 } 6189 6190 void 6191 pmap_activate(struct thread *td) 6192 { 6193 pmap_t pmap, oldpmap; 6194 u_int cpuid, ttb; 6195 6196 PDEBUG(9, printf("%s: td = %08x\n", __func__, (uint32_t)td)); 6197 6198 critical_enter(); 6199 pmap = vmspace_pmap(td->td_proc->p_vmspace); 6200 oldpmap = PCPU_GET(curpmap); 6201 cpuid = PCPU_GET(cpuid); 6202 6203 #if defined(SMP) 6204 CPU_CLR_ATOMIC(cpuid, &oldpmap->pm_active); 6205 CPU_SET_ATOMIC(cpuid, &pmap->pm_active); 6206 #else 6207 CPU_CLR(cpuid, &oldpmap->pm_active); 6208 CPU_SET(cpuid, &pmap->pm_active); 6209 #endif 6210 6211 ttb = pmap_ttb_get(pmap); 6212 6213 /* 6214 * pmap_activate is for the current thread on the current cpu 6215 */ 6216 td->td_pcb->pcb_pagedir = ttb; 6217 cp15_ttbr_set(ttb); 6218 PCPU_SET(curpmap, pmap); 6219 critical_exit(); 6220 } 6221 6222 /* 6223 * Perform the pmap work for mincore(2). If the page is not both referenced and 6224 * modified by this pmap, returns its physical address so that the caller can 6225 * find other mappings. 
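 * The address is reported only for managed mappings, and only while the
 * referenced/modified state of other mappings could still change the
 * result.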
6226 */ 6227 int 6228 pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap) 6229 { 6230 pt1_entry_t *pte1p, pte1; 6231 pt2_entry_t *pte2p, pte2; 6232 vm_paddr_t pa; 6233 bool managed; 6234 int val; 6235 6236 PMAP_LOCK(pmap); 6237 pte1p = pmap_pte1(pmap, addr); 6238 pte1 = pte1_load(pte1p); 6239 if (pte1_is_section(pte1)) { 6240 pa = trunc_page(pte1_pa(pte1) | (addr & PTE1_OFFSET)); 6241 managed = pte1_is_managed(pte1); 6242 val = MINCORE_SUPER | MINCORE_INCORE; 6243 if (pte1_is_dirty(pte1)) 6244 val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER; 6245 if (pte1 & PTE1_A) 6246 val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER; 6247 } else if (pte1_is_link(pte1)) { 6248 pte2p = pmap_pte2(pmap, addr); 6249 pte2 = pte2_load(pte2p); 6250 pmap_pte2_release(pte2p); 6251 pa = pte2_pa(pte2); 6252 managed = pte2_is_managed(pte2); 6253 val = MINCORE_INCORE; 6254 if (pte2_is_dirty(pte2)) 6255 val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER; 6256 if (pte2 & PTE2_A) 6257 val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER; 6258 } else { 6259 managed = false; 6260 val = 0; 6261 } 6262 if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) != 6263 (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && managed) { 6264 *pap = pa; 6265 } 6266 PMAP_UNLOCK(pmap); 6267 return (val); 6268 } 6269 6270 void 6271 pmap_kenter_device(vm_offset_t va, vm_size_t size, vm_paddr_t pa) 6272 { 6273 vm_offset_t sva; 6274 uint32_t l2attr; 6275 6276 KASSERT((size & PAGE_MASK) == 0, 6277 ("%s: device mapping not page-sized", __func__)); 6278 6279 sva = va; 6280 l2attr = vm_memattr_to_pte2(VM_MEMATTR_DEVICE); 6281 while (size != 0) { 6282 pmap_kenter_prot_attr(va, pa, PTE2_AP_KRW, l2attr); 6283 va += PAGE_SIZE; 6284 pa += PAGE_SIZE; 6285 size -= PAGE_SIZE; 6286 } 6287 tlb_flush_range(sva, va - sva); 6288 } 6289 6290 void 6291 pmap_kremove_device(vm_offset_t va, vm_size_t size) 6292 { 6293 vm_offset_t sva; 6294 6295 KASSERT((size & PAGE_MASK) == 0, 6296 ("%s: device mapping not page-sized", __func__)); 6297 6298 sva = va; 6299 while (size != 0) { 6300 pmap_kremove(va); 6301 va += PAGE_SIZE; 6302 size -= PAGE_SIZE; 6303 } 6304 tlb_flush_range(sva, va - sva); 6305 } 6306 6307 void 6308 pmap_set_pcb_pagedir(pmap_t pmap, struct pcb *pcb) 6309 { 6310 6311 pcb->pcb_pagedir = pmap_ttb_get(pmap); 6312 } 6313 6314 6315 /* 6316 * Clean L1 data cache range by physical address. 6317 * The range must be within a single page. 6318 */ 6319 static void 6320 pmap_dcache_wb_pou(vm_paddr_t pa, vm_size_t size, uint32_t attr) 6321 { 6322 pt2_entry_t *cmap2_pte2p; 6323 struct pcpu *pc; 6324 6325 KASSERT(((pa & PAGE_MASK) + size) <= PAGE_SIZE, 6326 ("%s: not on single page", __func__)); 6327 6328 sched_pin(); 6329 pc = get_pcpu(); 6330 cmap2_pte2p = pc->pc_cmap2_pte2p; 6331 mtx_lock(&pc->pc_cmap_lock); 6332 if (pte2_load(cmap2_pte2p) != 0) 6333 panic("%s: CMAP2 busy", __func__); 6334 pte2_store(cmap2_pte2p, PTE2_KERN_NG(pa, PTE2_AP_KRW, attr)); 6335 dcache_wb_pou((vm_offset_t)pc->pc_cmap2_addr + (pa & PAGE_MASK), size); 6336 pte2_clear(cmap2_pte2p); 6337 tlb_flush((vm_offset_t)pc->pc_cmap2_addr); 6338 sched_unpin(); 6339 mtx_unlock(&pc->pc_cmap_lock); 6340 } 6341 6342 /* 6343 * Sync instruction cache range which is not mapped yet. 6344 */ 6345 void 6346 cache_icache_sync_fresh(vm_offset_t va, vm_paddr_t pa, vm_size_t size) 6347 { 6348 uint32_t len, offset; 6349 vm_page_t m; 6350 6351 /* Write back d-cache on given address range. 
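	 * The data must reach the point of unification before the I-cache
	 * invalidation below; otherwise stale instructions could still be
	 * fetched.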
*/ 6352 offset = pa & PAGE_MASK; 6353 for ( ; size != 0; size -= len, pa += len, offset = 0) { 6354 len = min(PAGE_SIZE - offset, size); 6355 m = PHYS_TO_VM_PAGE(pa); 6356 KASSERT(m != NULL, ("%s: vm_page_t is null for %#x", 6357 __func__, pa)); 6358 pmap_dcache_wb_pou(pa, len, vm_page_pte2_attr(m)); 6359 } 6360 /* 6361 * I-cache is VIPT. Only way how to flush all virtual mappings 6362 * on given physical address is to invalidate all i-cache. 6363 */ 6364 icache_inv_all(); 6365 } 6366 6367 void 6368 pmap_sync_icache(pmap_t pmap, vm_offset_t va, vm_size_t size) 6369 { 6370 6371 /* Write back d-cache on given address range. */ 6372 if (va >= VM_MIN_KERNEL_ADDRESS) { 6373 dcache_wb_pou(va, size); 6374 } else { 6375 uint32_t len, offset; 6376 vm_paddr_t pa; 6377 vm_page_t m; 6378 6379 offset = va & PAGE_MASK; 6380 for ( ; size != 0; size -= len, va += len, offset = 0) { 6381 pa = pmap_extract(pmap, va); /* offset is preserved */ 6382 len = min(PAGE_SIZE - offset, size); 6383 m = PHYS_TO_VM_PAGE(pa); 6384 KASSERT(m != NULL, ("%s: vm_page_t is null for %#x", 6385 __func__, pa)); 6386 pmap_dcache_wb_pou(pa, len, vm_page_pte2_attr(m)); 6387 } 6388 } 6389 /* 6390 * I-cache is VIPT. Only way how to flush all virtual mappings 6391 * on given physical address is to invalidate all i-cache. 6392 */ 6393 icache_inv_all(); 6394 } 6395 6396 /* 6397 * The implementation of pmap_fault() uses IN_RANGE2() macro which 6398 * depends on the fact that given range size is a power of 2. 6399 */ 6400 CTASSERT(powerof2(NB_IN_PT1)); 6401 CTASSERT(powerof2(PT2MAP_SIZE)); 6402 6403 #define IN_RANGE2(addr, start, size) \ 6404 ((vm_offset_t)(start) == ((vm_offset_t)(addr) & ~((size) - 1))) 6405 6406 /* 6407 * Handle access and R/W emulation faults. 6408 */ 6409 int 6410 pmap_fault(pmap_t pmap, vm_offset_t far, uint32_t fsr, int idx, bool usermode) 6411 { 6412 pt1_entry_t *pte1p, pte1; 6413 pt2_entry_t *pte2p, pte2; 6414 6415 if (pmap == NULL) 6416 pmap = kernel_pmap; 6417 6418 /* 6419 * In kernel, we should never get abort with FAR which is in range of 6420 * pmap->pm_pt1 or PT2MAP address spaces. If it happens, stop here 6421 * and print out a useful abort message and even get to the debugger 6422 * otherwise it likely ends with never ending loop of aborts. 6423 */ 6424 if (__predict_false(IN_RANGE2(far, pmap->pm_pt1, NB_IN_PT1))) { 6425 /* 6426 * All L1 tables should always be mapped and present. 6427 * However, we check only current one herein. For user mode, 6428 * only permission abort from malicious user is not fatal. 6429 * And alignment abort as it may have higher priority. 6430 */ 6431 if (!usermode || (idx != FAULT_ALIGN && idx != FAULT_PERM_L2)) { 6432 CTR4(KTR_PMAP, "%s: pmap %#x pm_pt1 %#x far %#x", 6433 __func__, pmap, pmap->pm_pt1, far); 6434 panic("%s: pm_pt1 abort", __func__); 6435 } 6436 return (KERN_INVALID_ADDRESS); 6437 } 6438 if (__predict_false(IN_RANGE2(far, PT2MAP, PT2MAP_SIZE))) { 6439 /* 6440 * PT2MAP should be always mapped and present in current 6441 * L1 table. However, only existing L2 tables are mapped 6442 * in PT2MAP. For user mode, only L2 translation abort and 6443 * permission abort from malicious user is not fatal. 6444 * And alignment abort as it may have higher priority. 
6445 */ 6446 if (!usermode || (idx != FAULT_ALIGN && 6447 idx != FAULT_TRAN_L2 && idx != FAULT_PERM_L2)) { 6448 CTR4(KTR_PMAP, "%s: pmap %#x PT2MAP %#x far %#x", 6449 __func__, pmap, PT2MAP, far); 6450 panic("%s: PT2MAP abort", __func__); 6451 } 6452 return (KERN_INVALID_ADDRESS); 6453 } 6454 6455 /* 6456 * A pmap lock is used below for handling of access and R/W emulation 6457 * aborts. They were handled by atomic operations before so some 6458 * analysis of new situation is needed to answer the following question: 6459 * Is it safe to use the lock even for these aborts? 6460 * 6461 * There may happen two cases in general: 6462 * 6463 * (1) Aborts while the pmap lock is locked already - this should not 6464 * happen as pmap lock is not recursive. However, under pmap lock only 6465 * internal kernel data should be accessed and such data should be 6466 * mapped with A bit set and NM bit cleared. If double abort happens, 6467 * then a mapping of data which has caused it must be fixed. Further, 6468 * all new mappings are always made with A bit set and the bit can be 6469 * cleared only on managed mappings. 6470 * 6471 * (2) Aborts while another lock(s) is/are locked - this already can 6472 * happen. However, there is no difference here if it's either access or 6473 * R/W emulation abort, or if it's some other abort. 6474 */ 6475 6476 PMAP_LOCK(pmap); 6477 #ifdef INVARIANTS 6478 pte1 = pte1_load(pmap_pte1(pmap, far)); 6479 if (pte1_is_link(pte1)) { 6480 /* 6481 * Check in advance that associated L2 page table is mapped into 6482 * PT2MAP space. Note that faulty access to not mapped L2 page 6483 * table is caught in more general check above where "far" is 6484 * checked that it does not lay in PT2MAP space. Note also that 6485 * L1 page table and PT2TAB always exist and are mapped. 6486 */ 6487 pte2 = pt2tab_load(pmap_pt2tab_entry(pmap, far)); 6488 if (!pte2_is_valid(pte2)) 6489 panic("%s: missing L2 page table (%p, %#x)", 6490 __func__, pmap, far); 6491 } 6492 #endif 6493 #ifdef SMP 6494 /* 6495 * Special treatment is due to break-before-make approach done when 6496 * pte1 is updated for userland mapping during section promotion or 6497 * demotion. If not caught here, pmap_enter() can find a section 6498 * mapping on faulting address. That is not allowed. 6499 */ 6500 if (idx == FAULT_TRAN_L1 && usermode && cp15_ats1cur_check(far) == 0) { 6501 PMAP_UNLOCK(pmap); 6502 return (KERN_SUCCESS); 6503 } 6504 #endif 6505 /* 6506 * Accesss bits for page and section. Note that the entry 6507 * is not in TLB yet, so TLB flush is not necessary. 6508 * 6509 * QQQ: This is hardware emulation, we do not call userret() 6510 * for aborts from user mode. 6511 */ 6512 if (idx == FAULT_ACCESS_L2) { 6513 pte1 = pte1_load(pmap_pte1(pmap, far)); 6514 if (pte1_is_link(pte1)) { 6515 /* L2 page table should exist and be mapped. */ 6516 pte2p = pt2map_entry(far); 6517 pte2 = pte2_load(pte2p); 6518 if (pte2_is_valid(pte2)) { 6519 pte2_store(pte2p, pte2 | PTE2_A); 6520 PMAP_UNLOCK(pmap); 6521 return (KERN_SUCCESS); 6522 } 6523 } else { 6524 /* 6525 * We got L2 access fault but PTE1 is not a link. 6526 * Probably some race happened, do nothing. 
6527 */ 6528 CTR3(KTR_PMAP, "%s: FAULT_ACCESS_L2 - pmap %#x far %#x", 6529 __func__, pmap, far); 6530 PMAP_UNLOCK(pmap); 6531 return (KERN_SUCCESS); 6532 } 6533 } 6534 if (idx == FAULT_ACCESS_L1) { 6535 pte1p = pmap_pte1(pmap, far); 6536 pte1 = pte1_load(pte1p); 6537 if (pte1_is_section(pte1)) { 6538 pte1_store(pte1p, pte1 | PTE1_A); 6539 PMAP_UNLOCK(pmap); 6540 return (KERN_SUCCESS); 6541 } else { 6542 /* 6543 * We got L1 access fault but PTE1 is not section 6544 * mapping. Probably some race happened, do nothing. 6545 */ 6546 CTR3(KTR_PMAP, "%s: FAULT_ACCESS_L1 - pmap %#x far %#x", 6547 __func__, pmap, far); 6548 PMAP_UNLOCK(pmap); 6549 return (KERN_SUCCESS); 6550 } 6551 } 6552 6553 /* 6554 * Handle modify bits for page and section. Note that the modify 6555 * bit is emulated by software. So PTEx_RO is software read only 6556 * bit and PTEx_NM flag is real hardware read only bit. 6557 * 6558 * QQQ: This is hardware emulation, we do not call userret() 6559 * for aborts from user mode. 6560 */ 6561 if ((fsr & FSR_WNR) && (idx == FAULT_PERM_L2)) { 6562 pte1 = pte1_load(pmap_pte1(pmap, far)); 6563 if (pte1_is_link(pte1)) { 6564 /* L2 page table should exist and be mapped. */ 6565 pte2p = pt2map_entry(far); 6566 pte2 = pte2_load(pte2p); 6567 if (pte2_is_valid(pte2) && !(pte2 & PTE2_RO) && 6568 (pte2 & PTE2_NM)) { 6569 pte2_store(pte2p, pte2 & ~PTE2_NM); 6570 tlb_flush(trunc_page(far)); 6571 PMAP_UNLOCK(pmap); 6572 return (KERN_SUCCESS); 6573 } 6574 } else { 6575 /* 6576 * We got L2 permission fault but PTE1 is not a link. 6577 * Probably some race happened, do nothing. 6578 */ 6579 CTR3(KTR_PMAP, "%s: FAULT_PERM_L2 - pmap %#x far %#x", 6580 __func__, pmap, far); 6581 PMAP_UNLOCK(pmap); 6582 return (KERN_SUCCESS); 6583 } 6584 } 6585 if ((fsr & FSR_WNR) && (idx == FAULT_PERM_L1)) { 6586 pte1p = pmap_pte1(pmap, far); 6587 pte1 = pte1_load(pte1p); 6588 if (pte1_is_section(pte1)) { 6589 if (!(pte1 & PTE1_RO) && (pte1 & PTE1_NM)) { 6590 pte1_store(pte1p, pte1 & ~PTE1_NM); 6591 tlb_flush(pte1_trunc(far)); 6592 PMAP_UNLOCK(pmap); 6593 return (KERN_SUCCESS); 6594 } 6595 } else { 6596 /* 6597 * We got L1 permission fault but PTE1 is not section 6598 * mapping. Probably some race happened, do nothing. 6599 */ 6600 CTR3(KTR_PMAP, "%s: FAULT_PERM_L1 - pmap %#x far %#x", 6601 __func__, pmap, far); 6602 PMAP_UNLOCK(pmap); 6603 return (KERN_SUCCESS); 6604 } 6605 } 6606 6607 /* 6608 * QQQ: The previous code, mainly fast handling of access and 6609 * modify bits aborts, could be moved to ASM. Now we are 6610 * starting to deal with not fast aborts. 6611 */ 6612 PMAP_UNLOCK(pmap); 6613 return (KERN_FAILURE); 6614 } 6615 6616 #if defined(PMAP_DEBUG) 6617 /* 6618 * Reusing of KVA used in pmap_zero_page function !!! 
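 * The page is mapped through the per-CPU CMAP2 window, just as
 * pmap_zero_page() does, and the first non-zero word found triggers
 * a panic.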
6619 */ 6620 static void 6621 pmap_zero_page_check(vm_page_t m) 6622 { 6623 pt2_entry_t *cmap2_pte2p; 6624 uint32_t *p, *end; 6625 struct pcpu *pc; 6626 6627 sched_pin(); 6628 pc = get_pcpu(); 6629 cmap2_pte2p = pc->pc_cmap2_pte2p; 6630 mtx_lock(&pc->pc_cmap_lock); 6631 if (pte2_load(cmap2_pte2p) != 0) 6632 panic("%s: CMAP2 busy", __func__); 6633 pte2_store(cmap2_pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(m), PTE2_AP_KRW, 6634 vm_page_pte2_attr(m))); 6635 end = (uint32_t*)(pc->pc_cmap2_addr + PAGE_SIZE); 6636 for (p = (uint32_t*)pc->pc_cmap2_addr; p < end; p++) 6637 if (*p != 0) 6638 panic("%s: page %p not zero, va: %p", __func__, m, 6639 pc->pc_cmap2_addr); 6640 pte2_clear(cmap2_pte2p); 6641 tlb_flush((vm_offset_t)pc->pc_cmap2_addr); 6642 sched_unpin(); 6643 mtx_unlock(&pc->pc_cmap_lock); 6644 } 6645 6646 int 6647 pmap_pid_dump(int pid) 6648 { 6649 pmap_t pmap; 6650 struct proc *p; 6651 int npte2 = 0; 6652 int i, j, index; 6653 6654 sx_slock(&allproc_lock); 6655 FOREACH_PROC_IN_SYSTEM(p) { 6656 if (p->p_pid != pid || p->p_vmspace == NULL) 6657 continue; 6658 index = 0; 6659 pmap = vmspace_pmap(p->p_vmspace); 6660 for (i = 0; i < NPTE1_IN_PT1; i++) { 6661 pt1_entry_t pte1; 6662 pt2_entry_t *pte2p, pte2; 6663 vm_offset_t base, va; 6664 vm_paddr_t pa; 6665 vm_page_t m; 6666 6667 base = i << PTE1_SHIFT; 6668 pte1 = pte1_load(&pmap->pm_pt1[i]); 6669 6670 if (pte1_is_section(pte1)) { 6671 /* 6672 * QQQ: Do something here! 6673 */ 6674 } else if (pte1_is_link(pte1)) { 6675 for (j = 0; j < NPTE2_IN_PT2; j++) { 6676 va = base + (j << PAGE_SHIFT); 6677 if (va >= VM_MIN_KERNEL_ADDRESS) { 6678 if (index) { 6679 index = 0; 6680 printf("\n"); 6681 } 6682 sx_sunlock(&allproc_lock); 6683 return (npte2); 6684 } 6685 pte2p = pmap_pte2(pmap, va); 6686 pte2 = pte2_load(pte2p); 6687 pmap_pte2_release(pte2p); 6688 if (!pte2_is_valid(pte2)) 6689 continue; 6690 6691 pa = pte2_pa(pte2); 6692 m = PHYS_TO_VM_PAGE(pa); 6693 printf("va: 0x%x, pa: 0x%x, w: %d, " 6694 "f: 0x%x", va, pa, 6695 m->ref_count, m->flags); 6696 npte2++; 6697 index++; 6698 if (index >= 2) { 6699 index = 0; 6700 printf("\n"); 6701 } else { 6702 printf(" "); 6703 } 6704 } 6705 } 6706 } 6707 } 6708 sx_sunlock(&allproc_lock); 6709 return (npte2); 6710 } 6711 6712 #endif 6713 6714 #ifdef DDB 6715 static pt2_entry_t * 6716 pmap_pte2_ddb(pmap_t pmap, vm_offset_t va) 6717 { 6718 pt1_entry_t pte1; 6719 vm_paddr_t pt2pg_pa; 6720 6721 pte1 = pte1_load(pmap_pte1(pmap, va)); 6722 if (!pte1_is_link(pte1)) 6723 return (NULL); 6724 6725 if (pmap_is_current(pmap)) 6726 return (pt2map_entry(va)); 6727 6728 /* Note that L2 page table size is not equal to PAGE_SIZE. 
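	 * A hardware L2 table is only 1 KB, so several of them share one
	 * 4 KB page; map that whole page through PMAP3/PADDR3 and index
	 * into it below.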
*/ 6729 pt2pg_pa = trunc_page(pte1_link_pa(pte1)); 6730 if (pte2_pa(pte2_load(PMAP3)) != pt2pg_pa) { 6731 pte2_store(PMAP3, PTE2_KPT(pt2pg_pa)); 6732 #ifdef SMP 6733 PMAP3cpu = PCPU_GET(cpuid); 6734 #endif 6735 tlb_flush_local((vm_offset_t)PADDR3); 6736 } 6737 #ifdef SMP 6738 else if (PMAP3cpu != PCPU_GET(cpuid)) { 6739 PMAP3cpu = PCPU_GET(cpuid); 6740 tlb_flush_local((vm_offset_t)PADDR3); 6741 } 6742 #endif 6743 return (PADDR3 + (arm32_btop(va) & (NPTE2_IN_PG - 1))); 6744 } 6745 6746 static void 6747 dump_pmap(pmap_t pmap) 6748 { 6749 6750 printf("pmap %p\n", pmap); 6751 printf(" pm_pt1: %p\n", pmap->pm_pt1); 6752 printf(" pm_pt2tab: %p\n", pmap->pm_pt2tab); 6753 printf(" pm_active: 0x%08lX\n", pmap->pm_active.__bits[0]); 6754 } 6755 6756 DB_SHOW_COMMAND(pmaps, pmap_list_pmaps) 6757 { 6758 6759 pmap_t pmap; 6760 LIST_FOREACH(pmap, &allpmaps, pm_list) { 6761 dump_pmap(pmap); 6762 } 6763 } 6764 6765 static int 6766 pte2_class(pt2_entry_t pte2) 6767 { 6768 int cls; 6769 6770 cls = (pte2 >> 2) & 0x03; 6771 cls |= (pte2 >> 4) & 0x04; 6772 return (cls); 6773 } 6774 6775 static void 6776 dump_section(pmap_t pmap, uint32_t pte1_idx) 6777 { 6778 } 6779 6780 static void 6781 dump_link(pmap_t pmap, uint32_t pte1_idx, boolean_t invalid_ok) 6782 { 6783 uint32_t i; 6784 vm_offset_t va; 6785 pt2_entry_t *pte2p, pte2; 6786 vm_page_t m; 6787 6788 va = pte1_idx << PTE1_SHIFT; 6789 pte2p = pmap_pte2_ddb(pmap, va); 6790 for (i = 0; i < NPTE2_IN_PT2; i++, pte2p++, va += PAGE_SIZE) { 6791 pte2 = pte2_load(pte2p); 6792 if (pte2 == 0) 6793 continue; 6794 if (!pte2_is_valid(pte2)) { 6795 printf(" 0x%08X: 0x%08X", va, pte2); 6796 if (!invalid_ok) 6797 printf(" - not valid !!!"); 6798 printf("\n"); 6799 continue; 6800 } 6801 m = PHYS_TO_VM_PAGE(pte2_pa(pte2)); 6802 printf(" 0x%08X: 0x%08X, TEX%d, s:%d, g:%d, m:%p", va , pte2, 6803 pte2_class(pte2), !!(pte2 & PTE2_S), !(pte2 & PTE2_NG), m); 6804 if (m != NULL) { 6805 printf(" v:%d w:%d f:0x%04X\n", m->valid, 6806 m->ref_count, m->flags); 6807 } else { 6808 printf("\n"); 6809 } 6810 } 6811 } 6812 6813 static __inline boolean_t 6814 is_pv_chunk_space(vm_offset_t va) 6815 { 6816 6817 if ((((vm_offset_t)pv_chunkbase) <= va) && 6818 (va < ((vm_offset_t)pv_chunkbase + PAGE_SIZE * pv_maxchunks))) 6819 return (TRUE); 6820 return (FALSE); 6821 } 6822 6823 DB_SHOW_COMMAND(pmap, pmap_pmap_print) 6824 { 6825 /* XXX convert args. */ 6826 pmap_t pmap = (pmap_t)addr; 6827 pt1_entry_t pte1; 6828 pt2_entry_t pte2; 6829 vm_offset_t va, eva; 6830 vm_page_t m; 6831 uint32_t i; 6832 boolean_t invalid_ok, dump_link_ok, dump_pv_chunk; 6833 6834 if (have_addr) { 6835 pmap_t pm; 6836 6837 LIST_FOREACH(pm, &allpmaps, pm_list) 6838 if (pm == pmap) break; 6839 if (pm == NULL) { 6840 printf("given pmap %p is not in allpmaps list\n", pmap); 6841 return; 6842 } 6843 } else 6844 pmap = PCPU_GET(curpmap); 6845 6846 eva = (modif[0] == 'u') ? 
VM_MAXUSER_ADDRESS : 0xFFFFFFFF; 6847 dump_pv_chunk = FALSE; /* XXX evaluate from modif[] */ 6848 6849 printf("pmap: 0x%08X\n", (uint32_t)pmap); 6850 printf("PT2MAP: 0x%08X\n", (uint32_t)PT2MAP); 6851 printf("pt2tab: 0x%08X\n", (uint32_t)pmap->pm_pt2tab); 6852 6853 for(i = 0; i < NPTE1_IN_PT1; i++) { 6854 pte1 = pte1_load(&pmap->pm_pt1[i]); 6855 if (pte1 == 0) 6856 continue; 6857 va = i << PTE1_SHIFT; 6858 if (va >= eva) 6859 break; 6860 6861 if (pte1_is_section(pte1)) { 6862 printf("0x%08X: Section 0x%08X, s:%d g:%d\n", va, pte1, 6863 !!(pte1 & PTE1_S), !(pte1 & PTE1_NG)); 6864 dump_section(pmap, i); 6865 } else if (pte1_is_link(pte1)) { 6866 dump_link_ok = TRUE; 6867 invalid_ok = FALSE; 6868 pte2 = pte2_load(pmap_pt2tab_entry(pmap, va)); 6869 m = PHYS_TO_VM_PAGE(pte1_link_pa(pte1)); 6870 printf("0x%08X: Link 0x%08X, pt2tab: 0x%08X m: %p", 6871 va, pte1, pte2, m); 6872 if (is_pv_chunk_space(va)) { 6873 printf(" - pv_chunk space"); 6874 if (dump_pv_chunk) 6875 invalid_ok = TRUE; 6876 else 6877 dump_link_ok = FALSE; 6878 } 6879 else if (m != NULL) 6880 printf(" w:%d w2:%u", m->ref_count, 6881 pt2_wirecount_get(m, pte1_index(va))); 6882 if (pte2 == 0) 6883 printf(" !!! pt2tab entry is ZERO"); 6884 else if (pte2_pa(pte1) != pte2_pa(pte2)) 6885 printf(" !!! pt2tab entry is DIFFERENT - m: %p", 6886 PHYS_TO_VM_PAGE(pte2_pa(pte2))); 6887 printf("\n"); 6888 if (dump_link_ok) 6889 dump_link(pmap, i, invalid_ok); 6890 } else 6891 printf("0x%08X: Invalid entry 0x%08X\n", va, pte1); 6892 } 6893 } 6894 6895 static void 6896 dump_pt2tab(pmap_t pmap) 6897 { 6898 uint32_t i; 6899 pt2_entry_t pte2; 6900 vm_offset_t va; 6901 vm_paddr_t pa; 6902 vm_page_t m; 6903 6904 printf("PT2TAB:\n"); 6905 for (i = 0; i < PT2TAB_ENTRIES; i++) { 6906 pte2 = pte2_load(&pmap->pm_pt2tab[i]); 6907 if (!pte2_is_valid(pte2)) 6908 continue; 6909 va = i << PT2TAB_SHIFT; 6910 pa = pte2_pa(pte2); 6911 m = PHYS_TO_VM_PAGE(pa); 6912 printf(" 0x%08X: 0x%08X, TEX%d, s:%d, m:%p", va, pte2, 6913 pte2_class(pte2), !!(pte2 & PTE2_S), m); 6914 if (m != NULL) 6915 printf(" , w: %d, f: 0x%04X pidx: %lld", 6916 m->ref_count, m->flags, m->pindex); 6917 printf("\n"); 6918 } 6919 } 6920 6921 DB_SHOW_COMMAND(pmap_pt2tab, pmap_pt2tab_print) 6922 { 6923 /* XXX convert args. */ 6924 pmap_t pmap = (pmap_t)addr; 6925 pt1_entry_t pte1; 6926 pt2_entry_t pte2; 6927 vm_offset_t va; 6928 uint32_t i, start; 6929 6930 if (have_addr) { 6931 printf("supported only on current pmap\n"); 6932 return; 6933 } 6934 6935 pmap = PCPU_GET(curpmap); 6936 printf("curpmap: 0x%08X\n", (uint32_t)pmap); 6937 printf("PT2MAP: 0x%08X\n", (uint32_t)PT2MAP); 6938 printf("pt2tab: 0x%08X\n", (uint32_t)pmap->pm_pt2tab); 6939 6940 start = pte1_index((vm_offset_t)PT2MAP); 6941 for (i = start; i < (start + NPT2_IN_PT2TAB); i++) { 6942 pte1 = pte1_load(&pmap->pm_pt1[i]); 6943 if (pte1 == 0) 6944 continue; 6945 va = i << PTE1_SHIFT; 6946 if (pte1_is_section(pte1)) { 6947 printf("0x%08X: Section 0x%08X, s:%d\n", va, pte1, 6948 !!(pte1 & PTE1_S)); 6949 dump_section(pmap, i); 6950 } else if (pte1_is_link(pte1)) { 6951 pte2 = pte2_load(pmap_pt2tab_entry(pmap, va)); 6952 printf("0x%08X: Link 0x%08X, pt2tab: 0x%08X\n", va, 6953 pte1, pte2); 6954 if (pte2 == 0) 6955 printf(" !!! pt2tab entry is ZERO\n"); 6956 } else 6957 printf("0x%08X: Invalid entry 0x%08X\n", va, pte1); 6958 } 6959 dump_pt2tab(pmap); 6960 } 6961 #endif 6962