1 /* 2 * Copyright (c) 1991 Regents of the University of California. 3 * Copyright (c) 1994 John S. Dyson 4 * Copyright (c) 1994 David Greenman 5 * Copyright (c) 2003 Peter Wemm 6 * Copyright (c) 2005-2008 Alan L. Cox <alc@cs.rice.edu> 7 * Copyright (c) 2008, 2009 The DragonFly Project. 8 * Copyright (c) 2008, 2009 Jordan Gordeev. 9 * Copyright (c) 2011-2019 Matthew Dillon 10 * All rights reserved. 11 * 12 * This code is derived from software contributed to Berkeley by 13 * the Systems Programming Group of the University of Utah Computer 14 * Science Department and William Jolitz of UUNET Technologies Inc. 15 * 16 * Redistribution and use in source and binary forms, with or without 17 * modification, are permitted provided that the following conditions 18 * are met: 19 * 1. Redistributions of source code must retain the above copyright 20 * notice, this list of conditions and the following disclaimer. 21 * 2. Redistributions in binary form must reproduce the above copyright 22 * notice, this list of conditions and the following disclaimer in the 23 * documentation and/or other materials provided with the distribution. 24 * 3. All advertising materials mentioning features or use of this software 25 * must display the following acknowledgement: 26 * This product includes software developed by the University of 27 * California, Berkeley and its contributors. 28 * 4. Neither the name of the University nor the names of its contributors 29 * may be used to endorse or promote products derived from this software 30 * without specific prior written permission. 31 * 32 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 33 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 34 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 35 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 36 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 37 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 38 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 39 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 40 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 41 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 42 * SUCH DAMAGE. 43 */ 44 /* 45 * Manage physical address maps for x86-64 systems. 46 * 47 * Some notes: 48 * - The 'M'odified bit is only applicable to terminal PTEs. 49 * 50 * - The 'U'ser access bit can be set for higher-level PTEs as 51 * long as it isn't set for terminal PTEs for pages we don't 52 * want user access to. 
53 */ 54 55 #if 0 /* JG */ 56 #include "opt_pmap.h" 57 #endif 58 #include "opt_msgbuf.h" 59 60 #include <sys/param.h> 61 #include <sys/kernel.h> 62 #include <sys/proc.h> 63 #include <sys/msgbuf.h> 64 #include <sys/vmmeter.h> 65 #include <sys/mman.h> 66 #include <sys/systm.h> 67 68 #include <vm/vm.h> 69 #include <vm/vm_param.h> 70 #include <sys/sysctl.h> 71 #include <sys/lock.h> 72 #include <vm/vm_kern.h> 73 #include <vm/vm_page.h> 74 #include <vm/vm_map.h> 75 #include <vm/vm_object.h> 76 #include <vm/vm_extern.h> 77 #include <vm/vm_pageout.h> 78 #include <vm/vm_pager.h> 79 #include <vm/vm_zone.h> 80 81 #include <sys/thread2.h> 82 #include <sys/spinlock2.h> 83 #include <vm/vm_page2.h> 84 85 #include <machine/cputypes.h> 86 #include <machine/cpu.h> 87 #include <machine/md_var.h> 88 #include <machine/specialreg.h> 89 #include <machine/smp.h> 90 #include <machine_base/apic/apicreg.h> 91 #include <machine/globaldata.h> 92 #include <machine/pmap.h> 93 #include <machine/pmap_inval.h> 94 95 #include <ddb/ddb.h> 96 97 #define PMAP_KEEP_PDIRS 98 99 #if defined(DIAGNOSTIC) 100 #define PMAP_DIAGNOSTIC 101 #endif 102 103 #define MINPV 2048 104 105 /* 106 * pmap debugging will report who owns a pv lock when blocking. 107 */ 108 #ifdef PMAP_DEBUG 109 110 #define PMAP_DEBUG_DECL ,const char *func, int lineno 111 #define PMAP_DEBUG_ARGS , __func__, __LINE__ 112 #define PMAP_DEBUG_COPY , func, lineno 113 114 #define pv_get(pmap, pindex, pmarkp) _pv_get(pmap, pindex, pmarkp \ 115 PMAP_DEBUG_ARGS) 116 #define pv_lock(pv) _pv_lock(pv \ 117 PMAP_DEBUG_ARGS) 118 #define pv_hold_try(pv) _pv_hold_try(pv \ 119 PMAP_DEBUG_ARGS) 120 #define pv_alloc(pmap, pindex, isnewp) _pv_alloc(pmap, pindex, isnewp \ 121 PMAP_DEBUG_ARGS) 122 123 #define pv_free(pv, pvp) _pv_free(pv, pvp PMAP_DEBUG_ARGS) 124 125 #else 126 127 #define PMAP_DEBUG_DECL 128 #define PMAP_DEBUG_ARGS 129 #define PMAP_DEBUG_COPY 130 131 #define pv_get(pmap, pindex, pmarkp) _pv_get(pmap, pindex, pmarkp) 132 #define pv_lock(pv) _pv_lock(pv) 133 #define pv_hold_try(pv) _pv_hold_try(pv) 134 #define pv_alloc(pmap, pindex, isnewp) _pv_alloc(pmap, pindex, isnewp) 135 #define pv_free(pv, pvp) _pv_free(pv, pvp) 136 137 #endif 138 139 /* 140 * Get PDEs and PTEs for user/kernel address space 141 */ 142 #define pdir_pde(m, v) (m[(vm_offset_t)(v) >> PDRSHIFT]) 143 144 #define pmap_pde_v(pmap, pte) \ 145 ((*(pd_entry_t *)pte & pmap->pmap_bits[PG_V_IDX]) != 0) 146 #define pmap_pte_w(pmap, pte) \ 147 ((*(pt_entry_t *)pte & pmap->pmap_bits[PG_W_IDX]) != 0) 148 #define pmap_pte_m(pmap, pte) \ 149 ((*(pt_entry_t *)pte & pmap->pmap_bits[PG_M_IDX]) != 0) 150 #define pmap_pte_u(pmap, pte) \ 151 ((*(pt_entry_t *)pte & pmap->pmap_bits[PG_U_IDX]) != 0) 152 #define pmap_pte_v(pmap, pte) \ 153 ((*(pt_entry_t *)pte & pmap->pmap_bits[PG_V_IDX]) != 0) 154 155 /* 156 * Given a map and a machine independent protection code, 157 * convert to a vax protection code. 158 */ 159 #define pte_prot(m, p) \ 160 (m->protection_codes[p & (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE)]) 161 static uint64_t protection_codes[PROTECTION_CODES_SIZE]; 162 163 /* 164 * Backing scan macros. Note that in the use case 'ipte' is only a tentitive 165 * value and must be validated by a pmap_inval_smp_cmpset*() or equivalent 166 * function. 167 * 168 * NOTE: cpu_ccfence() is required to prevent excessive optmization of 169 * of the (ipte) variable. 170 * 171 * NOTE: We don't bother locking the backing object if it isn't mapped 172 * to anything (backing_list is empty). 
173 * 174 * NOTE: For now guarantee an interlock via iobj->backing_lk if the 175 * object exists and do not shortcut the lock by checking to see 176 * if the list is empty first. 177 */ 178 #define PMAP_PAGE_BACKING_SCAN(m, match_pmap, ipmap, iptep, ipte, iva) \ 179 if (m->object) { \ 180 vm_object_t iobj = m->object; \ 181 vm_map_backing_t iba, next_ba; \ 182 struct pmap *ipmap; \ 183 pt_entry_t ipte; \ 184 pt_entry_t *iptep; \ 185 vm_offset_t iva; \ 186 vm_pindex_t ipindex_start; \ 187 vm_pindex_t ipindex_end; \ 188 \ 189 lockmgr(&iobj->backing_lk, LK_SHARED); \ 190 next_ba = TAILQ_FIRST(&iobj->backing_list); \ 191 while ((iba = next_ba) != NULL) { \ 192 next_ba = TAILQ_NEXT(iba, entry); \ 193 ipmap = iba->pmap; \ 194 if (match_pmap && ipmap != match_pmap) \ 195 continue; \ 196 ipindex_start = iba->offset >> PAGE_SHIFT; \ 197 ipindex_end = ipindex_start + \ 198 ((iba->end - iba->start) >> PAGE_SHIFT); \ 199 if (m->pindex < ipindex_start || \ 200 m->pindex >= ipindex_end) { \ 201 continue; \ 202 } \ 203 iva = iba->start + \ 204 ((m->pindex - ipindex_start) << PAGE_SHIFT); \ 205 iptep = pmap_pte(ipmap, iva); \ 206 if (iptep == NULL) \ 207 continue; \ 208 ipte = *iptep; \ 209 cpu_ccfence(); \ 210 if (m->phys_addr != (ipte & PG_FRAME)) \ 211 continue; \ 212 213 #define PMAP_PAGE_BACKING_RETRY \ 214 { \ 215 next_ba = iba; \ 216 continue; \ 217 } \ 218 219 #define PMAP_PAGE_BACKING_DONE \ 220 } \ 221 lockmgr(&iobj->backing_lk, LK_RELEASE); \ 222 } \ 223 224 struct pmap kernel_pmap; 225 static struct pmap iso_pmap; 226 227 vm_paddr_t avail_start; /* PA of first available physical page */ 228 vm_paddr_t avail_end; /* PA of last available physical page */ 229 vm_offset_t virtual2_start; /* cutout free area prior to kernel start */ 230 vm_offset_t virtual2_end; 231 vm_offset_t virtual_start; /* VA of first avail page (after kernel bss) */ 232 vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */ 233 vm_offset_t KvaStart; /* VA start of KVA space */ 234 vm_offset_t KvaEnd; /* VA end of KVA space (non-inclusive) */ 235 vm_offset_t KvaSize; /* max size of kernel virtual address space */ 236 vm_offset_t DMapMaxAddress; 237 /* Has pmap_init completed? 
*/ 238 __read_frequently static boolean_t pmap_initialized = FALSE; 239 //static int pgeflag; /* PG_G or-in */ 240 static uint64_t PatMsr; 241 242 static int ndmpdp; 243 static vm_paddr_t dmaplimit; 244 vm_offset_t kernel_vm_end = VM_MIN_KERNEL_ADDRESS; 245 246 static pt_entry_t pat_pte_index[PAT_INDEX_SIZE]; /* PAT -> PG_ bits */ 247 static pt_entry_t pat_pde_index[PAT_INDEX_SIZE]; /* PAT -> PG_ bits */ 248 249 static uint64_t KPTbase; 250 static uint64_t KPTphys; 251 static uint64_t KPDphys; /* phys addr of kernel level 2 */ 252 static uint64_t KPDbase; /* phys addr of kernel level 2 @ KERNBASE */ 253 uint64_t KPDPphys; /* phys addr of kernel level 3 */ 254 uint64_t KPML4phys; /* phys addr of kernel level 4 */ 255 256 static uint64_t DMPDphys; /* phys addr of direct mapped level 2 */ 257 static uint64_t DMPDPphys; /* phys addr of direct mapped level 3 */ 258 259 /* 260 * Data for the pv entry allocation mechanism 261 */ 262 __read_mostly static vm_zone_t pvzone; 263 __read_mostly static int pmap_pagedaemon_waken = 0; 264 static struct vm_zone pvzone_store; 265 static struct pv_entry *pvinit; 266 267 /* 268 * All those kernel PT submaps that BSD is so fond of 269 */ 270 pt_entry_t *CMAP1 = NULL; 271 caddr_t CADDR1 = NULL, ptvmmap = NULL; 272 static pt_entry_t *msgbufmap, *ptmmap; 273 struct msgbuf *msgbufp=NULL; 274 275 /* 276 * PMAP default PG_* bits. Needed to be able to add 277 * EPT/NPT pagetable pmap_bits for the VMM module 278 */ 279 __read_frequently static uint64_t pmap_bits_default[] = { 280 REGULAR_PMAP, /* TYPE_IDX 0 */ 281 X86_PG_V, /* PG_V_IDX 1 */ 282 X86_PG_RW, /* PG_RW_IDX 2 */ 283 X86_PG_U, /* PG_U_IDX 3 */ 284 X86_PG_A, /* PG_A_IDX 4 */ 285 X86_PG_M, /* PG_M_IDX 5 */ 286 X86_PG_PS, /* PG_PS_IDX3 6 */ 287 X86_PG_G, /* PG_G_IDX 7 */ 288 X86_PG_AVAIL1, /* PG_AVAIL1_IDX 8 */ 289 X86_PG_AVAIL2, /* PG_AVAIL2_IDX 9 */ 290 X86_PG_AVAIL3, /* PG_AVAIL3_IDX 10 */ 291 X86_PG_NC_PWT | X86_PG_NC_PCD, /* PG_N_IDX 11 */ 292 X86_PG_NX, /* PG_NX_IDX 12 */ 293 }; 294 295 /* 296 * Crashdump maps. 
297 */ 298 static pt_entry_t *pt_crashdumpmap; 299 static caddr_t crashdumpmap; 300 301 static int pmap_debug = 0; 302 SYSCTL_INT(_machdep, OID_AUTO, pmap_debug, CTLFLAG_RW, 303 &pmap_debug, 0, "Debug pmap's"); 304 #ifdef PMAP_DEBUG2 305 static int pmap_enter_debug = 0; 306 SYSCTL_INT(_machdep, OID_AUTO, pmap_enter_debug, CTLFLAG_RW, 307 &pmap_enter_debug, 0, "Debug pmap_enter's"); 308 #endif 309 static int pmap_yield_count = 64; 310 SYSCTL_INT(_machdep, OID_AUTO, pmap_yield_count, CTLFLAG_RW, 311 &pmap_yield_count, 0, "Yield during init_pt/release"); 312 static int pmap_fast_kernel_cpusync = 0; 313 SYSCTL_INT(_machdep, OID_AUTO, pmap_fast_kernel_cpusync, CTLFLAG_RW, 314 &pmap_fast_kernel_cpusync, 0, "Share page table pages when possible"); 315 static int pmap_dynamic_delete = 0; 316 SYSCTL_INT(_machdep, OID_AUTO, pmap_dynamic_delete, CTLFLAG_RW, 317 &pmap_dynamic_delete, 0, "Dynamically delete PT/PD/PDPs"); 318 static int pmap_lock_delay = 100; 319 SYSCTL_INT(_machdep, OID_AUTO, pmap_lock_delay, CTLFLAG_RW, 320 &pmap_lock_delay, 0, "Spin loops"); 321 static int meltdown_mitigation = -1; 322 TUNABLE_INT("machdep.meltdown_mitigation", &meltdown_mitigation); 323 SYSCTL_INT(_machdep, OID_AUTO, meltdown_mitigation, CTLFLAG_RW, 324 &meltdown_mitigation, 0, "Userland pmap isolation"); 325 326 static int pmap_nx_enable = -1; /* -1 = auto */ 327 /* needs manual TUNABLE in early probe, see below */ 328 SYSCTL_INT(_machdep, OID_AUTO, pmap_nx_enable, CTLFLAG_RD, 329 &pmap_nx_enable, 0, 330 "no-execute support (0=disabled, 1=w/READ, 2=w/READ & WRITE)"); 331 332 static int pmap_pv_debug = 50; 333 SYSCTL_INT(_machdep, OID_AUTO, pmap_pv_debug, CTLFLAG_RW, 334 &pmap_pv_debug, 0, ""); 335 336 static long vm_pmap_pv_entries; 337 SYSCTL_LONG(_vm, OID_AUTO, pmap_pv_entries, CTLFLAG_RD, 338 &vm_pmap_pv_entries, 0, ""); 339 340 /* Standard user access funtions */ 341 extern int std_copyinstr (const void *udaddr, void *kaddr, size_t len, 342 size_t *lencopied); 343 extern int std_copyin (const void *udaddr, void *kaddr, size_t len); 344 extern int std_copyout (const void *kaddr, void *udaddr, size_t len); 345 extern int std_fubyte (const uint8_t *base); 346 extern int std_subyte (uint8_t *base, uint8_t byte); 347 extern int32_t std_fuword32 (const uint32_t *base); 348 extern int64_t std_fuword64 (const uint64_t *base); 349 extern int std_suword64 (uint64_t *base, uint64_t word); 350 extern int std_suword32 (uint32_t *base, int word); 351 extern uint32_t std_swapu32 (volatile uint32_t *base, uint32_t v); 352 extern uint64_t std_swapu64 (volatile uint64_t *base, uint64_t v); 353 extern uint32_t std_fuwordadd32 (volatile uint32_t *base, uint32_t v); 354 extern uint64_t std_fuwordadd64 (volatile uint64_t *base, uint64_t v); 355 356 #if 0 357 static void pv_hold(pv_entry_t pv); 358 #endif 359 static int _pv_hold_try(pv_entry_t pv 360 PMAP_DEBUG_DECL); 361 static void pv_drop(pv_entry_t pv); 362 static void _pv_lock(pv_entry_t pv 363 PMAP_DEBUG_DECL); 364 static void pv_unlock(pv_entry_t pv); 365 static pv_entry_t _pv_alloc(pmap_t pmap, vm_pindex_t pindex, int *isnew 366 PMAP_DEBUG_DECL); 367 static pv_entry_t _pv_get(pmap_t pmap, vm_pindex_t pindex, vm_pindex_t **pmarkp 368 PMAP_DEBUG_DECL); 369 static void _pv_free(pv_entry_t pv, pv_entry_t pvp PMAP_DEBUG_DECL); 370 static pv_entry_t pv_get_try(pmap_t pmap, vm_pindex_t pindex, 371 vm_pindex_t **pmarkp, int *errorp); 372 static void pv_put(pv_entry_t pv); 373 static void *pv_pte_lookup(pv_entry_t pv, vm_pindex_t pindex); 374 static pv_entry_t pmap_allocpte(pmap_t 
pmap, vm_pindex_t ptepindex, 375 pv_entry_t *pvpp); 376 static void pmap_remove_pv_pte(pv_entry_t pv, pv_entry_t pvp, 377 pmap_inval_bulk_t *bulk, int destroy); 378 static vm_page_t pmap_remove_pv_page(pv_entry_t pv, int clrpgbits); 379 static int pmap_release_pv(pv_entry_t pv, pv_entry_t pvp, 380 pmap_inval_bulk_t *bulk); 381 382 struct pmap_scan_info; 383 static void pmap_remove_callback(pmap_t pmap, struct pmap_scan_info *info, 384 vm_pindex_t *pte_placemark, pv_entry_t pt_pv, 385 vm_offset_t va, pt_entry_t *ptep, void *arg __unused); 386 static void pmap_protect_callback(pmap_t pmap, struct pmap_scan_info *info, 387 vm_pindex_t *pte_placemark, pv_entry_t pt_pv, 388 vm_offset_t va, pt_entry_t *ptep, void *arg __unused); 389 390 static void x86_64_protection_init (void); 391 static void create_pagetables(vm_paddr_t *firstaddr); 392 static void pmap_remove_all (vm_page_t m); 393 static boolean_t pmap_testbit (vm_page_t m, int bit); 394 395 static pt_entry_t *pmap_pte_quick (pmap_t pmap, vm_offset_t va); 396 static vm_offset_t pmap_kmem_choose(vm_offset_t addr); 397 398 static void pmap_pinit_defaults(struct pmap *pmap); 399 static void pv_placemarker_wait(pmap_t pmap, vm_pindex_t *pmark); 400 static void pv_placemarker_wakeup(pmap_t pmap, vm_pindex_t *pmark); 401 402 static int 403 pv_entry_compare(pv_entry_t pv1, pv_entry_t pv2) 404 { 405 if (pv1->pv_pindex < pv2->pv_pindex) 406 return(-1); 407 if (pv1->pv_pindex > pv2->pv_pindex) 408 return(1); 409 return(0); 410 } 411 412 RB_GENERATE2(pv_entry_rb_tree, pv_entry, pv_entry, 413 pv_entry_compare, vm_pindex_t, pv_pindex); 414 415 /* 416 * We have removed a managed pte. The page might not be hard or soft-busied 417 * at this point so we have to be careful. 418 * 419 * If advanced mode is enabled we can clear PG_MAPPED/WRITEABLE only if 420 * MAPPEDMULTI is not set. This must be done atomically against possible 421 * concurrent pmap_enter()s occurring at the same time. If MULTI is set 422 * then the kernel may have to call vm_page_protect() later on to clean 423 * the bits up. This is particularly important for kernel_map/kernel_object 424 * mappings due to the expense of scanning the kernel_object's vm_backing's. 425 * 426 * If advanced mode is not enabled we update our tracking counts and 427 * synchronize PG_MAPPED/WRITEABLE later on in pmap_mapped_sync(). 428 */ 429 static __inline 430 void 431 pmap_removed_pte(vm_page_t m, pt_entry_t pte) 432 { 433 int flags; 434 int nflags; 435 436 flags = m->flags; 437 cpu_ccfence(); 438 while ((flags & PG_MAPPEDMULTI) == 0) { 439 nflags = flags & ~(PG_MAPPED | PG_WRITEABLE); 440 if (atomic_fcmpset_int(&m->flags, &flags, nflags)) 441 break; 442 } 443 } 444 445 /* 446 * Move the kernel virtual free pointer to the next 447 * 2MB. This is used to help improve performance 448 * by using a large (2MB) page for much of the kernel 449 * (.text, .data, .bss) 450 */ 451 static 452 vm_offset_t 453 pmap_kmem_choose(vm_offset_t addr) 454 { 455 vm_offset_t newaddr = addr; 456 457 newaddr = roundup2(addr, NBPDR); 458 return newaddr; 459 } 460 461 /* 462 * Returns the pindex of a page table entry (representing a terminal page). 463 * There are NUPTE_TOTAL page table entries possible (a huge number) 464 * 465 * x86-64 has a 48-bit address space, where bit 47 is sign-extended out. 466 * We want to properly translate negative KVAs. 
467 */ 468 static __inline 469 vm_pindex_t 470 pmap_pte_pindex(vm_offset_t va) 471 { 472 return ((va >> PAGE_SHIFT) & (NUPTE_TOTAL - 1)); 473 } 474 475 /* 476 * Returns the pindex of a page table. 477 */ 478 static __inline 479 vm_pindex_t 480 pmap_pt_pindex(vm_offset_t va) 481 { 482 return (NUPTE_TOTAL + ((va >> PDRSHIFT) & (NUPT_TOTAL - 1))); 483 } 484 485 /* 486 * Returns the pindex of a page directory. 487 */ 488 static __inline 489 vm_pindex_t 490 pmap_pd_pindex(vm_offset_t va) 491 { 492 return (NUPTE_TOTAL + NUPT_TOTAL + 493 ((va >> PDPSHIFT) & (NUPD_TOTAL - 1))); 494 } 495 496 static __inline 497 vm_pindex_t 498 pmap_pdp_pindex(vm_offset_t va) 499 { 500 return (NUPTE_TOTAL + NUPT_TOTAL + NUPD_TOTAL + 501 ((va >> PML4SHIFT) & (NUPDP_TOTAL - 1))); 502 } 503 504 static __inline 505 vm_pindex_t 506 pmap_pml4_pindex(void) 507 { 508 return (NUPTE_TOTAL + NUPT_TOTAL + NUPD_TOTAL + NUPDP_TOTAL); 509 } 510 511 /* 512 * Return various clipped indexes for a given VA 513 * 514 * Returns the index of a pt in a page directory, representing a page 515 * table. 516 */ 517 static __inline 518 vm_pindex_t 519 pmap_pt_index(vm_offset_t va) 520 { 521 return ((va >> PDRSHIFT) & ((1ul << NPDEPGSHIFT) - 1)); 522 } 523 524 /* 525 * Returns the index of a pd in a page directory page, representing a page 526 * directory. 527 */ 528 static __inline 529 vm_pindex_t 530 pmap_pd_index(vm_offset_t va) 531 { 532 return ((va >> PDPSHIFT) & ((1ul << NPDPEPGSHIFT) - 1)); 533 } 534 535 /* 536 * Returns the index of a pdp in the pml4 table, representing a page 537 * directory page. 538 */ 539 static __inline 540 vm_pindex_t 541 pmap_pdp_index(vm_offset_t va) 542 { 543 return ((va >> PML4SHIFT) & ((1ul << NPML4EPGSHIFT) - 1)); 544 } 545 546 /* 547 * Of all the layers (PT, PD, PDP, PML4) the best one to cache is 548 * the PT layer. This will speed up core pmap operations considerably. 549 * 550 * NOTE: The pmap spinlock does not need to be held but the passed-in pv 551 * must be in a known associated state (typically by being locked when 552 * the pmap spinlock isn't held). We allow the race for that case. 553 * 554 * NOTE: pm_pvhint* is only accessed (read) with the spin-lock held, using 555 * cpu_ccfence() to prevent compiler optimizations from reloading the 556 * field. 557 */ 558 static __inline 559 void 560 pv_cache(pmap_t pmap, pv_entry_t pv, vm_pindex_t pindex) 561 { 562 if (pindex < pmap_pt_pindex(0)) { 563 ; 564 } else if (pindex < pmap_pd_pindex(0)) { 565 pmap->pm_pvhint_pt = pv; 566 } 567 } 568 569 /* 570 * Locate the requested pt_entry 571 */ 572 static __inline 573 pv_entry_t 574 pv_entry_lookup(pmap_t pmap, vm_pindex_t pindex) 575 { 576 pv_entry_t pv; 577 578 if (pindex < pmap_pt_pindex(0)) 579 return NULL; 580 #if 1 581 if (pindex < pmap_pd_pindex(0)) 582 pv = pmap->pm_pvhint_pt; 583 else 584 pv = NULL; 585 cpu_ccfence(); 586 if (pv == NULL || pv->pv_pmap != pmap) { 587 pv = pv_entry_rb_tree_RB_LOOKUP(&pmap->pm_pvroot, pindex); 588 if (pv) 589 pv_cache(pmap, pv, pindex); 590 } else if (pv->pv_pindex != pindex) { 591 pv = pv_entry_rb_tree_RB_LOOKUP_REL(&pmap->pm_pvroot, 592 pindex, pv); 593 if (pv) 594 pv_cache(pmap, pv, pindex); 595 } 596 #else 597 pv = pv_entry_rb_tree_RB_LOOKUP(&pmap->pm_pvroot, pindex); 598 #endif 599 return pv; 600 } 601 602 /* 603 * pmap_pte_quick: 604 * 605 * Super fast pmap_pte routine best used when scanning the pv lists. 606 * This eliminates many course-grained invltlb calls. 
Note that many of 607 * the pv list scans are across different pmaps and it is very wasteful 608 * to do an entire invltlb when checking a single mapping. 609 */ 610 static __inline pt_entry_t *pmap_pte(pmap_t pmap, vm_offset_t va); 611 612 static 613 pt_entry_t * 614 pmap_pte_quick(pmap_t pmap, vm_offset_t va) 615 { 616 return pmap_pte(pmap, va); 617 } 618 619 /* 620 * The placemarker hash must be broken up into four zones so lock 621 * ordering semantics continue to work (e.g. pte, pt, pd, then pdp). 622 * 623 * Placemarkers are used to 'lock' page table indices that do not have 624 * a pv_entry. This allows the pmap to support managed and unmanaged 625 * pages and shared page tables. 626 */ 627 #define PM_PLACE_BASE (PM_PLACEMARKS >> 2) 628 629 static __inline 630 vm_pindex_t * 631 pmap_placemarker_hash(pmap_t pmap, vm_pindex_t pindex) 632 { 633 int hi; 634 635 if (pindex < pmap_pt_pindex(0)) /* zone 0 - PTE */ 636 hi = 0; 637 else if (pindex < pmap_pd_pindex(0)) /* zone 1 - PT */ 638 hi = PM_PLACE_BASE; 639 else if (pindex < pmap_pdp_pindex(0)) /* zone 2 - PD */ 640 hi = PM_PLACE_BASE << 1; 641 else /* zone 3 - PDP (and PML4E) */ 642 hi = PM_PLACE_BASE | (PM_PLACE_BASE << 1); 643 hi += pindex & (PM_PLACE_BASE - 1); 644 645 return (&pmap->pm_placemarks[hi]); 646 } 647 648 649 /* 650 * Generic procedure to index a pte from a pt, pd, or pdp. 651 * 652 * NOTE: Normally passed pindex as pmap_xx_index(). pmap_xx_pindex() is NOT 653 * a page table page index but is instead of PV lookup index. 654 */ 655 static 656 void * 657 pv_pte_lookup(pv_entry_t pv, vm_pindex_t pindex) 658 { 659 pt_entry_t *pte; 660 661 pte = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pv->pv_m)); 662 return(&pte[pindex]); 663 } 664 665 /* 666 * Return pointer to PDP slot in the PML4 667 */ 668 static __inline 669 pml4_entry_t * 670 pmap_pdp(pmap_t pmap, vm_offset_t va) 671 { 672 return (&pmap->pm_pml4[pmap_pdp_index(va)]); 673 } 674 675 /* 676 * Return pointer to PD slot in the PDP given a pointer to the PDP 677 */ 678 static __inline 679 pdp_entry_t * 680 pmap_pdp_to_pd(pml4_entry_t pdp_pte, vm_offset_t va) 681 { 682 pdp_entry_t *pd; 683 684 pd = (pdp_entry_t *)PHYS_TO_DMAP(pdp_pte & PG_FRAME); 685 return (&pd[pmap_pd_index(va)]); 686 } 687 688 /* 689 * Return pointer to PD slot in the PDP. 690 */ 691 static __inline 692 pdp_entry_t * 693 pmap_pd(pmap_t pmap, vm_offset_t va) 694 { 695 pml4_entry_t *pdp; 696 697 pdp = pmap_pdp(pmap, va); 698 if ((*pdp & pmap->pmap_bits[PG_V_IDX]) == 0) 699 return NULL; 700 return (pmap_pdp_to_pd(*pdp, va)); 701 } 702 703 /* 704 * Return pointer to PT slot in the PD given a pointer to the PD 705 */ 706 static __inline 707 pd_entry_t * 708 pmap_pd_to_pt(pdp_entry_t pd_pte, vm_offset_t va) 709 { 710 pd_entry_t *pt; 711 712 pt = (pd_entry_t *)PHYS_TO_DMAP(pd_pte & PG_FRAME); 713 return (&pt[pmap_pt_index(va)]); 714 } 715 716 /* 717 * Return pointer to PT slot in the PD 718 * 719 * SIMPLE PMAP NOTE: Simple pmaps (embedded in objects) do not have PDPs, 720 * so we cannot lookup the PD via the PDP. Instead we 721 * must look it up via the pmap. 
722 */ 723 static __inline 724 pd_entry_t * 725 pmap_pt(pmap_t pmap, vm_offset_t va) 726 { 727 pdp_entry_t *pd; 728 pv_entry_t pv; 729 vm_pindex_t pd_pindex; 730 vm_paddr_t phys; 731 732 if (pmap->pm_flags & PMAP_FLAG_SIMPLE) { 733 pd_pindex = pmap_pd_pindex(va); 734 spin_lock_shared(&pmap->pm_spin); 735 pv = pv_entry_rb_tree_RB_LOOKUP(&pmap->pm_pvroot, pd_pindex); 736 if (pv == NULL || pv->pv_m == NULL) { 737 spin_unlock_shared(&pmap->pm_spin); 738 return NULL; 739 } 740 phys = VM_PAGE_TO_PHYS(pv->pv_m); 741 spin_unlock_shared(&pmap->pm_spin); 742 return (pmap_pd_to_pt(phys, va)); 743 } else { 744 pd = pmap_pd(pmap, va); 745 if (pd == NULL || (*pd & pmap->pmap_bits[PG_V_IDX]) == 0) 746 return NULL; 747 return (pmap_pd_to_pt(*pd, va)); 748 } 749 } 750 751 /* 752 * Return pointer to PTE slot in the PT given a pointer to the PT 753 */ 754 static __inline 755 pt_entry_t * 756 pmap_pt_to_pte(pd_entry_t pt_pte, vm_offset_t va) 757 { 758 pt_entry_t *pte; 759 760 pte = (pt_entry_t *)PHYS_TO_DMAP(pt_pte & PG_FRAME); 761 return (&pte[pmap_pte_index(va)]); 762 } 763 764 /* 765 * Return pointer to PTE slot in the PT 766 */ 767 static __inline 768 pt_entry_t * 769 pmap_pte(pmap_t pmap, vm_offset_t va) 770 { 771 pd_entry_t *pt; 772 773 pt = pmap_pt(pmap, va); 774 if (pt == NULL || (*pt & pmap->pmap_bits[PG_V_IDX]) == 0) 775 return NULL; 776 if ((*pt & pmap->pmap_bits[PG_PS_IDX]) != 0) 777 return ((pt_entry_t *)pt); 778 return (pmap_pt_to_pte(*pt, va)); 779 } 780 781 /* 782 * Return address of PT slot in PD (KVM only) 783 * 784 * Cannot be used for user page tables because it might interfere with 785 * the shared page-table-page optimization (pmap_mmu_optimize). 786 */ 787 static __inline 788 pd_entry_t * 789 vtopt(vm_offset_t va) 790 { 791 uint64_t mask = ((1ul << (NPDEPGSHIFT + NPDPEPGSHIFT + 792 NPML4EPGSHIFT)) - 1); 793 794 return (PDmap + ((va >> PDRSHIFT) & mask)); 795 } 796 797 /* 798 * KVM - return address of PTE slot in PT 799 */ 800 static __inline 801 pt_entry_t * 802 vtopte(vm_offset_t va) 803 { 804 uint64_t mask = ((1ul << (NPTEPGSHIFT + NPDEPGSHIFT + 805 NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1); 806 807 return (PTmap + ((va >> PAGE_SHIFT) & mask)); 808 } 809 810 /* 811 * Returns the physical address translation from va for a user address. 812 * (vm_paddr_t)-1 is returned on failure. 813 */ 814 vm_paddr_t 815 uservtophys(vm_offset_t va) 816 { 817 uint64_t mask = ((1ul << (NPTEPGSHIFT + NPDEPGSHIFT + 818 NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1); 819 vm_paddr_t pa; 820 pt_entry_t pte; 821 pmap_t pmap; 822 823 pmap = vmspace_pmap(mycpu->gd_curthread->td_lwp->lwp_vmspace); 824 pa = (vm_paddr_t)-1; 825 if (va < VM_MAX_USER_ADDRESS) { 826 pte = kreadmem64(PTmap + ((va >> PAGE_SHIFT) & mask)); 827 if (pte & pmap->pmap_bits[PG_V_IDX]) 828 pa = (pte & PG_FRAME) | (va & PAGE_MASK); 829 } 830 return pa; 831 } 832 833 static uint64_t 834 allocpages(vm_paddr_t *firstaddr, long n) 835 { 836 uint64_t ret; 837 838 ret = *firstaddr; 839 bzero((void *)ret, n * PAGE_SIZE); 840 *firstaddr += n * PAGE_SIZE; 841 return (ret); 842 } 843 844 static 845 void 846 create_pagetables(vm_paddr_t *firstaddr) 847 { 848 long i; /* must be 64 bits */ 849 long nkpt_base; 850 long nkpt_phys; 851 long nkpd_phys; 852 int j; 853 854 /* 855 * We are running (mostly) V=P at this point 856 * 857 * Calculate how many 1GB PD entries in our PDP pages are needed 858 * for the DMAP. This is only allocated if the system does not 859 * support 1GB pages. 
Otherwise ndmpdp is simply a count of 860 * the number of 1G terminal entries in our PDP pages are needed. 861 * 862 * NOTE: Maxmem is in pages 863 */ 864 ndmpdp = (ptoa(Maxmem) + NBPDP - 1) >> PDPSHIFT; 865 if (ndmpdp < 4) /* Minimum 4GB of DMAP */ 866 ndmpdp = 4; 867 868 #if 0 869 /* 870 * HACK XXX fix me - Some laptops map the EFI framebuffer in 871 * very high physical addresses and the DMAP winds up being too 872 * small. The EFI framebuffer has to be mapped for the console 873 * very early and the DMAP is how it does it. 874 */ 875 if (ndmpdp < 512) /* Minimum 512GB of DMAP */ 876 ndmpdp = 512; 877 #endif 878 879 KKASSERT(ndmpdp <= NDMPML4E * NPML4EPG); 880 DMapMaxAddress = DMAP_MIN_ADDRESS + 881 ((ndmpdp * NPDEPG) << PDRSHIFT); 882 883 /* 884 * Starting at KERNBASE - map all 2G worth of page table pages. 885 * KERNBASE is offset -2G from the end of kvm. This will accomodate 886 * all KVM allocations above KERNBASE, including the SYSMAPs below. 887 * 888 * We do this by allocating 2*512 PT pages. Each PT page can map 889 * 2MB, for 2GB total. 890 */ 891 nkpt_base = (NPDPEPG - KPDPI) * NPTEPG; /* typically 2 x 512 */ 892 893 /* 894 * Starting at the beginning of kvm (VM_MIN_KERNEL_ADDRESS), 895 * Calculate how many page table pages we need to preallocate 896 * for early vm_map allocations. 897 * 898 * A few extra won't hurt, they will get used up in the running 899 * system. 900 * 901 * vm_page array 902 * initial pventry's 903 */ 904 nkpt_phys = howmany(Maxmem * sizeof(struct vm_page), NBPDR); 905 nkpt_phys += howmany(Maxmem * sizeof(struct pv_entry), NBPDR); 906 nkpt_phys += 128; /* a few extra */ 907 908 /* 909 * The highest value nkpd_phys can be set to is 910 * NKPDPE - (NPDPEPG - KPDPI) (i.e. NKPDPE - 2). 911 * 912 * Doing so would cause all PD pages to be pre-populated for 913 * a maximal KVM space (approximately 16*512 pages, or 32MB. 914 * We can save memory by not doing this. 915 */ 916 nkpd_phys = (nkpt_phys + NPDPEPG - 1) / NPDPEPG; 917 918 /* 919 * Allocate pages 920 * 921 * Normally NKPML4E=1-16 (1-16 kernel PDP page) 922 * Normally NKPDPE= NKPML4E*512-1 (511 min kernel PD pages) 923 * 924 * Only allocate enough PD pages 925 * NOTE: We allocate all kernel PD pages up-front, typically 926 * ~511G of KVM, requiring 511 PD pages. 927 */ 928 KPTbase = allocpages(firstaddr, nkpt_base); /* KERNBASE to end */ 929 KPTphys = allocpages(firstaddr, nkpt_phys); /* KVA start */ 930 KPML4phys = allocpages(firstaddr, 1); /* recursive PML4 map */ 931 KPDPphys = allocpages(firstaddr, NKPML4E); /* kernel PDP pages */ 932 KPDphys = allocpages(firstaddr, nkpd_phys); /* kernel PD pages */ 933 934 /* 935 * Alloc PD pages for the area starting at KERNBASE. 936 */ 937 KPDbase = allocpages(firstaddr, NPDPEPG - KPDPI); 938 939 /* 940 * Stuff for our DMAP. Use 2MB pages even when 1GB pages 941 * are available in order to allow APU code to adjust page 942 * attributes on a fixed grain (see pmap_change_attr()). 943 */ 944 DMPDPphys = allocpages(firstaddr, NDMPML4E); 945 #if 1 946 DMPDphys = allocpages(firstaddr, ndmpdp); 947 #else 948 if ((amd_feature & AMDID_PAGE1GB) == 0) 949 DMPDphys = allocpages(firstaddr, ndmpdp); 950 #endif 951 dmaplimit = (vm_paddr_t)ndmpdp << PDPSHIFT; 952 953 /* 954 * Fill in the underlying page table pages for the area around 955 * KERNBASE. This remaps low physical memory to KERNBASE. 
956 * 957 * Read-only from zero to physfree 958 * XXX not fully used, underneath 2M pages 959 */ 960 for (i = 0; (i << PAGE_SHIFT) < *firstaddr; i++) { 961 ((pt_entry_t *)KPTbase)[i] = i << PAGE_SHIFT; 962 ((pt_entry_t *)KPTbase)[i] |= 963 pmap_bits_default[PG_RW_IDX] | 964 pmap_bits_default[PG_V_IDX] | 965 pmap_bits_default[PG_G_IDX]; 966 } 967 968 /* 969 * Now map the initial kernel page tables. One block of page 970 * tables is placed at the beginning of kernel virtual memory, 971 * and another block is placed at KERNBASE to map the kernel binary, 972 * data, bss, and initial pre-allocations. 973 */ 974 for (i = 0; i < nkpt_base; i++) { 975 ((pd_entry_t *)KPDbase)[i] = KPTbase + (i << PAGE_SHIFT); 976 ((pd_entry_t *)KPDbase)[i] |= 977 pmap_bits_default[PG_RW_IDX] | 978 pmap_bits_default[PG_V_IDX]; 979 } 980 for (i = 0; i < nkpt_phys; i++) { 981 ((pd_entry_t *)KPDphys)[i] = KPTphys + (i << PAGE_SHIFT); 982 ((pd_entry_t *)KPDphys)[i] |= 983 pmap_bits_default[PG_RW_IDX] | 984 pmap_bits_default[PG_V_IDX]; 985 } 986 987 /* 988 * Map from zero to end of allocations using 2M pages as an 989 * optimization. This will bypass some of the KPTBase pages 990 * above in the KERNBASE area. 991 */ 992 for (i = 0; (i << PDRSHIFT) < *firstaddr; i++) { 993 ((pd_entry_t *)KPDbase)[i] = i << PDRSHIFT; 994 ((pd_entry_t *)KPDbase)[i] |= 995 pmap_bits_default[PG_RW_IDX] | 996 pmap_bits_default[PG_V_IDX] | 997 pmap_bits_default[PG_PS_IDX] | 998 pmap_bits_default[PG_G_IDX]; 999 } 1000 1001 /* 1002 * Load PD addresses into the PDP pages for primary KVA space to 1003 * cover existing page tables. PD's for KERNBASE are handled in 1004 * the next loop. 1005 * 1006 * expected to pre-populate all of its PDs. See NKPDPE in vmparam.h. 1007 */ 1008 for (i = 0; i < nkpd_phys; i++) { 1009 ((pdp_entry_t *)KPDPphys)[NKPML4E * NPDPEPG - NKPDPE + i] = 1010 KPDphys + (i << PAGE_SHIFT); 1011 ((pdp_entry_t *)KPDPphys)[NKPML4E * NPDPEPG - NKPDPE + i] |= 1012 pmap_bits_default[PG_RW_IDX] | 1013 pmap_bits_default[PG_V_IDX] | 1014 pmap_bits_default[PG_A_IDX]; 1015 } 1016 1017 /* 1018 * Load PDs for KERNBASE to the end 1019 */ 1020 i = (NKPML4E - 1) * NPDPEPG + KPDPI; 1021 for (j = 0; j < NPDPEPG - KPDPI; ++j) { 1022 ((pdp_entry_t *)KPDPphys)[i + j] = 1023 KPDbase + (j << PAGE_SHIFT); 1024 ((pdp_entry_t *)KPDPphys)[i + j] |= 1025 pmap_bits_default[PG_RW_IDX] | 1026 pmap_bits_default[PG_V_IDX] | 1027 pmap_bits_default[PG_A_IDX]; 1028 } 1029 1030 /* 1031 * Now set up the direct map space using either 2MB or 1GB pages 1032 * Preset PG_M and PG_A because demotion expects it. 1033 * 1034 * When filling in entries in the PD pages make sure any excess 1035 * entries are set to zero as we allocated enough PD pages 1036 * 1037 * Stuff for our DMAP. Use 2MB pages even when 1GB pages 1038 * are available in order to allow APU code to adjust page 1039 * attributes on a fixed grain (see pmap_change_attr()). 
1040 */ 1041 #if 0 1042 if ((amd_feature & AMDID_PAGE1GB) == 0) 1043 #endif 1044 { 1045 /* 1046 * Use 2MB pages 1047 */ 1048 for (i = 0; i < NPDEPG * ndmpdp; i++) { 1049 ((pd_entry_t *)DMPDphys)[i] = i << PDRSHIFT; 1050 ((pd_entry_t *)DMPDphys)[i] |= 1051 pmap_bits_default[PG_RW_IDX] | 1052 pmap_bits_default[PG_V_IDX] | 1053 pmap_bits_default[PG_PS_IDX] | 1054 pmap_bits_default[PG_G_IDX] | 1055 pmap_bits_default[PG_M_IDX] | 1056 pmap_bits_default[PG_A_IDX]; 1057 } 1058 1059 /* 1060 * And the direct map space's PDP 1061 */ 1062 for (i = 0; i < ndmpdp; i++) { 1063 ((pdp_entry_t *)DMPDPphys)[i] = DMPDphys + 1064 (i << PAGE_SHIFT); 1065 ((pdp_entry_t *)DMPDPphys)[i] |= 1066 pmap_bits_default[PG_RW_IDX] | 1067 pmap_bits_default[PG_V_IDX] | 1068 pmap_bits_default[PG_A_IDX]; 1069 } 1070 } 1071 #if 0 1072 else { 1073 /* 1074 * 1GB pages 1075 */ 1076 for (i = 0; i < ndmpdp; i++) { 1077 ((pdp_entry_t *)DMPDPphys)[i] = 1078 (vm_paddr_t)i << PDPSHIFT; 1079 ((pdp_entry_t *)DMPDPphys)[i] |= 1080 pmap_bits_default[PG_RW_IDX] | 1081 pmap_bits_default[PG_V_IDX] | 1082 pmap_bits_default[PG_PS_IDX] | 1083 pmap_bits_default[PG_G_IDX] | 1084 pmap_bits_default[PG_M_IDX] | 1085 pmap_bits_default[PG_A_IDX]; 1086 } 1087 } 1088 #endif 1089 1090 /* And recursively map PML4 to itself in order to get PTmap */ 1091 ((pdp_entry_t *)KPML4phys)[PML4PML4I] = KPML4phys; 1092 ((pdp_entry_t *)KPML4phys)[PML4PML4I] |= 1093 pmap_bits_default[PG_RW_IDX] | 1094 pmap_bits_default[PG_V_IDX] | 1095 pmap_bits_default[PG_A_IDX]; 1096 1097 /* 1098 * Connect the Direct Map slots up to the PML4 1099 */ 1100 for (j = 0; j < NDMPML4E; ++j) { 1101 ((pdp_entry_t *)KPML4phys)[DMPML4I + j] = 1102 (DMPDPphys + ((vm_paddr_t)j << PAGE_SHIFT)) | 1103 pmap_bits_default[PG_RW_IDX] | 1104 pmap_bits_default[PG_V_IDX] | 1105 pmap_bits_default[PG_A_IDX]; 1106 } 1107 1108 /* 1109 * Connect the KVA slot up to the PML4 1110 */ 1111 for (j = 0; j < NKPML4E; ++j) { 1112 ((pdp_entry_t *)KPML4phys)[KPML4I + j] = 1113 KPDPphys + ((vm_paddr_t)j << PAGE_SHIFT); 1114 ((pdp_entry_t *)KPML4phys)[KPML4I + j] |= 1115 pmap_bits_default[PG_RW_IDX] | 1116 pmap_bits_default[PG_V_IDX] | 1117 pmap_bits_default[PG_A_IDX]; 1118 } 1119 cpu_mfence(); 1120 cpu_invltlb(); 1121 } 1122 1123 /* 1124 * Bootstrap the system enough to run with virtual memory. 1125 * 1126 * On x86_64 this is called after mapping has already been enabled 1127 * and just syncs the pmap module with what has already been done. 1128 * [We can't call it easily with mapping off since the kernel is not 1129 * mapped with PA == VA, hence we would have to relocate every address 1130 * from the linked base (virtual) address "KERNBASE" to the actual 1131 * (physical) address starting relative to 0] 1132 */ 1133 void 1134 pmap_bootstrap(vm_paddr_t *firstaddr) 1135 { 1136 vm_offset_t va; 1137 pt_entry_t *pte; 1138 int i; 1139 1140 KvaStart = VM_MIN_KERNEL_ADDRESS; 1141 KvaEnd = VM_MAX_KERNEL_ADDRESS; 1142 KvaSize = KvaEnd - KvaStart; 1143 1144 avail_start = *firstaddr; 1145 1146 /* 1147 * Create an initial set of page tables to run the kernel in. 1148 */ 1149 create_pagetables(firstaddr); 1150 1151 virtual2_start = KvaStart; 1152 virtual2_end = PTOV_OFFSET; 1153 1154 virtual_start = (vm_offset_t) PTOV_OFFSET + *firstaddr; 1155 virtual_start = pmap_kmem_choose(virtual_start); 1156 1157 virtual_end = VM_MAX_KERNEL_ADDRESS; 1158 1159 /* XXX do %cr0 as well */ 1160 load_cr4(rcr4() | CR4_PGE | CR4_PSE); 1161 load_cr3(KPML4phys); 1162 1163 /* 1164 * Initialize protection array. 
1165 */ 1166 x86_64_protection_init(); 1167 1168 /* 1169 * The kernel's pmap is statically allocated so we don't have to use 1170 * pmap_create, which is unlikely to work correctly at this part of 1171 * the boot sequence (XXX and which no longer exists). 1172 */ 1173 kernel_pmap.pm_pml4 = (pdp_entry_t *) (PTOV_OFFSET + KPML4phys); 1174 kernel_pmap.pm_count = 1; 1175 CPUMASK_ASSALLONES(kernel_pmap.pm_active); 1176 RB_INIT(&kernel_pmap.pm_pvroot); 1177 spin_init(&kernel_pmap.pm_spin, "pmapbootstrap"); 1178 for (i = 0; i < PM_PLACEMARKS; ++i) 1179 kernel_pmap.pm_placemarks[i] = PM_NOPLACEMARK; 1180 1181 /* 1182 * Reserve some special page table entries/VA space for temporary 1183 * mapping of pages. 1184 */ 1185 #define SYSMAP(c, p, v, n) \ 1186 v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n); 1187 1188 va = virtual_start; 1189 pte = vtopte(va); 1190 1191 /* 1192 * CMAP1/CMAP2 are used for zeroing and copying pages. 1193 */ 1194 SYSMAP(caddr_t, CMAP1, CADDR1, 1) 1195 1196 /* 1197 * Crashdump maps. 1198 */ 1199 SYSMAP(caddr_t, pt_crashdumpmap, crashdumpmap, MAXDUMPPGS); 1200 1201 /* 1202 * ptvmmap is used for reading arbitrary physical pages via 1203 * /dev/mem. 1204 */ 1205 SYSMAP(caddr_t, ptmmap, ptvmmap, 1) 1206 1207 /* 1208 * msgbufp is used to map the system message buffer. 1209 * XXX msgbufmap is not used. 1210 */ 1211 SYSMAP(struct msgbuf *, msgbufmap, msgbufp, 1212 atop(round_page(MSGBUF_SIZE))) 1213 1214 virtual_start = va; 1215 virtual_start = pmap_kmem_choose(virtual_start); 1216 1217 *CMAP1 = 0; 1218 1219 /* 1220 * PG_G is terribly broken on SMP because we IPI invltlb's in some 1221 * cases rather then invl1pg. Actually, I don't even know why it 1222 * works under UP because self-referential page table mappings 1223 */ 1224 // pgeflag = 0; 1225 1226 cpu_invltlb(); 1227 1228 /* Initialize the PAT MSR */ 1229 pmap_init_pat(); 1230 pmap_pinit_defaults(&kernel_pmap); 1231 1232 TUNABLE_INT_FETCH("machdep.pmap_fast_kernel_cpusync", 1233 &pmap_fast_kernel_cpusync); 1234 1235 } 1236 1237 /* 1238 * Setup the PAT MSR. 1239 */ 1240 void 1241 pmap_init_pat(void) 1242 { 1243 uint64_t pat_msr; 1244 u_long cr0, cr4; 1245 int i; 1246 1247 /* 1248 * Default values mapping PATi,PCD,PWT bits at system reset. 1249 * The default values effectively ignore the PATi bit by 1250 * repeating the encodings for 0-3 in 4-7, and map the PCD 1251 * and PWT bit combinations to the expected PAT types. 1252 */ 1253 pat_msr = PAT_VALUE(0, PAT_WRITE_BACK) | /* 000 */ 1254 PAT_VALUE(1, PAT_WRITE_THROUGH) | /* 001 */ 1255 PAT_VALUE(2, PAT_UNCACHED) | /* 010 */ 1256 PAT_VALUE(3, PAT_UNCACHEABLE) | /* 011 */ 1257 PAT_VALUE(4, PAT_WRITE_BACK) | /* 100 */ 1258 PAT_VALUE(5, PAT_WRITE_THROUGH) | /* 101 */ 1259 PAT_VALUE(6, PAT_UNCACHED) | /* 110 */ 1260 PAT_VALUE(7, PAT_UNCACHEABLE); /* 111 */ 1261 pat_pte_index[PAT_WRITE_BACK] = 0; 1262 pat_pte_index[PAT_WRITE_THROUGH]= 0 | X86_PG_NC_PWT; 1263 pat_pte_index[PAT_UNCACHED] = X86_PG_NC_PCD; 1264 pat_pte_index[PAT_UNCACHEABLE] = X86_PG_NC_PCD | X86_PG_NC_PWT; 1265 pat_pte_index[PAT_WRITE_PROTECTED] = pat_pte_index[PAT_UNCACHEABLE]; 1266 pat_pte_index[PAT_WRITE_COMBINING] = pat_pte_index[PAT_UNCACHEABLE]; 1267 1268 if (cpu_feature & CPUID_PAT) { 1269 /* 1270 * If we support the PAT then set-up entries for 1271 * WRITE_PROTECTED and WRITE_COMBINING using bit patterns 1272 * 5 and 6. 
1273 */ 1274 pat_msr = (pat_msr & ~PAT_MASK(5)) | 1275 PAT_VALUE(5, PAT_WRITE_PROTECTED); 1276 pat_msr = (pat_msr & ~PAT_MASK(6)) | 1277 PAT_VALUE(6, PAT_WRITE_COMBINING); 1278 pat_pte_index[PAT_WRITE_PROTECTED] = X86_PG_PTE_PAT | X86_PG_NC_PWT; 1279 pat_pte_index[PAT_WRITE_COMBINING] = X86_PG_PTE_PAT | X86_PG_NC_PCD; 1280 1281 /* 1282 * Then enable the PAT 1283 */ 1284 1285 /* Disable PGE. */ 1286 cr4 = rcr4(); 1287 load_cr4(cr4 & ~CR4_PGE); 1288 1289 /* Disable caches (CD = 1, NW = 0). */ 1290 cr0 = rcr0(); 1291 load_cr0((cr0 & ~CR0_NW) | CR0_CD); 1292 1293 /* Flushes caches and TLBs. */ 1294 wbinvd(); 1295 cpu_invltlb(); 1296 1297 /* Update PAT and index table. */ 1298 wrmsr(MSR_PAT, pat_msr); 1299 1300 /* Flush caches and TLBs again. */ 1301 wbinvd(); 1302 cpu_invltlb(); 1303 1304 /* Restore caches and PGE. */ 1305 load_cr0(cr0); 1306 load_cr4(cr4); 1307 PatMsr = pat_msr; 1308 } 1309 1310 for (i = 0; i < 8; ++i) { 1311 pt_entry_t pte; 1312 1313 pte = pat_pte_index[i]; 1314 if (pte & X86_PG_PTE_PAT) { 1315 pte &= ~X86_PG_PTE_PAT; 1316 pte |= X86_PG_PDE_PAT; 1317 } 1318 pat_pde_index[i] = pte; 1319 } 1320 } 1321 1322 /* 1323 * Set 4mb pdir for mp startup 1324 */ 1325 void 1326 pmap_set_opt(void) 1327 { 1328 if (cpu_feature & CPUID_PSE) { 1329 load_cr4(rcr4() | CR4_PSE); 1330 if (mycpu->gd_cpuid == 0) /* only on BSP */ 1331 cpu_invltlb(); 1332 } 1333 1334 /* 1335 * Check for SMAP support and enable if available. Must be done 1336 * after cr3 is loaded, and on all cores. 1337 */ 1338 if (cpu_stdext_feature & CPUID_STDEXT_SMAP) { 1339 load_cr4(rcr4() | CR4_SMAP); 1340 } 1341 if (cpu_stdext_feature & CPUID_STDEXT_SMEP) { 1342 load_cr4(rcr4() | CR4_SMEP); 1343 } 1344 } 1345 1346 /* 1347 * SMAP is just a processor flag, but SMEP can only be enabled 1348 * and disabled via CR4. We still use the processor flag to 1349 * disable SMAP because the page-fault/trap code checks it, in 1350 * order to allow a page-fault to actually occur. 1351 */ 1352 void 1353 smap_smep_disable(void) 1354 { 1355 /* 1356 * disable SMAP. This also bypasses a software failsafe check 1357 * in the trap() code. 1358 */ 1359 smap_open(); 1360 1361 /* 1362 * Also needed to bypass a software failsafe check in the trap() 1363 * code and allow the userspace address fault from kernel mode 1364 * to proceed. 1365 * 1366 * Note that This will not reload %rip because pcb_onfault_rsp will 1367 * not match. Just setting it to non-NULL is sufficient to bypass 1368 * the checks. 1369 */ 1370 curthread->td_pcb->pcb_onfault = (void *)1; 1371 1372 /* 1373 * Disable SMEP (requires modifying cr4) 1374 */ 1375 if (cpu_stdext_feature & CPUID_STDEXT_SMEP) 1376 load_cr4(rcr4() & ~CR4_SMEP); 1377 } 1378 1379 void 1380 smap_smep_enable(void) 1381 { 1382 if (cpu_stdext_feature & CPUID_STDEXT_SMEP) 1383 load_cr4(rcr4() | CR4_SMEP); 1384 curthread->td_pcb->pcb_onfault = NULL; 1385 smap_close(); 1386 } 1387 1388 /* 1389 * Early initialization of the pmap module. 1390 * 1391 * Called by vm_init, to initialize any structures that the pmap 1392 * system needs to map virtual memory. pmap_init has been enhanced to 1393 * support in a fairly consistant way, discontiguous physical memory. 1394 */ 1395 void 1396 pmap_init(void) 1397 { 1398 vm_pindex_t initial_pvs; 1399 vm_pindex_t i; 1400 1401 /* 1402 * Allocate memory for random pmap data structures. Includes the 1403 * pv_head_table. 
1404 */ 1405 for (i = 0; i < vm_page_array_size; i++) { 1406 vm_page_t m; 1407 1408 m = &vm_page_array[i]; 1409 m->md.interlock_count = 0; 1410 } 1411 1412 /* 1413 * init the pv free list 1414 */ 1415 initial_pvs = vm_page_array_size; 1416 if (initial_pvs < MINPV) 1417 initial_pvs = MINPV; 1418 pvzone = &pvzone_store; 1419 pvinit = (void *)kmem_alloc(&kernel_map, 1420 initial_pvs * sizeof (struct pv_entry), 1421 VM_SUBSYS_PVENTRY); 1422 zbootinit(pvzone, "PV ENTRY", sizeof (struct pv_entry), 1423 pvinit, initial_pvs); 1424 1425 /* 1426 * Now it is safe to enable pv_table recording. 1427 */ 1428 pmap_initialized = TRUE; 1429 } 1430 1431 /* 1432 * Initialize the address space (zone) for the pv_entries. Set a 1433 * high water mark so that the system can recover from excessive 1434 * numbers of pv entries. 1435 * 1436 * Also create the kernel page table template for isolated user 1437 * pmaps. 1438 */ 1439 static void pmap_init_iso_range(vm_offset_t base, size_t bytes); 1440 static void pmap_init2_iso_pmap(void); 1441 #if 0 1442 static void dump_pmap(pmap_t pmap, pt_entry_t pte, int level, vm_offset_t base); 1443 #endif 1444 1445 void 1446 pmap_init2(void) 1447 { 1448 vm_pindex_t entry_max; 1449 1450 /* 1451 * We can significantly reduce pv_entry_max from historical 1452 * levels because pv_entry's are no longer use for PTEs at the 1453 * leafs. This prevents excessive pcpu caching on many-core 1454 * boxes (even with the further '/ 16' done in zinitna(). 1455 * 1456 * Remember, however, that processes can share physical pages 1457 * with each process still needing the pdp/pd/pt infrstructure 1458 * (which still use pv_entry's). And don't just assume that 1459 * every PT will be completely filled up. So don't make it 1460 * too small. 1461 */ 1462 entry_max = maxproc * 32 + vm_page_array_size / 16; 1463 TUNABLE_LONG_FETCH("vm.pmap.pv_entries", &entry_max); 1464 vm_pmap_pv_entries = entry_max; 1465 1466 /* 1467 * Subtract out pages already installed in the zone (hack) 1468 */ 1469 if (entry_max <= MINPV) 1470 entry_max = MINPV; 1471 1472 zinitna(pvzone, NULL, 0, entry_max, ZONE_INTERRUPT); 1473 1474 /* 1475 * Enable dynamic deletion of empty higher-level page table pages 1476 * by default only if system memory is < 8GB (use 7GB for slop). 1477 * This can save a little memory, but imposes significant 1478 * performance overhead for things like bulk builds, and for programs 1479 * which do a lot of memory mapping and memory unmapping. 1480 */ 1481 #if 0 1482 if (pmap_dynamic_delete < 0) { 1483 if (vmstats.v_page_count < 7LL * 1024 * 1024 * 1024 / PAGE_SIZE) 1484 pmap_dynamic_delete = 1; 1485 else 1486 pmap_dynamic_delete = 0; 1487 } 1488 #endif 1489 /* 1490 * Disable so vm_map_backing iterations do not race 1491 */ 1492 pmap_dynamic_delete = 0; 1493 1494 /* 1495 * Automatic detection of Intel meltdown bug requiring user/kernel 1496 * mmap isolation. 1497 * 1498 * Currently there are so many Intel cpu's impacted that its better 1499 * to whitelist future Intel CPUs. Most? AMD cpus are not impacted 1500 * so the default is off for AMD. 
1501 */ 1502 if (meltdown_mitigation < 0) { 1503 if (cpu_vendor_id == CPU_VENDOR_INTEL) { 1504 meltdown_mitigation = 1; 1505 if (cpu_ia32_arch_caps & IA32_ARCH_CAP_RDCL_NO) 1506 meltdown_mitigation = 0; 1507 } else { 1508 meltdown_mitigation = 0; 1509 } 1510 } 1511 if (meltdown_mitigation) { 1512 kprintf("machdep.meltdown_mitigation enabled to " 1513 "protect against (mostly Intel) meltdown bug\n"); 1514 kprintf("system call performance will be impacted\n"); 1515 } 1516 1517 pmap_init2_iso_pmap(); 1518 } 1519 1520 /* 1521 * Create the isolation pmap template. Once created, the template 1522 * is static and its PML4e entries are used to populate the 1523 * kernel portion of any isolated user pmaps. 1524 * 1525 * Our isolation pmap must contain: 1526 * (1) trampoline area for all cpus 1527 * (2) common_tss area for all cpus (its part of the trampoline area now) 1528 * (3) IDT for all cpus 1529 * (4) GDT for all cpus 1530 */ 1531 static void 1532 pmap_init2_iso_pmap(void) 1533 { 1534 int n; 1535 1536 if (bootverbose) 1537 kprintf("Initialize isolation pmap\n"); 1538 1539 /* 1540 * Try to use our normal API calls to make this easier. We have 1541 * to scrap the shadowed kernel PDPs pmap_pinit() creates for our 1542 * iso_pmap. 1543 */ 1544 pmap_pinit(&iso_pmap); 1545 bzero(iso_pmap.pm_pml4, PAGE_SIZE); 1546 1547 /* 1548 * Install areas needed by the cpu and trampoline. 1549 */ 1550 for (n = 0; n < ncpus; ++n) { 1551 struct privatespace *ps; 1552 1553 ps = CPU_prvspace[n]; 1554 pmap_init_iso_range((vm_offset_t)&ps->trampoline, 1555 sizeof(ps->trampoline)); 1556 pmap_init_iso_range((vm_offset_t)&ps->dblstack, 1557 sizeof(ps->dblstack)); 1558 pmap_init_iso_range((vm_offset_t)&ps->dbgstack, 1559 sizeof(ps->dbgstack)); 1560 pmap_init_iso_range((vm_offset_t)&ps->common_tss, 1561 sizeof(ps->common_tss)); 1562 pmap_init_iso_range(r_idt_arr[n].rd_base, 1563 r_idt_arr[n].rd_limit + 1); 1564 } 1565 pmap_init_iso_range((register_t)gdt, sizeof(gdt)); 1566 pmap_init_iso_range((vm_offset_t)(int *)btext, 1567 (vm_offset_t)(int *)etext - 1568 (vm_offset_t)(int *)btext); 1569 1570 #if 0 1571 kprintf("Dump iso_pmap:\n"); 1572 dump_pmap(&iso_pmap, vtophys(iso_pmap.pm_pml4), 0, 0); 1573 kprintf("\nDump kernel_pmap:\n"); 1574 dump_pmap(&kernel_pmap, vtophys(kernel_pmap.pm_pml4), 0, 0); 1575 #endif 1576 } 1577 1578 /* 1579 * This adds a kernel virtual address range to the isolation pmap. 
1580 */ 1581 static void 1582 pmap_init_iso_range(vm_offset_t base, size_t bytes) 1583 { 1584 pv_entry_t pv; 1585 pv_entry_t pvp; 1586 pt_entry_t *ptep; 1587 pt_entry_t pte; 1588 vm_offset_t va; 1589 1590 if (bootverbose) { 1591 kprintf("isolate %016jx-%016jx (%zd)\n", 1592 base, base + bytes, bytes); 1593 } 1594 va = base & ~(vm_offset_t)PAGE_MASK; 1595 while (va < base + bytes) { 1596 if ((va & PDRMASK) == 0 && va + NBPDR <= base + bytes && 1597 (ptep = pmap_pt(&kernel_pmap, va)) != NULL && 1598 (*ptep & kernel_pmap.pmap_bits[PG_V_IDX]) && 1599 (*ptep & kernel_pmap.pmap_bits[PG_PS_IDX])) { 1600 /* 1601 * Use 2MB pages if possible 1602 */ 1603 pte = *ptep; 1604 pv = pmap_allocpte(&iso_pmap, pmap_pd_pindex(va), &pvp); 1605 ptep = pv_pte_lookup(pv, (va >> PDRSHIFT) & 511); 1606 *ptep = pte; 1607 va += NBPDR; 1608 } else { 1609 /* 1610 * Otherwise use 4KB pages 1611 */ 1612 pv = pmap_allocpte(&iso_pmap, pmap_pt_pindex(va), &pvp); 1613 ptep = pv_pte_lookup(pv, (va >> PAGE_SHIFT) & 511); 1614 *ptep = vtophys(va) | kernel_pmap.pmap_bits[PG_RW_IDX] | 1615 kernel_pmap.pmap_bits[PG_V_IDX] | 1616 kernel_pmap.pmap_bits[PG_A_IDX] | 1617 kernel_pmap.pmap_bits[PG_M_IDX]; 1618 1619 va += PAGE_SIZE; 1620 } 1621 pv_put(pv); 1622 pv_put(pvp); 1623 } 1624 } 1625 1626 #if 0 1627 /* 1628 * Useful debugging pmap dumper, do not remove (#if 0 when not in use) 1629 */ 1630 static 1631 void 1632 dump_pmap(pmap_t pmap, pt_entry_t pte, int level, vm_offset_t base) 1633 { 1634 pt_entry_t *ptp; 1635 vm_offset_t incr; 1636 int i; 1637 1638 switch(level) { 1639 case 0: /* PML4e page, 512G entries */ 1640 incr = (1LL << 48) / 512; 1641 break; 1642 case 1: /* PDP page, 1G entries */ 1643 incr = (1LL << 39) / 512; 1644 break; 1645 case 2: /* PD page, 2MB entries */ 1646 incr = (1LL << 30) / 512; 1647 break; 1648 case 3: /* PT page, 4KB entries */ 1649 incr = (1LL << 21) / 512; 1650 break; 1651 default: 1652 incr = 0; 1653 break; 1654 } 1655 1656 if (level == 0) 1657 kprintf("cr3 %016jx @ va=%016jx\n", pte, base); 1658 ptp = (void *)PHYS_TO_DMAP(pte & ~(pt_entry_t)PAGE_MASK); 1659 for (i = 0; i < 512; ++i) { 1660 if (level == 0 && i == 128) 1661 base += 0xFFFF000000000000LLU; 1662 if (ptp[i]) { 1663 kprintf("%*.*s ", level * 4, level * 4, ""); 1664 if (level == 1 && (ptp[i] & 0x180) == 0x180) { 1665 kprintf("va=%016jx %3d term %016jx (1GB)\n", 1666 base, i, ptp[i]); 1667 } else if (level == 2 && (ptp[i] & 0x180) == 0x180) { 1668 kprintf("va=%016jx %3d term %016jx (2MB)\n", 1669 base, i, ptp[i]); 1670 } else if (level == 3) { 1671 kprintf("va=%016jx %3d term %016jx\n", 1672 base, i, ptp[i]); 1673 } else { 1674 kprintf("va=%016jx %3d deep %016jx\n", 1675 base, i, ptp[i]); 1676 dump_pmap(pmap, ptp[i], level + 1, base); 1677 } 1678 } 1679 base += incr; 1680 } 1681 } 1682 1683 #endif 1684 1685 /* 1686 * Typically used to initialize a fictitious page by vm/device_pager.c 1687 */ 1688 void 1689 pmap_page_init(struct vm_page *m) 1690 { 1691 vm_page_init(m); 1692 m->md.interlock_count = 0; 1693 } 1694 1695 /*************************************************** 1696 * Low level helper routines..... 1697 ***************************************************/ 1698 1699 /* 1700 * Extract the physical page address associated with the map/VA pair. 1701 * The page must be wired for this to work reliably. 
1702 */ 1703 vm_paddr_t 1704 pmap_extract(pmap_t pmap, vm_offset_t va, void **handlep) 1705 { 1706 vm_paddr_t rtval; 1707 pv_entry_t pt_pv; 1708 pt_entry_t *ptep; 1709 1710 rtval = 0; 1711 if (va >= VM_MAX_USER_ADDRESS) { 1712 /* 1713 * Kernel page directories might be direct-mapped and 1714 * there is typically no PV tracking of pte's 1715 */ 1716 pd_entry_t *pt; 1717 1718 pt = pmap_pt(pmap, va); 1719 if (pt && (*pt & pmap->pmap_bits[PG_V_IDX])) { 1720 if (*pt & pmap->pmap_bits[PG_PS_IDX]) { 1721 rtval = *pt & PG_PS_FRAME; 1722 rtval |= va & PDRMASK; 1723 } else { 1724 ptep = pmap_pt_to_pte(*pt, va); 1725 if (*pt & pmap->pmap_bits[PG_V_IDX]) { 1726 rtval = *ptep & PG_FRAME; 1727 rtval |= va & PAGE_MASK; 1728 } 1729 } 1730 } 1731 if (handlep) 1732 *handlep = NULL; 1733 } else { 1734 /* 1735 * User pages currently do not direct-map the page directory 1736 * and some pages might not used managed PVs. But all PT's 1737 * will have a PV. 1738 */ 1739 pt_pv = pv_get(pmap, pmap_pt_pindex(va), NULL); 1740 if (pt_pv) { 1741 ptep = pv_pte_lookup(pt_pv, pmap_pte_index(va)); 1742 if (*ptep & pmap->pmap_bits[PG_V_IDX]) { 1743 rtval = *ptep & PG_FRAME; 1744 rtval |= va & PAGE_MASK; 1745 } 1746 if (handlep) 1747 *handlep = pt_pv; /* locked until done */ 1748 else 1749 pv_put (pt_pv); 1750 } else if (handlep) { 1751 *handlep = NULL; 1752 } 1753 } 1754 return rtval; 1755 } 1756 1757 void 1758 pmap_extract_done(void *handle) 1759 { 1760 if (handle) 1761 pv_put((pv_entry_t)handle); 1762 } 1763 1764 /* 1765 * Similar to extract but checks protections, SMP-friendly short-cut for 1766 * vm_fault_page[_quick](). Can return NULL to cause the caller to 1767 * fall-through to the real fault code. Does not work with HVM page 1768 * tables. 1769 * 1770 * if busyp is NULL the returned page, if not NULL, is held (and not busied). 1771 * 1772 * If busyp is not NULL and this function sets *busyp non-zero, the returned 1773 * page is busied (and not held). 1774 * 1775 * If busyp is not NULL and this function sets *busyp to zero, the returned 1776 * page is held (and not busied). 1777 * 1778 * If VM_PROT_WRITE is set in prot, and the pte is already writable, the 1779 * returned page will be dirtied. If the pte is not already writable NULL 1780 * is returned. In otherwords, if the bit is set and a vm_page_t is returned, 1781 * any COW will already have happened and that page can be written by the 1782 * caller. 1783 * 1784 * WARNING! THE RETURNED PAGE IS ONLY HELD AND NOT SUITABLE FOR READING 1785 * OR WRITING AS-IS. 
1786 */ 1787 vm_page_t 1788 pmap_fault_page_quick(pmap_t pmap, vm_offset_t va, vm_prot_t prot, int *busyp) 1789 { 1790 if (pmap && 1791 va < VM_MAX_USER_ADDRESS && 1792 (pmap->pm_flags & PMAP_HVM) == 0) { 1793 pv_entry_t pt_pv; 1794 pv_entry_t pte_pv; 1795 pt_entry_t *ptep; 1796 pt_entry_t req; 1797 vm_page_t m; 1798 int error; 1799 1800 req = pmap->pmap_bits[PG_V_IDX] | 1801 pmap->pmap_bits[PG_U_IDX]; 1802 if (prot & VM_PROT_WRITE) 1803 req |= pmap->pmap_bits[PG_RW_IDX]; 1804 1805 pt_pv = pv_get(pmap, pmap_pt_pindex(va), NULL); 1806 if (pt_pv == NULL) 1807 return (NULL); 1808 ptep = pv_pte_lookup(pt_pv, pmap_pte_index(va)); 1809 if ((*ptep & req) != req) { 1810 pv_put(pt_pv); 1811 return (NULL); 1812 } 1813 pte_pv = pv_get_try(pmap, pmap_pte_pindex(va), NULL, &error); 1814 if (pte_pv && error == 0) { 1815 m = pte_pv->pv_m; 1816 if (prot & VM_PROT_WRITE) { 1817 /* interlocked by presence of pv_entry */ 1818 vm_page_dirty(m); 1819 } 1820 if (busyp) { 1821 if (prot & VM_PROT_WRITE) { 1822 if (vm_page_busy_try(m, TRUE)) 1823 m = NULL; 1824 *busyp = 1; 1825 } else { 1826 vm_page_hold(m); 1827 *busyp = 0; 1828 } 1829 } else { 1830 vm_page_hold(m); 1831 } 1832 pv_put(pte_pv); 1833 } else if (pte_pv) { 1834 pv_drop(pte_pv); 1835 m = NULL; 1836 } else { 1837 /* error, since we didn't request a placemarker */ 1838 m = NULL; 1839 } 1840 pv_put(pt_pv); 1841 return(m); 1842 } else { 1843 return(NULL); 1844 } 1845 } 1846 1847 /* 1848 * Extract the physical page address associated kernel virtual address. 1849 */ 1850 vm_paddr_t 1851 pmap_kextract(vm_offset_t va) 1852 { 1853 pd_entry_t pt; /* pt entry in pd */ 1854 vm_paddr_t pa; 1855 1856 if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) { 1857 pa = DMAP_TO_PHYS(va); 1858 } else { 1859 pt = *vtopt(va); 1860 if (pt & kernel_pmap.pmap_bits[PG_PS_IDX]) { 1861 pa = (pt & PG_PS_FRAME) | (va & PDRMASK); 1862 } else { 1863 /* 1864 * Beware of a concurrent promotion that changes the 1865 * PDE at this point! For example, vtopte() must not 1866 * be used to access the PTE because it would use the 1867 * new PDE. It is, however, safe to use the old PDE 1868 * because the page table page is preserved by the 1869 * promotion. 1870 */ 1871 pa = *pmap_pt_to_pte(pt, va); 1872 pa = (pa & PG_FRAME) | (va & PAGE_MASK); 1873 } 1874 } 1875 return pa; 1876 } 1877 1878 /*************************************************** 1879 * Low level mapping routines..... 1880 ***************************************************/ 1881 1882 /* 1883 * Add a wired page to the KVA and invalidate the mapping on all CPUs. 1884 */ 1885 void 1886 pmap_kenter(vm_offset_t va, vm_paddr_t pa) 1887 { 1888 pt_entry_t *ptep; 1889 pt_entry_t npte; 1890 1891 npte = pa | 1892 kernel_pmap.pmap_bits[PG_RW_IDX] | 1893 kernel_pmap.pmap_bits[PG_V_IDX]; 1894 // pgeflag; 1895 ptep = vtopte(va); 1896 #if 1 1897 pmap_inval_smp(&kernel_pmap, va, 1, ptep, npte); 1898 #else 1899 /* FUTURE */ 1900 if (*ptep) 1901 pmap_inval_smp(&kernel_pmap, va, ptep, npte); 1902 else 1903 *ptep = npte; 1904 #endif 1905 } 1906 1907 /* 1908 * Similar to pmap_kenter(), except we only invalidate the mapping on the 1909 * current CPU. Returns 0 if the previous pte was 0, 1 if it wasn't 1910 * (caller can conditionalize calling smp_invltlb()). 
1911 */
1912 int
1913 pmap_kenter_quick(vm_offset_t va, vm_paddr_t pa)
1914 {
1915 pt_entry_t *ptep;
1916 pt_entry_t npte;
1917 int res;
1918
1919 npte = pa | kernel_pmap.pmap_bits[PG_RW_IDX] |
1920 kernel_pmap.pmap_bits[PG_V_IDX];
1921 // npte |= pgeflag;
1922 ptep = vtopte(va);
1923 #if 1
1924 res = 1;
1925 #else
1926 /* FUTURE */
1927 res = (*ptep != 0);
1928 #endif
1929 atomic_swap_long(ptep, npte);
1930 cpu_invlpg((void *)va);
1931
1932 return res;
1933 }
1934
1935 /*
1936 * Enter addresses into the kernel pmap but don't bother
1937 * doing any tlb invalidations. Caller will do a rollup
1938 * invalidation via pmap_rollup_inval().
1939 */
1940 int
1941 pmap_kenter_noinval(vm_offset_t va, vm_paddr_t pa)
1942 {
1943 pt_entry_t *ptep;
1944 pt_entry_t npte;
1945 int res;
1946
1947 npte = pa |
1948 kernel_pmap.pmap_bits[PG_RW_IDX] |
1949 kernel_pmap.pmap_bits[PG_V_IDX];
1950 // pgeflag;
1951 ptep = vtopte(va);
1952 #if 1
1953 res = 1;
1954 #else
1955 /* FUTURE */
1956 res = (*ptep != 0);
1957 #endif
1958 atomic_swap_long(ptep, npte);
1960
1961 return res;
1962 }
1963
1964 /*
1965 * Remove a page from the kernel page tables.
1966 */
1967 void
1968 pmap_kremove(vm_offset_t va)
1969 {
1970 pt_entry_t *ptep;
1971
1972 ptep = vtopte(va);
1973 pmap_inval_smp(&kernel_pmap, va, 1, ptep, 0);
1974 }
1975
1976 void
1977 pmap_kremove_quick(vm_offset_t va)
1978 {
1979 pt_entry_t *ptep;
1980
1981 ptep = vtopte(va);
1982 (void)pte_load_clear(ptep);
1983 cpu_invlpg((void *)va);
1984 }
1985
1986 /*
1987 * Remove addresses from the kernel pmap but don't bother
1988 * doing any tlb invalidations. Caller will do a rollup
1989 * invalidation via pmap_rollup_inval().
1990 */
1991 void
1992 pmap_kremove_noinval(vm_offset_t va)
1993 {
1994 pt_entry_t *ptep;
1995
1996 ptep = vtopte(va);
1997 (void)pte_load_clear(ptep);
1998 }
1999
2000 /*
2001 * XXX these need to be recoded. They are not used in any critical path.
2002 */
2003 void
2004 pmap_kmodify_rw(vm_offset_t va)
2005 {
2006 atomic_set_long(vtopte(va), kernel_pmap.pmap_bits[PG_RW_IDX]);
2007 cpu_invlpg((void *)va);
2008 }
2009
2010 /* NOT USED
2011 void
2012 pmap_kmodify_nc(vm_offset_t va)
2013 {
2014 atomic_set_long(vtopte(va), PG_N);
2015 cpu_invlpg((void *)va);
2016 }
2017 */
2018
2019 /*
2020 * Used to map a range of physical addresses into kernel virtual
2021 * address space during the low level boot, typically to map the
2022 * dump bitmap, message buffer, and vm_page_array.
2023 *
2024 * These mappings are typically made at some point after the end of the
2025 * kernel text+data.
2026 *
2027 * We could return PHYS_TO_DMAP(start) here and not allocate any
2028 * KVA via (*virtp), but then kmem from userland and kernel dumps won't
2029 * have access to the related pointers.
2030 */
2031 vm_offset_t
2032 pmap_map(vm_offset_t *virtp, vm_paddr_t start, vm_paddr_t end, int prot)
2033 {
2034 vm_offset_t va;
2035 vm_offset_t va_start;
2036
2037 /*return PHYS_TO_DMAP(start);*/
2038
2039 va_start = *virtp;
2040 va = va_start;
2041
2042 while (start < end) {
2043 pmap_kenter_quick(va, start);
2044 va += PAGE_SIZE;
2045 start += PAGE_SIZE;
2046 }
2047 *virtp = va;
2048 return va_start;
2049 }
2050
2051 #define PMAP_CLFLUSH_THRESHOLD (2 * 1024 * 1024)
2052
2053 /*
2054 * Remove the specified set of pages from the data and instruction caches.
2055 *
2056 * In contrast to pmap_invalidate_cache_range(), this function does not
2057 * rely on the CPU's self-snoop feature, because it is intended for use
2058 * when moving pages into a different cache domain.
2059 */
2060 void
2061 pmap_invalidate_cache_pages(vm_page_t *pages, int count)
2062 {
2063 vm_offset_t daddr, eva;
2064 int i;
2065
2066 if (count >= PMAP_CLFLUSH_THRESHOLD / PAGE_SIZE ||
2067 (cpu_feature & CPUID_CLFSH) == 0)
2068 wbinvd();
2069 else {
2070 cpu_mfence();
2071 for (i = 0; i < count; i++) {
2072 daddr = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pages[i]));
2073 eva = daddr + PAGE_SIZE;
2074 for (; daddr < eva; daddr += cpu_clflush_line_size)
2075 clflush(daddr);
2076 }
2077 cpu_mfence();
2078 }
2079 }
2080
2081 void
2082 pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
2083 {
2084 KASSERT((sva & PAGE_MASK) == 0,
2085 ("pmap_invalidate_cache_range: sva not page-aligned"));
2086 KASSERT((eva & PAGE_MASK) == 0,
2087 ("pmap_invalidate_cache_range: eva not page-aligned"));
2088
2089 if (cpu_feature & CPUID_SS) {
2090 ; /* If "Self Snoop" is supported, do nothing. */
2091 } else {
2092 /* Globally invalidate caches */
2093 cpu_wbinvd_on_all_cpus();
2094 }
2095 }
2096
2097 /*
2098 * Invalidate the specified range of virtual memory on all cpus associated
2099 * with the pmap.
2100 */
2101 void
2102 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
2103 {
2104 pmap_inval_smp(pmap, sva, (eva - sva) >> PAGE_SHIFT, NULL, 0);
2105 }
2106
2107 /*
2108 * Add a list of wired pages to the kva. This routine is used for temporary
2109 * kernel mappings such as those found in buffer cache buffers. Page
2110 * modifications and accesses are not tracked or recorded.
2111 *
2112 * NOTE! Old mappings are simply overwritten, and we cannot assume relaxed
2113 * semantics as previous mappings may have been zeroed without any
2114 * invalidation.
2115 *
2116 * The pages *must* be wired.
2117 */
2118 static __inline void
2119 _pmap_qenter(vm_offset_t beg_va, vm_page_t *m, int count, int doinval)
2120 {
2121 vm_offset_t end_va;
2122 vm_offset_t va;
2123
2124 end_va = beg_va + count * PAGE_SIZE;
2125
2126 for (va = beg_va; va < end_va; va += PAGE_SIZE) {
2127 pt_entry_t pte;
2128 pt_entry_t *ptep;
2129
2130 ptep = vtopte(va);
2131 pte = VM_PAGE_TO_PHYS(*m) |
2132 kernel_pmap.pmap_bits[PG_RW_IDX] |
2133 kernel_pmap.pmap_bits[PG_V_IDX] |
2134 kernel_pmap.pmap_cache_bits_pte[(*m)->pat_mode];
2135 // pgeflag;
2136 atomic_swap_long(ptep, pte);
2137 m++;
2138 }
2139 if (doinval)
2140 pmap_invalidate_range(&kernel_pmap, beg_va, end_va);
2141 }
2142
2143 void
2144 pmap_qenter(vm_offset_t beg_va, vm_page_t *m, int count)
2145 {
2146 _pmap_qenter(beg_va, m, count, 1);
2147 }
2148
2149 void
2150 pmap_qenter_noinval(vm_offset_t beg_va, vm_page_t *m, int count)
2151 {
2152 _pmap_qenter(beg_va, m, count, 0);
2153 }
2154
2155 /*
2156 * This routine jerks page mappings from the kernel -- it is meant only
2157 * for temporary mappings such as those found in buffer cache buffers.
2158 * Modified and access status is not recorded.
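 *
 * Hypothetical pairing with pmap_qenter() (illustrative sketch only;
 * va, pages, and npages are assumed to be supplied by the caller):
 *
 *	pmap_qenter(va, pages, npages);
 *	... access the pages through the temporary KVA ...
 *	pmap_qremove(va, npages);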
2159 *
2160 * MPSAFE, INTERRUPT SAFE (cluster callback)
2161 */
2162 void
2163 pmap_qremove(vm_offset_t beg_va, int count)
2164 {
2165 vm_offset_t end_va;
2166 vm_offset_t va;
2167
2168 end_va = beg_va + count * PAGE_SIZE;
2169
2170 for (va = beg_va; va < end_va; va += PAGE_SIZE) {
2171 pt_entry_t *pte;
2172
2173 pte = vtopte(va);
2174 (void)pte_load_clear(pte);
2175 cpu_invlpg((void *)va);
2176 }
2177 pmap_invalidate_range(&kernel_pmap, beg_va, end_va);
2178 }
2179
2180 /*
2181 * This routine removes temporary kernel mappings, only invalidating them
2182 * on the current cpu. It should only be used under carefully controlled
2183 * conditions.
2184 */
2185 void
2186 pmap_qremove_quick(vm_offset_t beg_va, int count)
2187 {
2188 vm_offset_t end_va;
2189 vm_offset_t va;
2190
2191 end_va = beg_va + count * PAGE_SIZE;
2192
2193 for (va = beg_va; va < end_va; va += PAGE_SIZE) {
2194 pt_entry_t *pte;
2195
2196 pte = vtopte(va);
2197 (void)pte_load_clear(pte);
2198 cpu_invlpg((void *)va);
2199 }
2200 }
2201
2202 /*
2203 * This routine removes temporary kernel mappings *without* invalidating
2204 * the TLB. It can only be used on permanent kva reservations such as those
2205 * found in buffer cache buffers, under carefully controlled circumstances.
2206 *
2207 * NOTE: Repopulating these KVAs requires unconditional invalidation.
2208 * (pmap_qenter() does unconditional invalidation).
2209 */
2210 void
2211 pmap_qremove_noinval(vm_offset_t beg_va, int count)
2212 {
2213 vm_offset_t end_va;
2214 vm_offset_t va;
2215
2216 end_va = beg_va + count * PAGE_SIZE;
2217
2218 for (va = beg_va; va < end_va; va += PAGE_SIZE) {
2219 pt_entry_t *pte;
2220
2221 pte = vtopte(va);
2222 (void)pte_load_clear(pte);
2223 }
2224 }
2225
2226 /*
2227 * Create a new thread and optionally associate it with a (new) process.
2228 * NOTE! the new thread's cpu may not equal the current cpu.
2229 */
2230 void
2231 pmap_init_thread(thread_t td)
2232 {
2233 /* enforce pcb placement & alignment */
2234 td->td_pcb = (struct pcb *)(td->td_kstack + td->td_kstack_size) - 1;
2235 td->td_pcb = (struct pcb *)((intptr_t)td->td_pcb & ~(intptr_t)0xF);
2236 td->td_savefpu = &td->td_pcb->pcb_save;
2237 td->td_sp = (char *)td->td_pcb; /* no -16 */
2238 }
2239
2240 /*
2241 * This routine directly affects the fork performance of a process.
2242 */
2243 void
2244 pmap_init_proc(struct proc *p)
2245 {
2246 }
2247
2248 static void
2249 pmap_pinit_defaults(struct pmap *pmap)
2250 {
2251 bcopy(pmap_bits_default, pmap->pmap_bits,
2252 sizeof(pmap_bits_default));
2253 bcopy(protection_codes, pmap->protection_codes,
2254 sizeof(protection_codes));
2255 bcopy(pat_pte_index, pmap->pmap_cache_bits_pte,
2256 sizeof(pat_pte_index));
2257 bcopy(pat_pde_index, pmap->pmap_cache_bits_pde,
2258 sizeof(pat_pde_index));
2259 pmap->pmap_cache_mask_pte = X86_PG_NC_PWT | X86_PG_NC_PCD | X86_PG_PTE_PAT;
2260 pmap->pmap_cache_mask_pde = X86_PG_NC_PWT | X86_PG_NC_PCD | X86_PG_PDE_PAT;
2261 pmap->copyinstr = std_copyinstr;
2262 pmap->copyin = std_copyin;
2263 pmap->copyout = std_copyout;
2264 pmap->fubyte = std_fubyte;
2265 pmap->subyte = std_subyte;
2266 pmap->fuword32 = std_fuword32;
2267 pmap->fuword64 = std_fuword64;
2268 pmap->suword32 = std_suword32;
2269 pmap->suword64 = std_suword64;
2270 pmap->swapu32 = std_swapu32;
2271 pmap->swapu64 = std_swapu64;
2272 pmap->fuwordadd32 = std_fuwordadd32;
2273 pmap->fuwordadd64 = std_fuwordadd64;
2274 }
2275 /*
2276 * Initialize pmap0/vmspace0.
2277 *
2278 * On architectures where the kernel pmap is not integrated into the user
2279 * process pmap, this pmap represents the process pmap, not the kernel pmap.
2280 * Use the global kernel_pmap to access the kernel's pmap directly.
2281 */
2282 void
2283 pmap_pinit0(struct pmap *pmap)
2284 {
2285 int i;
2286
2287 pmap->pm_pml4 = (pml4_entry_t *)(PTOV_OFFSET + KPML4phys);
2288 pmap->pm_count = 1;
2289 CPUMASK_ASSZERO(pmap->pm_active);
2290 pmap->pm_pvhint_pt = NULL;
2291 pmap->pm_pvhint_unused = NULL;
2292 RB_INIT(&pmap->pm_pvroot);
2293 spin_init(&pmap->pm_spin, "pmapinit0");
2294 for (i = 0; i < PM_PLACEMARKS; ++i)
2295 pmap->pm_placemarks[i] = PM_NOPLACEMARK;
2296 bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
2297 pmap_pinit_defaults(pmap);
2298 }
2299
2300 /*
2301 * Initialize a preallocated and zeroed pmap structure,
2302 * such as one in a vmspace structure.
2303 */
2304 static void
2305 pmap_pinit_simple(struct pmap *pmap)
2306 {
2307 int i;
2308
2309 /*
2310 * Misc initialization
2311 */
2312 pmap->pm_count = 1;
2313 CPUMASK_ASSZERO(pmap->pm_active);
2314 pmap->pm_pvhint_pt = NULL;
2315 pmap->pm_pvhint_unused = NULL;
2316 pmap->pm_flags = PMAP_FLAG_SIMPLE;
2317
2318 pmap_pinit_defaults(pmap);
2319
2320 /*
2321 * Don't blow up locks/tokens on re-use (XXX fix/use drop code
2322 * for this).
2323 */
2324 if (pmap->pm_pmlpv == NULL) {
2325 RB_INIT(&pmap->pm_pvroot);
2326 bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
2327 spin_init(&pmap->pm_spin, "pmapinitsimple");
2328 for (i = 0; i < PM_PLACEMARKS; ++i)
2329 pmap->pm_placemarks[i] = PM_NOPLACEMARK;
2330 }
2331 }
2332
2333 void
2334 pmap_pinit(struct pmap *pmap)
2335 {
2336 pv_entry_t pv;
2337 int j;
2338
2339 if (pmap->pm_pmlpv) {
2340 if (pmap->pmap_bits[TYPE_IDX] != REGULAR_PMAP) {
2341 pmap_puninit(pmap);
2342 }
2343 }
2344
2345 pmap_pinit_simple(pmap);
2346 pmap->pm_flags &= ~PMAP_FLAG_SIMPLE;
2347
2348 /*
2349 * No need to allocate page table space yet but we do need a valid
2350 * page directory table.
2351 */
2352 if (pmap->pm_pml4 == NULL) {
2353 pmap->pm_pml4 =
2354 (pml4_entry_t *)kmem_alloc_pageable(&kernel_map,
2355 PAGE_SIZE * 2,
2356 VM_SUBSYS_PML4);
2357 pmap->pm_pml4_iso = (void *)((char *)pmap->pm_pml4 + PAGE_SIZE);
2358 }
2359
2360 /*
2361 * Allocate the PML4e table, which wires it even though it isn't
2362 * being entered into some higher level page table (it being the
2363 * highest level). If one is already cached we don't have to do
2364 * anything.
2365 */
2366 if ((pv = pmap->pm_pmlpv) == NULL) {
2367 pv = pmap_allocpte(pmap, pmap_pml4_pindex(), NULL);
2368 pmap->pm_pmlpv = pv;
2369 pmap_kenter((vm_offset_t)pmap->pm_pml4,
2370 VM_PAGE_TO_PHYS(pv->pv_m));
2371 pv_put(pv);
2372
2373 /*
2374 * Install DMAP and KMAP.
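 *
 * The DMAP PDPs (DMPDPphys) give the new pmap the kernel's
 * direct map of physical memory and the KMAP PDPs (KPDPphys)
 * give it the kernel text/data mappings. The PDP pages
 * themselves are shared; only the PML4 entries pointing at
 * them are per-pmap, as the two loops below show.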
2375 */ 2376 for (j = 0; j < NDMPML4E; ++j) { 2377 pmap->pm_pml4[DMPML4I + j] = 2378 (DMPDPphys + ((vm_paddr_t)j << PAGE_SHIFT)) | 2379 pmap->pmap_bits[PG_RW_IDX] | 2380 pmap->pmap_bits[PG_V_IDX] | 2381 pmap->pmap_bits[PG_A_IDX]; 2382 } 2383 for (j = 0; j < NKPML4E; ++j) { 2384 pmap->pm_pml4[KPML4I + j] = 2385 (KPDPphys + ((vm_paddr_t)j << PAGE_SHIFT)) | 2386 pmap->pmap_bits[PG_RW_IDX] | 2387 pmap->pmap_bits[PG_V_IDX] | 2388 pmap->pmap_bits[PG_A_IDX]; 2389 } 2390 2391 /* 2392 * install self-referential address mapping entry 2393 */ 2394 pmap->pm_pml4[PML4PML4I] = VM_PAGE_TO_PHYS(pv->pv_m) | 2395 pmap->pmap_bits[PG_V_IDX] | 2396 pmap->pmap_bits[PG_RW_IDX] | 2397 pmap->pmap_bits[PG_A_IDX]; 2398 } else { 2399 KKASSERT(pv->pv_m->flags & PG_MAPPED); 2400 KKASSERT(pv->pv_m->flags & PG_WRITEABLE); 2401 } 2402 KKASSERT(pmap->pm_pml4[255] == 0); 2403 2404 /* 2405 * When implementing an isolated userland pmap, a second PML4e table 2406 * is needed. We use pmap_pml4_pindex() + 1 for convenience, but 2407 * note that we do not operate on this table using our API functions 2408 * so handling of the + 1 case is mostly just to prevent implosions. 2409 * 2410 * We install an isolated version of the kernel PDPs into this 2411 * second PML4e table. The pmap code will mirror all user PDPs 2412 * between the primary and secondary PML4e table. 2413 */ 2414 if ((pv = pmap->pm_pmlpv_iso) == NULL && meltdown_mitigation && 2415 pmap != &iso_pmap) { 2416 pv = pmap_allocpte(pmap, pmap_pml4_pindex() + 1, NULL); 2417 pmap->pm_pmlpv_iso = pv; 2418 pmap_kenter((vm_offset_t)pmap->pm_pml4_iso, 2419 VM_PAGE_TO_PHYS(pv->pv_m)); 2420 pv_put(pv); 2421 2422 /* 2423 * Install an isolated version of the kernel pmap for 2424 * user consumption, using PDPs constructed in iso_pmap. 2425 */ 2426 for (j = 0; j < NKPML4E; ++j) { 2427 pmap->pm_pml4_iso[KPML4I + j] = 2428 iso_pmap.pm_pml4[KPML4I + j]; 2429 } 2430 } else if (pv) { 2431 KKASSERT(pv->pv_m->flags & PG_MAPPED); 2432 KKASSERT(pv->pv_m->flags & PG_WRITEABLE); 2433 } 2434 } 2435 2436 /* 2437 * Clean up a pmap structure so it can be physically freed. This routine 2438 * is called by the vmspace dtor function. A great deal of pmap data is 2439 * left passively mapped to improve vmspace management so we have a bit 2440 * of cleanup work to do here. 
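 *
 * Sketch of the teardown below: release the cached PML4 pv and its
 * wired page (and likewise for the isolated PML4 pv, if the pmap has
 * one), then free the two-page KVA block backing pm_pml4 and
 * pm_pml4_iso. On return the pmap must have no resident or wired
 * pages left, as the final assertions check.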
2441 */
2442 void
2443 pmap_puninit(pmap_t pmap)
2444 {
2445 pv_entry_t pv;
2446 vm_page_t p;
2447
2448 KKASSERT(CPUMASK_TESTZERO(pmap->pm_active));
2449 if ((pv = pmap->pm_pmlpv) != NULL) {
2450 if (pv_hold_try(pv) == 0)
2451 pv_lock(pv);
2452 KKASSERT(pv == pmap->pm_pmlpv);
2453 p = pmap_remove_pv_page(pv, 1);
2454 pv_free(pv, NULL);
2455 pv = NULL; /* safety */
2456 pmap_kremove((vm_offset_t)pmap->pm_pml4);
2457 vm_page_busy_wait(p, FALSE, "pgpun");
2458 KKASSERT(p->flags & PG_UNQUEUED);
2459 vm_page_unwire(p, 0);
2460 vm_page_flag_clear(p, PG_MAPPED | PG_WRITEABLE);
2461 vm_page_free(p);
2462 pmap->pm_pmlpv = NULL;
2463 }
2464 if ((pv = pmap->pm_pmlpv_iso) != NULL) {
2465 if (pv_hold_try(pv) == 0)
2466 pv_lock(pv);
2467 KKASSERT(pv == pmap->pm_pmlpv_iso);
2468 p = pmap_remove_pv_page(pv, 1);
2469 pv_free(pv, NULL);
2470 pv = NULL; /* safety */
2471 pmap_kremove((vm_offset_t)pmap->pm_pml4_iso);
2472 vm_page_busy_wait(p, FALSE, "pgpun");
2473 KKASSERT(p->flags & PG_UNQUEUED);
2474 vm_page_unwire(p, 0);
2475 vm_page_flag_clear(p, PG_MAPPED | PG_WRITEABLE);
2476 vm_page_free(p);
2477 pmap->pm_pmlpv_iso = NULL;
2478 }
2479 if (pmap->pm_pml4) {
2480 KKASSERT(pmap->pm_pml4 != (void *)(PTOV_OFFSET + KPML4phys));
2481 kmem_free(&kernel_map,
2482 (vm_offset_t)pmap->pm_pml4, PAGE_SIZE * 2);
2483 pmap->pm_pml4 = NULL;
2484 pmap->pm_pml4_iso = NULL;
2485 }
2486 KKASSERT(pmap->pm_stats.resident_count == 0);
2487 KKASSERT(pmap->pm_stats.wired_count == 0);
2488 }
2489
2490 /*
2491 * This function is now unused (used to add the pmap to the pmap_list)
2492 */
2493 void
2494 pmap_pinit2(struct pmap *pmap)
2495 {
2496 }
2497
2498 /*
2499 * This routine is called when various levels in the page table need to
2500 * be populated. This routine cannot fail.
2501 *
2502 * This function returns two locked pv_entry's, one representing the
2503 * requested pv and one representing the requested pv's parent pv. If
2504 * an intermediate page table does not exist it will be created, mapped,
2505 * wired, and the parent page table will be given an additional hold
2506 * count representing the presence of the child pv_entry.
2507 */
2508 static
2509 pv_entry_t
2510 pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, pv_entry_t *pvpp)
2511 {
2512 pt_entry_t *ptep;
2513 pt_entry_t *ptep_iso;
2514 pv_entry_t pv;
2515 pv_entry_t pvp;
2516 pt_entry_t v;
2517 vm_page_t m;
2518 int isnew;
2519 int ispt;
2520
2521 /*
2522 * If the pv already exists and we aren't being asked for the
2523 * parent page table page we can just return it. A locked+held pv
2524 * is returned. The pv will also have a second hold related to the
2525 * pmap association that we don't have to worry about.
2526 */
2527 ispt = 0;
2528 pv = pv_alloc(pmap, ptepindex, &isnew);
2529 if (isnew == 0 && pvpp == NULL)
2530 return(pv);
2531
2532 /*
2533 * DragonFly doesn't use PV's to represent terminal PTEs any more.
2534 * The index range is still used for placemarkers, but not for
2535 * actual pv_entry's.
2536 */
2537 KKASSERT(ptepindex >= pmap_pt_pindex(0));
2538
2539 /*
2540 * Note that pt_pv's are only returned for user VAs. We assert that
2541 * a pt_pv is not being requested for kernel VAs. The kernel
2542 * pre-wires all higher-level page tables so don't overload managed
2543 * higher-level page tables on top of it!
2544 *
2545 * However, it's convenient for us to allow the case when creating
2546 * iso_pmap. This is a bit of a hack but it simplifies iso_pmap
2547 * a lot.
2548 */
2549
2550 /*
2551 * The kernel never uses managed PT/PD/PDP pages.
2552 */ 2553 KKASSERT(pmap != &kernel_pmap); 2554 2555 /* 2556 * Non-terminal PVs allocate a VM page to represent the page table, 2557 * so we have to resolve pvp and calculate ptepindex for the pvp 2558 * and then for the page table entry index in the pvp for 2559 * fall-through. 2560 */ 2561 if (ptepindex < pmap_pd_pindex(0)) { 2562 /* 2563 * pv is PT, pvp is PD 2564 */ 2565 ptepindex = (ptepindex - pmap_pt_pindex(0)) >> NPDEPGSHIFT; 2566 ptepindex += NUPTE_TOTAL + NUPT_TOTAL; 2567 pvp = pmap_allocpte(pmap, ptepindex, NULL); 2568 2569 /* 2570 * PT index in PD 2571 */ 2572 ptepindex = pv->pv_pindex - pmap_pt_pindex(0); 2573 ptepindex &= ((1ul << NPDEPGSHIFT) - 1); 2574 ispt = 1; 2575 } else if (ptepindex < pmap_pdp_pindex(0)) { 2576 /* 2577 * pv is PD, pvp is PDP 2578 * 2579 * SIMPLE PMAP NOTE: Simple pmaps do not allocate above 2580 * the PD. 2581 */ 2582 ptepindex = (ptepindex - pmap_pd_pindex(0)) >> NPDPEPGSHIFT; 2583 ptepindex += NUPTE_TOTAL + NUPT_TOTAL + NUPD_TOTAL; 2584 2585 if (pmap->pm_flags & PMAP_FLAG_SIMPLE) { 2586 KKASSERT(pvpp == NULL); 2587 pvp = NULL; 2588 } else { 2589 pvp = pmap_allocpte(pmap, ptepindex, NULL); 2590 } 2591 2592 /* 2593 * PD index in PDP 2594 */ 2595 ptepindex = pv->pv_pindex - pmap_pd_pindex(0); 2596 ptepindex &= ((1ul << NPDPEPGSHIFT) - 1); 2597 } else if (ptepindex < pmap_pml4_pindex()) { 2598 /* 2599 * pv is PDP, pvp is the root pml4 table 2600 */ 2601 pvp = pmap_allocpte(pmap, pmap_pml4_pindex(), NULL); 2602 2603 /* 2604 * PDP index in PML4 2605 */ 2606 ptepindex = pv->pv_pindex - pmap_pdp_pindex(0); 2607 ptepindex &= ((1ul << NPML4EPGSHIFT) - 1); 2608 } else { 2609 /* 2610 * pv represents the top-level PML4, there is no parent. 2611 */ 2612 pvp = NULL; 2613 } 2614 2615 if (isnew == 0) 2616 goto notnew; 2617 2618 /* 2619 * (isnew) is TRUE. 2620 * 2621 * (1) Add a wire count to the parent page table (pvp). 2622 * (2) Allocate a VM page for the page table. 2623 * (3) Enter the VM page into the parent page table. 2624 * 2625 * page table pages are marked PG_WRITEABLE and PG_MAPPED. 2626 */ 2627 if (pvp) 2628 vm_page_wire_quick(pvp->pv_m); 2629 2630 for (;;) { 2631 m = vm_page_alloc(NULL, pv->pv_pindex, 2632 VM_ALLOC_NORMAL | VM_ALLOC_SYSTEM | 2633 VM_ALLOC_INTERRUPT); 2634 if (m) 2635 break; 2636 vm_wait(0); 2637 } 2638 vm_page_wire(m); /* wire for mapping in parent */ 2639 pmap_zero_page(VM_PAGE_TO_PHYS(m)); 2640 m->valid = VM_PAGE_BITS_ALL; 2641 vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE | PG_UNQUEUED); 2642 KKASSERT(m->queue == PQ_NONE); 2643 2644 pv->pv_m = m; 2645 2646 /* 2647 * (isnew) is TRUE. 2648 * 2649 * Wire the page into pvp. Bump the resident_count for the pmap. 2650 * There is no pvp for the top level, address the pm_pml4[] array 2651 * directly. 2652 * 2653 * If the caller wants the parent we return it, otherwise 2654 * we just put it away. 2655 * 2656 * No interlock is needed for pte 0 -> non-zero. 2657 * 2658 * In the situation where *ptep is valid we might have an unmanaged 2659 * page table page shared from another page table which we need to 2660 * unshare before installing our private page table page. 
2661 */ 2662 if (pvp) { 2663 v = VM_PAGE_TO_PHYS(m) | 2664 (pmap->pmap_bits[PG_RW_IDX] | 2665 pmap->pmap_bits[PG_V_IDX] | 2666 pmap->pmap_bits[PG_A_IDX]); 2667 if (ptepindex < NUPTE_USER) 2668 v |= pmap->pmap_bits[PG_U_IDX]; 2669 if (ptepindex < pmap_pt_pindex(0)) 2670 v |= pmap->pmap_bits[PG_M_IDX]; 2671 2672 ptep = pv_pte_lookup(pvp, ptepindex); 2673 if (pvp == pmap->pm_pmlpv && pmap->pm_pmlpv_iso) 2674 ptep_iso = pv_pte_lookup(pmap->pm_pmlpv_iso, ptepindex); 2675 else 2676 ptep_iso = NULL; 2677 if (*ptep & pmap->pmap_bits[PG_V_IDX]) { 2678 panic("pmap_allocpte: ptpte present without pv_entry!"); 2679 } else { 2680 pt_entry_t pte; 2681 2682 pte = atomic_swap_long(ptep, v); 2683 if (ptep_iso) 2684 atomic_swap_long(ptep_iso, v); 2685 if (pte != 0) { 2686 kprintf("install pgtbl mixup 0x%016jx " 2687 "old/new 0x%016jx/0x%016jx\n", 2688 (intmax_t)ptepindex, pte, v); 2689 } 2690 } 2691 } 2692 vm_page_wakeup(m); 2693 2694 notnew: 2695 /* 2696 * (isnew) may be TRUE or FALSE. 2697 */ 2698 if (pvp) { 2699 KKASSERT(pvp->pv_m != NULL); 2700 ptep = pv_pte_lookup(pvp, ptepindex); 2701 v = VM_PAGE_TO_PHYS(pv->pv_m) | 2702 (pmap->pmap_bits[PG_RW_IDX] | 2703 pmap->pmap_bits[PG_V_IDX] | 2704 pmap->pmap_bits[PG_A_IDX]); 2705 if (ptepindex < NUPTE_USER) 2706 v |= pmap->pmap_bits[PG_U_IDX]; 2707 if (ptepindex < pmap_pt_pindex(0)) 2708 v |= pmap->pmap_bits[PG_M_IDX]; 2709 if (*ptep != v) { 2710 kprintf("mismatched upper level pt %016jx/%016jx\n", 2711 *ptep, v); 2712 } 2713 } 2714 if (pvpp) 2715 *pvpp = pvp; 2716 else if (pvp) 2717 pv_put(pvp); 2718 return (pv); 2719 } 2720 2721 /* 2722 * Release any resources held by the given physical map. 2723 * 2724 * Called when a pmap initialized by pmap_pinit is being released. Should 2725 * only be called if the map contains no valid mappings. 2726 */ 2727 struct pmap_release_info { 2728 pmap_t pmap; 2729 int retry; 2730 pv_entry_t pvp; 2731 }; 2732 2733 static int pmap_release_callback(pv_entry_t pv, void *data); 2734 2735 void 2736 pmap_release(struct pmap *pmap) 2737 { 2738 struct pmap_release_info info; 2739 2740 KASSERT(CPUMASK_TESTZERO(pmap->pm_active), 2741 ("pmap still active! %016jx", 2742 (uintmax_t)CPUMASK_LOWMASK(pmap->pm_active))); 2743 2744 /* 2745 * There is no longer a pmap_list, if there were we would remove the 2746 * pmap from it here. 2747 */ 2748 2749 /* 2750 * Pull pv's off the RB tree in order from low to high and release 2751 * each page. 2752 */ 2753 info.pmap = pmap; 2754 do { 2755 info.retry = 0; 2756 info.pvp = NULL; 2757 2758 spin_lock(&pmap->pm_spin); 2759 RB_SCAN(pv_entry_rb_tree, &pmap->pm_pvroot, NULL, 2760 pmap_release_callback, &info); 2761 spin_unlock(&pmap->pm_spin); 2762 2763 if (info.pvp) 2764 pv_put(info.pvp); 2765 } while (info.retry); 2766 2767 2768 /* 2769 * One resident page (the pml4 page) should remain. Two if 2770 * the pmap has implemented an isolated userland PML4E table. 2771 * No wired pages should remain. 
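 *
 * Concretely (see expected_res below): a simple pmap expects 0
 * resident pages, a normal pmap expects 1 (its PML4 page), and a
 * pmap with an isolated userland PML4 expects one more.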
2772 */ 2773 int expected_res = 0; 2774 2775 if ((pmap->pm_flags & PMAP_FLAG_SIMPLE) == 0) 2776 ++expected_res; 2777 if (pmap->pm_pmlpv_iso) 2778 ++expected_res; 2779 2780 #if 1 2781 if (pmap->pm_stats.resident_count != expected_res || 2782 pmap->pm_stats.wired_count != 0) { 2783 kprintf("fatal pmap problem - pmap %p flags %08x " 2784 "rescnt=%jd wirecnt=%jd\n", 2785 pmap, 2786 pmap->pm_flags, 2787 pmap->pm_stats.resident_count, 2788 pmap->pm_stats.wired_count); 2789 tsleep(pmap, 0, "DEAD", 0); 2790 } 2791 #else 2792 KKASSERT(pmap->pm_stats.resident_count == expected_res); 2793 KKASSERT(pmap->pm_stats.wired_count == 0); 2794 #endif 2795 } 2796 2797 /* 2798 * Called from low to high. We must cache the proper parent pv so we 2799 * can adjust its wired count. 2800 */ 2801 static int 2802 pmap_release_callback(pv_entry_t pv, void *data) 2803 { 2804 struct pmap_release_info *info = data; 2805 pmap_t pmap = info->pmap; 2806 vm_pindex_t pindex; 2807 int r; 2808 2809 /* 2810 * Acquire a held and locked pv, check for release race 2811 */ 2812 pindex = pv->pv_pindex; 2813 if (info->pvp == pv) { 2814 spin_unlock(&pmap->pm_spin); 2815 info->pvp = NULL; 2816 } else if (pv_hold_try(pv)) { 2817 spin_unlock(&pmap->pm_spin); 2818 } else { 2819 spin_unlock(&pmap->pm_spin); 2820 pv_lock(pv); 2821 pv_put(pv); 2822 info->retry = 1; 2823 spin_lock(&pmap->pm_spin); 2824 2825 return -1; 2826 } 2827 KKASSERT(pv->pv_pmap == pmap && pindex == pv->pv_pindex); 2828 2829 if (pv->pv_pindex < pmap_pt_pindex(0)) { 2830 /* 2831 * I am PTE, parent is PT 2832 */ 2833 pindex = pv->pv_pindex >> NPTEPGSHIFT; 2834 pindex += NUPTE_TOTAL; 2835 } else if (pv->pv_pindex < pmap_pd_pindex(0)) { 2836 /* 2837 * I am PT, parent is PD 2838 */ 2839 pindex = (pv->pv_pindex - NUPTE_TOTAL) >> NPDEPGSHIFT; 2840 pindex += NUPTE_TOTAL + NUPT_TOTAL; 2841 } else if (pv->pv_pindex < pmap_pdp_pindex(0)) { 2842 /* 2843 * I am PD, parent is PDP 2844 */ 2845 pindex = (pv->pv_pindex - NUPTE_TOTAL - NUPT_TOTAL) >> 2846 NPDPEPGSHIFT; 2847 pindex += NUPTE_TOTAL + NUPT_TOTAL + NUPD_TOTAL; 2848 } else if (pv->pv_pindex < pmap_pml4_pindex()) { 2849 /* 2850 * I am PDP, parent is PML4. We always calculate the 2851 * normal PML4 here, not the isolated PML4. 2852 */ 2853 pindex = pmap_pml4_pindex(); 2854 } else { 2855 /* 2856 * parent is NULL 2857 */ 2858 if (info->pvp) { 2859 pv_put(info->pvp); 2860 info->pvp = NULL; 2861 } 2862 pindex = 0; 2863 } 2864 if (pindex) { 2865 if (info->pvp && info->pvp->pv_pindex != pindex) { 2866 pv_put(info->pvp); 2867 info->pvp = NULL; 2868 } 2869 if (info->pvp == NULL) 2870 info->pvp = pv_get(pmap, pindex, NULL); 2871 } else { 2872 if (info->pvp) { 2873 pv_put(info->pvp); 2874 info->pvp = NULL; 2875 } 2876 } 2877 r = pmap_release_pv(pv, info->pvp, NULL); 2878 spin_lock(&pmap->pm_spin); 2879 2880 return(r); 2881 } 2882 2883 /* 2884 * Called with held (i.e. also locked) pv. This function will dispose of 2885 * the lock along with the pv. 2886 * 2887 * If the caller already holds the locked parent page table for pv it 2888 * must pass it as pvp, allowing us to avoid a deadlock, else it can 2889 * pass NULL for pvp. 2890 */ 2891 static int 2892 pmap_release_pv(pv_entry_t pv, pv_entry_t pvp, pmap_inval_bulk_t *bulk) 2893 { 2894 vm_page_t p; 2895 2896 /* 2897 * The pmap is currently not spinlocked, pv is held+locked. 2898 * Remove the pv's page from its parent's page table. The 2899 * parent's page table page's wire_count will be decremented. 2900 * 2901 * This will clean out the pte at any level of the page table. 
2902 * If smp != 0 all cpus are affected.
2903 *
2904 * Do not tear down recursively; it's faster to just let the
2905 * release run its course.
2906 */
2907 pmap_remove_pv_pte(pv, pvp, bulk, 0);
2908
2909 /*
2910 * Terminal pvs are unhooked from their vm_pages. Because
2911 * terminal pages aren't page table pages they aren't wired
2912 * by us, so we have to be sure not to unwire them either.
2913 *
2914 * XXX It is unclear if this code ever gets called because we
2915 * no longer use pv's to track terminal pages.
2916 */
2917 if (pv->pv_pindex < pmap_pt_pindex(0)) {
2918 pmap_remove_pv_page(pv, 0);
2919 goto skip;
2920 }
2921
2922 /*
2923 * We leave the top-level page table page cached, wired, and
2924 * mapped in the pmap until the dtor function (pmap_puninit())
2925 * gets called.
2926 *
2927 * Since we are leaving the top-level pv intact we need
2928 * to break out of what would otherwise be an infinite loop.
2929 *
2930 * This covers both the normal and the isolated PML4 page.
2931 */
2932 if (pv->pv_pindex >= pmap_pml4_pindex()) {
2933 pv_put(pv);
2934 return(-1);
2935 }
2936
2937 /*
2938 * For page table pages (other than the top-level page),
2939 * remove and free the vm_page. The representative mapping
2940 * removed above by pmap_remove_pv_pte() did not undo the
2941 * last wire_count so we have to do that as well.
2942 */
2943 p = pmap_remove_pv_page(pv, 1);
2944 vm_page_busy_wait(p, FALSE, "pmaprl");
2945 if (p->wire_count != 1) {
2946 const char *tstr;
2947
2948 if (pv->pv_pindex >= pmap_pdp_pindex(0))
2949 tstr = "PDP";
2950 else if (pv->pv_pindex >= pmap_pd_pindex(0))
2951 tstr = "PD";
2952 else if (pv->pv_pindex >= pmap_pt_pindex(0))
2953 tstr = "PT";
2954 else
2955 tstr = "PTE";
2956
2957 kprintf("p(%s) p->wire_count was %016lx %d\n",
2958 tstr, pv->pv_pindex, p->wire_count);
2959 }
2960 KKASSERT(p->wire_count == 1);
2961 KKASSERT(p->flags & PG_UNQUEUED);
2962
2963 vm_page_unwire(p, 0);
2964 KKASSERT(p->wire_count == 0);
2965
2966 vm_page_free(p);
2967 skip:
2968 pv_free(pv, pvp);
2969
2970 return 0;
2971 }
2972
2973 /*
2974 * This function will remove the pte associated with a pv from its parent.
2975 * Terminal pv's are supported. All cpus specified by (bulk) are properly
2976 * invalidated.
2977 *
2978 * The wire count will be dropped on the parent page table. The wire
2979 * count on the page being removed (pv->pv_m) from the parent page table
2980 * is NOT touched. Note that terminal pages will not have any additional
2981 * wire counts while page table pages will have at least one representing
2982 * the mapping, plus others representing sub-mappings.
2983 *
2984 * NOTE: Cannot be called on kernel page table pages, only KVM terminal
2985 * pages and user page table and terminal pages.
2986 *
2987 * NOTE: The pte being removed might be unmanaged, and the pv supplied might
2988 * be freshly allocated and not imply that the pte is managed. In this
2989 * case pv->pv_m should be NULL.
2990 *
2991 * The pv must be locked. The pvp, if supplied, must be locked. All
2992 * supplied pv's will remain locked on return.
2993 *
2994 * XXX must lock parent pv's if they exist to remove pte XXX
2995 */
2996 static
2997 void
2998 pmap_remove_pv_pte(pv_entry_t pv, pv_entry_t pvp, pmap_inval_bulk_t *bulk,
2999 int destroy)
3000 {
3001 vm_pindex_t ptepindex = pv->pv_pindex;
3002 pmap_t pmap = pv->pv_pmap;
3003 vm_page_t p;
3004 int gotpvp = 0;
3005
3006 KKASSERT(pmap);
3007
3008 if (ptepindex >= pmap_pml4_pindex()) {
3009 /*
3010 * We are the top level PML4E table, there is no parent.
3011 *
3012 * This is either the normal or isolated PML4E table.
3013 * Only the normal is used in regular operation, the isolated
3014 * is only passed in when breaking down the whole pmap.
3015 */
3016 p = pmap->pm_pmlpv->pv_m;
3017 KKASSERT(pv->pv_m == p); /* debugging */
3018 } else if (ptepindex >= pmap_pdp_pindex(0)) {
3019 /*
3020 * Remove a PDP page from the PML4E. This can only occur
3021 * with user page tables. We do not have to lock the
3022 * pml4 PV so just ignore pvp.
3023 */
3024 vm_pindex_t pml4_pindex;
3025 vm_pindex_t pdp_index;
3026 pml4_entry_t *pdp;
3027 pml4_entry_t *pdp_iso;
3028
3029 pdp_index = ptepindex - pmap_pdp_pindex(0);
3030 if (pvp == NULL) {
3031 pml4_pindex = pmap_pml4_pindex();
3032 pvp = pv_get(pv->pv_pmap, pml4_pindex, NULL);
3033 KKASSERT(pvp);
3034 gotpvp = 1;
3035 }
3036
3037 pdp = &pmap->pm_pml4[pdp_index & ((1ul << NPML4EPGSHIFT) - 1)];
3038 KKASSERT((*pdp & pmap->pmap_bits[PG_V_IDX]) != 0);
3039 p = PHYS_TO_VM_PAGE(*pdp & PG_FRAME);
3040 pmap_inval_bulk(bulk, (vm_offset_t)-1, pdp, 0);
3041
3042 /*
3043 * Also remove the PDP from the isolated PML4E if the
3044 * process uses one.
3045 */
3046 if (pvp == pmap->pm_pmlpv && pmap->pm_pmlpv_iso) {
3047 pdp_iso = &pmap->pm_pml4_iso[pdp_index &
3048 ((1ul << NPML4EPGSHIFT) - 1)];
3049 pmap_inval_bulk(bulk, (vm_offset_t)-1, pdp_iso, 0);
3050 }
3051 KKASSERT(pv->pv_m == p); /* debugging */
3052 } else if (ptepindex >= pmap_pd_pindex(0)) {
3053 /*
3054 * Remove a PD page from the PDP
3055 *
3056 * SIMPLE PMAP NOTE: Non-existent pvp's are ok in the case
3057 * of a simple pmap because it stops at
3058 * the PD page.
3059 */
3060 vm_pindex_t pdp_pindex;
3061 vm_pindex_t pd_index;
3062 pdp_entry_t *pd;
3063
3064 pd_index = ptepindex - pmap_pd_pindex(0);
3065
3066 if (pvp == NULL) {
3067 pdp_pindex = NUPTE_TOTAL + NUPT_TOTAL + NUPD_TOTAL +
3068 (pd_index >> NPML4EPGSHIFT);
3069 pvp = pv_get(pv->pv_pmap, pdp_pindex, NULL);
3070 gotpvp = 1;
3071 }
3072
3073 if (pvp) {
3074 pd = pv_pte_lookup(pvp, pd_index &
3075 ((1ul << NPDPEPGSHIFT) - 1));
3076 KKASSERT((*pd & pmap->pmap_bits[PG_V_IDX]) != 0);
3077 p = PHYS_TO_VM_PAGE(*pd & PG_FRAME);
3078 pmap_inval_bulk(bulk, (vm_offset_t)-1, pd, 0);
3079 } else {
3080 KKASSERT(pmap->pm_flags & PMAP_FLAG_SIMPLE);
3081 p = pv->pv_m; /* degenerate test later */
3082 }
3083 KKASSERT(pv->pv_m == p); /* debugging */
3084 } else if (ptepindex >= pmap_pt_pindex(0)) {
3085 /*
3086 * Remove a PT page from the PD
3087 */
3088 vm_pindex_t pd_pindex;
3089 vm_pindex_t pt_index;
3090 pd_entry_t *pt;
3091
3092 pt_index = ptepindex - pmap_pt_pindex(0);
3093
3094 if (pvp == NULL) {
3095 pd_pindex = NUPTE_TOTAL + NUPT_TOTAL +
3096 (pt_index >> NPDPEPGSHIFT);
3097 pvp = pv_get(pv->pv_pmap, pd_pindex, NULL);
3098 KKASSERT(pvp);
3099 gotpvp = 1;
3100 }
3101
3102 pt = pv_pte_lookup(pvp, pt_index & ((1ul << NPDPEPGSHIFT) - 1));
3103 #if 0
3104 KASSERT((*pt & pmap->pmap_bits[PG_V_IDX]) != 0,
3105 ("*pt unexpectedly invalid %016jx "
3106 "gotpvp=%d ptepindex=%ld ptindex=%ld pv=%p pvp=%p",
3107 *pt, gotpvp, ptepindex, pt_index, pv, pvp));
3108 p = PHYS_TO_VM_PAGE(*pt & PG_FRAME);
3109 #else
3110 if ((*pt & pmap->pmap_bits[PG_V_IDX]) == 0) {
3111 kprintf("*pt unexpectedly invalid %016jx "
3112 "gotpvp=%d ptepindex=%ld ptindex=%ld "
3113 "pv=%p pvp=%p\n",
3114 *pt, gotpvp, ptepindex, pt_index, pv, pvp);
3115 tsleep(pt, 0, "DEAD", 0);
3116 p = pv->pv_m;
3117 } else {
3118 p = PHYS_TO_VM_PAGE(*pt & PG_FRAME);
3119 }
3120 #endif
3121 pmap_inval_bulk(bulk, (vm_offset_t)-1, pt, 0);
3122 KKASSERT(pv->pv_m ==
p); /* debugging */ 3123 } else { 3124 KKASSERT(0); 3125 } 3126 3127 /* 3128 * If requested, scrap the underlying pv->pv_m and the underlying 3129 * pv. If this is a page-table-page we must also free the page. 3130 * 3131 * pvp must be returned locked. 3132 */ 3133 if (destroy == 1) { 3134 /* 3135 * page table page (PT, PD, PDP, PML4), caller was responsible 3136 * for testing wired_count. 3137 */ 3138 KKASSERT(pv->pv_m->wire_count == 1); 3139 p = pmap_remove_pv_page(pv, 1); 3140 pv_free(pv, pvp); 3141 pv = NULL; 3142 3143 vm_page_busy_wait(p, FALSE, "pgpun"); 3144 vm_page_unwire(p, 0); 3145 vm_page_flag_clear(p, PG_MAPPED | PG_WRITEABLE); 3146 vm_page_free(p); 3147 } 3148 3149 /* 3150 * If we acquired pvp ourselves then we are responsible for 3151 * recursively deleting it. 3152 */ 3153 if (pvp && gotpvp) { 3154 /* 3155 * Recursively destroy higher-level page tables. 3156 * 3157 * This is optional. If we do not, they will still 3158 * be destroyed when the process exits. 3159 * 3160 * NOTE: Do not destroy pv_entry's with extra hold refs, 3161 * a caller may have unlocked it and intends to 3162 * continue to use it. 3163 */ 3164 if (pmap_dynamic_delete && 3165 pvp->pv_m && 3166 pvp->pv_m->wire_count == 1 && 3167 (pvp->pv_hold & PV_HOLD_MASK) == 2 && 3168 pvp->pv_pindex < pmap_pml4_pindex()) { 3169 if (pmap != &kernel_pmap) { 3170 pmap_remove_pv_pte(pvp, NULL, bulk, 1); 3171 pvp = NULL; /* safety */ 3172 } else { 3173 kprintf("Attempt to remove kernel_pmap pindex " 3174 "%jd\n", pvp->pv_pindex); 3175 pv_put(pvp); 3176 } 3177 } else { 3178 pv_put(pvp); 3179 } 3180 } 3181 } 3182 3183 /* 3184 * Remove the vm_page association to a pv. The pv must be locked. 3185 */ 3186 static 3187 vm_page_t 3188 pmap_remove_pv_page(pv_entry_t pv, int clrpgbits) 3189 { 3190 vm_page_t m; 3191 3192 m = pv->pv_m; 3193 pv->pv_m = NULL; 3194 if (clrpgbits) 3195 vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE); 3196 3197 return(m); 3198 } 3199 3200 /* 3201 * Grow the number of kernel page table entries, if needed. 3202 * 3203 * This routine is always called to validate any address space 3204 * beyond KERNBASE (for kldloads). kernel_vm_end only governs the address 3205 * space below KERNBASE. 3206 * 3207 * kernel_map must be locked exclusively by the caller. 3208 */ 3209 void 3210 pmap_growkernel(vm_offset_t kstart, vm_offset_t kend) 3211 { 3212 vm_paddr_t paddr; 3213 vm_offset_t ptppaddr; 3214 vm_page_t nkpg; 3215 pd_entry_t *pt, newpt; 3216 pdp_entry_t *pd, newpd; 3217 int update_kernel_vm_end; 3218 3219 /* 3220 * bootstrap kernel_vm_end on first real VM use 3221 */ 3222 if (kernel_vm_end == 0) { 3223 kernel_vm_end = VM_MIN_KERNEL_ADDRESS; 3224 3225 for (;;) { 3226 pt = pmap_pt(&kernel_pmap, kernel_vm_end); 3227 if (pt == NULL) 3228 break; 3229 if ((*pt & kernel_pmap.pmap_bits[PG_V_IDX]) == 0) 3230 break; 3231 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & 3232 ~(vm_offset_t)(PAGE_SIZE * NPTEPG - 1); 3233 if (kernel_vm_end - 1 >= vm_map_max(&kernel_map)) { 3234 kernel_vm_end = vm_map_max(&kernel_map); 3235 break; 3236 } 3237 } 3238 } 3239 3240 /* 3241 * Fill in the gaps. kernel_vm_end is only adjusted for ranges 3242 * below KERNBASE. Ranges above KERNBASE are kldloaded and we 3243 * do not want to force-fill 128G worth of page tables. 
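 *
 * Illustrative example of the rounding below, assuming 4KiB pages and
 * NPTEPG == 512 so that one page table maps 2MiB: a request for
 * [kstart, kend) is widened to 2MiB boundaries before any PD or PT
 * pages are allocated, i.e. effectively
 *
 *	kstart = rounddown2(kstart, 2UL * 1024 * 1024);
 *	kend = roundup2(kend, 2UL * 1024 * 1024);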
3244 */ 3245 if (kstart < KERNBASE) { 3246 if (kstart > kernel_vm_end) 3247 kstart = kernel_vm_end; 3248 KKASSERT(kend <= KERNBASE); 3249 update_kernel_vm_end = 1; 3250 } else { 3251 update_kernel_vm_end = 0; 3252 } 3253 3254 kstart = rounddown2(kstart, (vm_offset_t)(PAGE_SIZE * NPTEPG)); 3255 kend = roundup2(kend, (vm_offset_t)(PAGE_SIZE * NPTEPG)); 3256 3257 if (kend - 1 >= vm_map_max(&kernel_map)) 3258 kend = vm_map_max(&kernel_map); 3259 3260 while (kstart < kend) { 3261 pt = pmap_pt(&kernel_pmap, kstart); 3262 if (pt == NULL) { 3263 /* 3264 * We need a new PD entry 3265 */ 3266 nkpg = vm_page_alloc(NULL, mycpu->gd_rand_incr++, 3267 VM_ALLOC_NORMAL | 3268 VM_ALLOC_SYSTEM | 3269 VM_ALLOC_INTERRUPT); 3270 if (nkpg == NULL) { 3271 panic("pmap_growkernel: no memory to grow " 3272 "kernel"); 3273 } 3274 paddr = VM_PAGE_TO_PHYS(nkpg); 3275 pmap_zero_page(paddr); 3276 pd = pmap_pd(&kernel_pmap, kstart); 3277 3278 newpd = (pdp_entry_t) 3279 (paddr | 3280 kernel_pmap.pmap_bits[PG_V_IDX] | 3281 kernel_pmap.pmap_bits[PG_RW_IDX] | 3282 kernel_pmap.pmap_bits[PG_A_IDX]); 3283 atomic_swap_long(pd, newpd); 3284 3285 #if 0 3286 kprintf("NEWPD pd=%p pde=%016jx phys=%016jx\n", 3287 pd, newpd, paddr); 3288 #endif 3289 3290 continue; /* try again */ 3291 } 3292 3293 if ((*pt & kernel_pmap.pmap_bits[PG_V_IDX]) != 0) { 3294 kstart = (kstart + PAGE_SIZE * NPTEPG) & 3295 ~(vm_offset_t)(PAGE_SIZE * NPTEPG - 1); 3296 if (kstart - 1 >= vm_map_max(&kernel_map)) { 3297 kstart = vm_map_max(&kernel_map); 3298 break; 3299 } 3300 continue; 3301 } 3302 3303 /* 3304 * We need a new PT 3305 * 3306 * This index is bogus, but out of the way 3307 */ 3308 nkpg = vm_page_alloc(NULL, mycpu->gd_rand_incr++, 3309 VM_ALLOC_NORMAL | 3310 VM_ALLOC_SYSTEM | 3311 VM_ALLOC_INTERRUPT); 3312 if (nkpg == NULL) 3313 panic("pmap_growkernel: no memory to grow kernel"); 3314 3315 vm_page_wire(nkpg); 3316 ptppaddr = VM_PAGE_TO_PHYS(nkpg); 3317 pmap_zero_page(ptppaddr); 3318 newpt = (pd_entry_t)(ptppaddr | 3319 kernel_pmap.pmap_bits[PG_V_IDX] | 3320 kernel_pmap.pmap_bits[PG_RW_IDX] | 3321 kernel_pmap.pmap_bits[PG_A_IDX]); 3322 atomic_swap_long(pt, newpt); 3323 3324 kstart = (kstart + PAGE_SIZE * NPTEPG) & 3325 ~(vm_offset_t)(PAGE_SIZE * NPTEPG - 1); 3326 3327 if (kstart - 1 >= vm_map_max(&kernel_map)) { 3328 kstart = vm_map_max(&kernel_map); 3329 break; 3330 } 3331 } 3332 3333 /* 3334 * Only update kernel_vm_end for areas below KERNBASE. 3335 */ 3336 if (update_kernel_vm_end && kernel_vm_end < kstart) 3337 kernel_vm_end = kstart; 3338 } 3339 3340 /* 3341 * Add a reference to the specified pmap. 3342 */ 3343 void 3344 pmap_reference(pmap_t pmap) 3345 { 3346 if (pmap != NULL) 3347 atomic_add_int(&pmap->pm_count, 1); 3348 } 3349 3350 void 3351 pmap_maybethreaded(pmap_t pmap) 3352 { 3353 atomic_set_int(&pmap->pm_flags, PMAP_MULTI); 3354 } 3355 3356 /* 3357 * Called while page is hard-busied to clear the PG_MAPPED and PG_WRITEABLE 3358 * flags if able. This can happen when the pmap code is unable to clear 3359 * the bits in prior actions due to not holding the page hard-busied at 3360 * the time. 3361 * 3362 * The clearing of PG_MAPPED/WRITEABLE is an optional optimization done 3363 * when the pte is removed and only if the pte has not been multiply-mapped. 3364 * The caller may have to call vm_page_protect() if the bits are still set 3365 * here. 3366 * 3367 * This function is expected to be quick. 
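 *
 * Hypothetical caller pattern (illustrative only), following the
 * vm_page_protect() note above:
 *
 *	if (pmap_mapped_sync(m) & PG_WRITEABLE)
 *		vm_page_protect(m, VM_PROT_READ);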
3368 */ 3369 int 3370 pmap_mapped_sync(vm_page_t m) 3371 { 3372 return (m->flags); 3373 } 3374 3375 /*************************************************** 3376 * page management routines. 3377 ***************************************************/ 3378 3379 /* 3380 * Hold a pv without locking it 3381 */ 3382 #if 0 3383 static void 3384 pv_hold(pv_entry_t pv) 3385 { 3386 atomic_add_int(&pv->pv_hold, 1); 3387 } 3388 #endif 3389 3390 /* 3391 * Hold a pv_entry, preventing its destruction. TRUE is returned if the pv 3392 * was successfully locked, FALSE if it wasn't. The caller must dispose of 3393 * the pv properly. 3394 * 3395 * Either the pmap->pm_spin or the related vm_page_spin (if traversing a 3396 * pv list via its page) must be held by the caller in order to stabilize 3397 * the pv. 3398 */ 3399 static int 3400 _pv_hold_try(pv_entry_t pv PMAP_DEBUG_DECL) 3401 { 3402 u_int count; 3403 3404 /* 3405 * Critical path shortcut expects pv to already have one ref 3406 * (for the pv->pv_pmap). 3407 */ 3408 count = pv->pv_hold; 3409 cpu_ccfence(); 3410 for (;;) { 3411 if ((count & PV_HOLD_LOCKED) == 0) { 3412 if (atomic_fcmpset_int(&pv->pv_hold, &count, 3413 (count + 1) | PV_HOLD_LOCKED)) { 3414 #ifdef PMAP_DEBUG 3415 pv->pv_func = func; 3416 pv->pv_line = lineno; 3417 #endif 3418 return TRUE; 3419 } 3420 } else { 3421 if (atomic_fcmpset_int(&pv->pv_hold, &count, count + 1)) 3422 return FALSE; 3423 } 3424 /* retry */ 3425 } 3426 } 3427 3428 /* 3429 * Drop a previously held pv_entry which could not be locked, allowing its 3430 * destruction. 3431 * 3432 * Must not be called with a spinlock held as we might zfree() the pv if it 3433 * is no longer associated with a pmap and this was the last hold count. 3434 */ 3435 static void 3436 pv_drop(pv_entry_t pv) 3437 { 3438 u_int count; 3439 3440 for (;;) { 3441 count = pv->pv_hold; 3442 cpu_ccfence(); 3443 KKASSERT((count & PV_HOLD_MASK) > 0); 3444 KKASSERT((count & (PV_HOLD_LOCKED | PV_HOLD_MASK)) != 3445 (PV_HOLD_LOCKED | 1)); 3446 if (atomic_cmpset_int(&pv->pv_hold, count, count - 1)) { 3447 if ((count & PV_HOLD_MASK) == 1) { 3448 #ifdef PMAP_DEBUG2 3449 if (pmap_enter_debug > 0) { 3450 --pmap_enter_debug; 3451 kprintf("pv_drop: free pv %p\n", pv); 3452 } 3453 #endif 3454 KKASSERT(count == 1); 3455 KKASSERT(pv->pv_pmap == NULL); 3456 zfree(pvzone, pv); 3457 } 3458 return; 3459 } 3460 /* retry */ 3461 } 3462 } 3463 3464 /* 3465 * Find or allocate the requested PV entry, returning a locked, held pv. 3466 * 3467 * If (*isnew) is non-zero, the returned pv will have two hold counts, one 3468 * for the caller and one representing the pmap and vm_page association. 3469 * 3470 * If (*isnew) is zero, the returned pv will have only one hold count. 3471 * 3472 * Since both associations can only be adjusted while the pv is locked, 3473 * together they represent just one additional hold. 
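 *
 * Hedged usage sketch (hypothetical; pmap_allocpte() is the main real
 * consumer of this pattern):
 *
 *	int isnew;
 *	pv_entry_t pv;
 *
 *	pv = pv_alloc(pmap, pindex, &isnew);
 *	if (isnew)
 *		... first-time setup, e.g. allocate and wire pv->pv_m ...
 *	pv_put(pv);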
3474 */
3475 static
3476 pv_entry_t
3477 _pv_alloc(pmap_t pmap, vm_pindex_t pindex, int *isnew PMAP_DEBUG_DECL)
3478 {
3479 struct mdglobaldata *md = mdcpu;
3480 pv_entry_t pv;
3481 pv_entry_t pnew;
3482 int pmap_excl = 0;
3483
3484 pnew = NULL;
3485 if (md->gd_newpv) {
3486 #if 1
3487 pnew = atomic_swap_ptr((void *)&md->gd_newpv, NULL);
3488 #else
3489 crit_enter();
3490 pnew = md->gd_newpv; /* might race NULL */
3491 md->gd_newpv = NULL;
3492 crit_exit();
3493 #endif
3494 }
3495 if (pnew == NULL)
3496 pnew = zalloc(pvzone);
3497
3498 spin_lock_shared(&pmap->pm_spin);
3499 for (;;) {
3500 /*
3501 * Shortcut cache
3502 */
3503 pv = pv_entry_lookup(pmap, pindex);
3504 if (pv == NULL) {
3505 vm_pindex_t *pmark;
3506
3507 /*
3508 * Requires exclusive pmap spinlock
3509 */
3510 if (pmap_excl == 0) {
3511 pmap_excl = 1;
3512 if (!spin_lock_upgrade_try(&pmap->pm_spin)) {
3513 spin_unlock_shared(&pmap->pm_spin);
3514 spin_lock(&pmap->pm_spin);
3515 continue;
3516 }
3517 }
3518
3519 /*
3520 * We need to block if someone is holding our
3521 * placemarker. As long as we determine the
3522 * placemarker has not been acquired we do not
3523 * need to get it as acquisition also requires
3524 * the pmap spin lock.
3525 *
3526 * However, we can race the wakeup.
3527 */
3528 pmark = pmap_placemarker_hash(pmap, pindex);
3529
3530 if (((*pmark ^ pindex) & ~PM_PLACEMARK_WAKEUP) == 0) {
3531 tsleep_interlock(pmark, 0);
3532 atomic_set_long(pmark, PM_PLACEMARK_WAKEUP);
3533 if (((*pmark ^ pindex) &
3534 ~PM_PLACEMARK_WAKEUP) == 0) {
3535 spin_unlock(&pmap->pm_spin);
3536 tsleep(pmark, PINTERLOCKED, "pvplc", 0);
3537 spin_lock(&pmap->pm_spin);
3538 }
3539 continue;
3540 }
3541
3542 /*
3543 * Setup the new entry
3544 */
3545 pnew->pv_pmap = pmap;
3546 pnew->pv_pindex = pindex;
3547 pnew->pv_hold = PV_HOLD_LOCKED | 2;
3548 pnew->pv_flags = 0;
3549 #ifdef PMAP_DEBUG
3550 pnew->pv_func = func;
3551 pnew->pv_line = lineno;
3552 if (pnew->pv_line_lastfree > 0) {
3553 pnew->pv_line_lastfree =
3554 -pnew->pv_line_lastfree;
3555 }
3556 #endif
3557 pv = pv_entry_rb_tree_RB_INSERT(&pmap->pm_pvroot, pnew);
3558 atomic_add_long(&pmap->pm_stats.resident_count, 1);
3559 spin_unlock(&pmap->pm_spin);
3560 *isnew = 1;
3561
3562 KASSERT(pv == NULL, ("pv insert failed %p->%p", pnew, pv));
3563 return(pnew);
3564 }
3565
3566 /*
3567 * We already have an entry, cleanup the staged pnew if
3568 * we can get the lock, otherwise block and retry.
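 *
 * (The staged pnew is parked back in the per-cpu gd_newpv cache
 * via atomic_swap_ptr() below so the next allocation can often
 * skip zalloc() entirely; only a displaced older entry gets
 * zfree()d.)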
3569 */
3570 if (__predict_true(_pv_hold_try(pv PMAP_DEBUG_COPY))) {
3571 if (pmap_excl)
3572 spin_unlock(&pmap->pm_spin);
3573 else
3574 spin_unlock_shared(&pmap->pm_spin);
3575 #if 1
3576 pnew = atomic_swap_ptr((void *)&md->gd_newpv, pnew);
3577 if (pnew)
3578 zfree(pvzone, pnew);
3579 #else
3580 crit_enter();
3581 if (md->gd_newpv == NULL)
3582 md->gd_newpv = pnew;
3583 else
3584 zfree(pvzone, pnew);
3585 crit_exit();
3586 #endif
3587 KKASSERT(pv->pv_pmap == pmap &&
3588 pv->pv_pindex == pindex);
3589 *isnew = 0;
3590 return(pv);
3591 }
3592 if (pmap_excl) {
3593 spin_unlock(&pmap->pm_spin);
3594 _pv_lock(pv PMAP_DEBUG_COPY);
3595 pv_put(pv);
3596 spin_lock(&pmap->pm_spin);
3597 } else {
3598 spin_unlock_shared(&pmap->pm_spin);
3599 _pv_lock(pv PMAP_DEBUG_COPY);
3600 pv_put(pv);
3601 spin_lock_shared(&pmap->pm_spin);
3602 }
3603 }
3604 /* NOT REACHED */
3605 }
3606
3607 /*
3608 * Find the requested PV entry, returning a locked+held pv or NULL
3609 */
3610 static
3611 pv_entry_t
3612 _pv_get(pmap_t pmap, vm_pindex_t pindex, vm_pindex_t **pmarkp PMAP_DEBUG_DECL)
3613 {
3614 pv_entry_t pv;
3615 int pmap_excl = 0;
3616
3617 spin_lock_shared(&pmap->pm_spin);
3618 for (;;) {
3619 /*
3620 * Shortcut cache
3621 */
3622 pv = pv_entry_lookup(pmap, pindex);
3623 if (pv == NULL) {
3624 /*
3625 * Block if there is ANY placemarker. If we are to
3626 * return it, we must also acquire the spot, so we
3627 * have to block even if the placemarker is held on
3628 * a different address.
3629 *
3630 * OPTIMIZATION: If pmarkp is passed as NULL the
3631 * caller is just probing (or looking for a real
3632 * pv_entry), and in this case we only need to check
3633 * to see if the placemarker matches pindex.
3634 */
3635 vm_pindex_t *pmark;
3636
3637 /*
3638 * Requires exclusive pmap spinlock
3639 */
3640 if (pmap_excl == 0) {
3641 pmap_excl = 1;
3642 if (!spin_lock_upgrade_try(&pmap->pm_spin)) {
3643 spin_unlock_shared(&pmap->pm_spin);
3644 spin_lock(&pmap->pm_spin);
3645 continue;
3646 }
3647 }
3648
3649 pmark = pmap_placemarker_hash(pmap, pindex);
3650
3651 if ((pmarkp && *pmark != PM_NOPLACEMARK) ||
3652 ((*pmark ^ pindex) & ~PM_PLACEMARK_WAKEUP) == 0) {
3653 tsleep_interlock(pmark, 0);
3654 atomic_set_long(pmark, PM_PLACEMARK_WAKEUP);
3655 if ((pmarkp && *pmark != PM_NOPLACEMARK) ||
3656 ((*pmark ^ pindex) &
3657 ~PM_PLACEMARK_WAKEUP) == 0) {
3658 spin_unlock(&pmap->pm_spin);
3659 tsleep(pmark, PINTERLOCKED, "pvpld", 0);
3660 spin_lock(&pmap->pm_spin);
3661 }
3662 continue;
3663 }
3664 if (pmarkp) {
3665 if (atomic_swap_long(pmark, pindex) !=
3666 PM_NOPLACEMARK) {
3667 panic("_pv_get: pmark race");
3668 }
3669 *pmarkp = pmark;
3670 }
3671 spin_unlock(&pmap->pm_spin);
3672 return NULL;
3673 }
3674 if (_pv_hold_try(pv PMAP_DEBUG_COPY)) {
3675 if (pmap_excl)
3676 spin_unlock(&pmap->pm_spin);
3677 else
3678 spin_unlock_shared(&pmap->pm_spin);
3679 KKASSERT(pv->pv_pmap == pmap &&
3680 pv->pv_pindex == pindex);
3681 return(pv);
3682 }
3683 if (pmap_excl) {
3684 spin_unlock(&pmap->pm_spin);
3685 _pv_lock(pv PMAP_DEBUG_COPY);
3686 pv_put(pv);
3687 spin_lock(&pmap->pm_spin);
3688 } else {
3689 spin_unlock_shared(&pmap->pm_spin);
3690 _pv_lock(pv PMAP_DEBUG_COPY);
3691 pv_put(pv);
3692 spin_lock_shared(&pmap->pm_spin);
3693 }
3694 }
3695 }
3696
3697 /*
3698 * Lookup, hold, and attempt to lock (pmap,pindex).
3699 *
3700 * If the entry does not exist, NULL is returned and *errorp is set to 0.
3701 *
3702 * If the entry exists and could be successfully locked it is returned and
3703 * errorp is set to 0.
3704 *
3705 * If the entry exists but could NOT be successfully locked it is returned
3706 * held and *errorp is set to 1.
3707 *
3708 * If the entry is placemarked by someone else NULL is returned and *errorp
3709 * is set to 1.
3710 */
3711 static
3712 pv_entry_t
3713 pv_get_try(pmap_t pmap, vm_pindex_t pindex, vm_pindex_t **pmarkp, int *errorp)
3714 {
3715 pv_entry_t pv;
3716
3717 spin_lock_shared(&pmap->pm_spin);
3718
3719 pv = pv_entry_lookup(pmap, pindex);
3720 if (pv == NULL) {
3721 vm_pindex_t *pmark;
3722
3723 pmark = pmap_placemarker_hash(pmap, pindex);
3724
3725 if (((*pmark ^ pindex) & ~PM_PLACEMARK_WAKEUP) == 0) {
3726 *errorp = 1;
3727 } else if (pmarkp &&
3728 atomic_cmpset_long(pmark, PM_NOPLACEMARK, pindex)) {
3729 *errorp = 0;
3730 } else {
3731 /*
3732 * Can't set a placemark with a NULL pmarkp, or if
3733 * pmarkp is non-NULL but we failed to set our
3734 * placemark.
3735 */
3736 *errorp = 1;
3737 }
3738 if (pmarkp)
3739 *pmarkp = pmark;
3740 spin_unlock_shared(&pmap->pm_spin);
3741
3742 return NULL;
3743 }
3744
3745 /*
3746 * XXX This has problems if the lock is shared, why?
3747 */
3748 if (pv_hold_try(pv)) {
3749 spin_unlock_shared(&pmap->pm_spin);
3750 *errorp = 0;
3751 KKASSERT(pv->pv_pmap == pmap && pv->pv_pindex == pindex);
3752 return(pv); /* lock succeeded */
3753 }
3754 spin_unlock_shared(&pmap->pm_spin);
3755 *errorp = 1;
3756
3757 return (pv); /* lock failed */
3758 }
3759
3760 /*
3761 * Lock a held pv, keeping the hold count
3762 */
3763 static
3764 void
3765 _pv_lock(pv_entry_t pv PMAP_DEBUG_DECL)
3766 {
3767 u_int count;
3768
3769 for (;;) {
3770 count = pv->pv_hold;
3771 cpu_ccfence();
3772 if ((count & PV_HOLD_LOCKED) == 0) {
3773 if (atomic_cmpset_int(&pv->pv_hold, count,
3774 count | PV_HOLD_LOCKED)) {
3775 #ifdef PMAP_DEBUG
3776 pv->pv_func = func;
3777 pv->pv_line = lineno;
3778 #endif
3779 return;
3780 }
3781 continue;
3782 }
3783 tsleep_interlock(pv, 0);
3784 if (atomic_cmpset_int(&pv->pv_hold, count,
3785 count | PV_HOLD_WAITING)) {
3786 #ifdef PMAP_DEBUG2
3787 if (pmap_enter_debug > 0) {
3788 --pmap_enter_debug;
3789 kprintf("pv waiting on %s:%d\n",
3790 pv->pv_func, pv->pv_line);
3791 }
3792 #endif
3793 tsleep(pv, PINTERLOCKED, "pvwait", hz);
3794 }
3795 /* retry */
3796 }
3797 }
3798
3799 /*
3800 * Unlock a held and locked pv, keeping the hold count.
3801 */
3802 static
3803 void
3804 pv_unlock(pv_entry_t pv)
3805 {
3806 u_int count;
3807
3808 for (;;) {
3809 count = pv->pv_hold;
3810 cpu_ccfence();
3811 KKASSERT((count & (PV_HOLD_LOCKED | PV_HOLD_MASK)) >=
3812 (PV_HOLD_LOCKED | 1));
3813 if (atomic_cmpset_int(&pv->pv_hold, count,
3814 count &
3815 ~(PV_HOLD_LOCKED | PV_HOLD_WAITING))) {
3816 if (count & PV_HOLD_WAITING)
3817 wakeup(pv);
3818 break;
3819 }
3820 }
3821 }
3822
3823 /*
3824 * Unlock and drop a pv. If the pv is no longer associated with a pmap
3825 * and the hold count drops to zero we will free it.
3826 *
3827 * Caller should not hold any spin locks. We are protected from hold races
3828 * by virtue of holds occurring only with a pmap_spin or vm_page_spin
3829 * lock held. A pv cannot be located otherwise.
3830 */
3831 static
3832 void
3833 pv_put(pv_entry_t pv)
3834 {
3835 #ifdef PMAP_DEBUG2
3836 if (pmap_enter_debug > 0) {
3837 --pmap_enter_debug;
3838 kprintf("pv_put pv=%p hold=%08x\n", pv, pv->pv_hold);
3839 }
3840 #endif
3841
3842 /*
3843 * Normal put-aways must have a pv_m associated with the pv,
3844 * but allow the case where the pv has been destructed due
3845 * to pmap_dynamic_delete.
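 *
 * The fast path below retires the common case, a locked pv holding
 * exactly two refs (the caller's plus the pmap association's), with a
 * single atomic op: (PV_HOLD_LOCKED | 2) -> 1 drops the lock and the
 * caller's ref in one go.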
3846 */
3847 KKASSERT(pv->pv_pmap == NULL || pv->pv_m != NULL);
3848
3849 /*
3850 * Fast - shortcut most common condition
3851 */
3852 if (atomic_cmpset_int(&pv->pv_hold, PV_HOLD_LOCKED | 2, 1))
3853 return;
3854
3855 /*
3856 * Slow
3857 */
3858 pv_unlock(pv);
3859 pv_drop(pv);
3860 }
3861
3862 /*
3863 * Remove the pmap association from a pv, require that pv_m already be removed,
3864 * then unlock and drop the pv. Any pte operations must have already been
3865 * completed. This call may result in a last-drop which will physically free
3866 * the pv.
3867 *
3868 * Removing the pmap association entails an additional drop.
3869 *
3870 * pv must be exclusively locked on call and will be disposed of on return.
3871 */
3872 static
3873 void
3874 _pv_free(pv_entry_t pv, pv_entry_t pvp PMAP_DEBUG_DECL)
3875 {
3876 pmap_t pmap;
3877
3878 #ifdef PMAP_DEBUG
3879 pv->pv_func_lastfree = func;
3880 pv->pv_line_lastfree = lineno;
3881 #endif
3882 KKASSERT(pv->pv_m == NULL);
3883 KKASSERT((pv->pv_hold & (PV_HOLD_LOCKED|PV_HOLD_MASK)) >=
3884 (PV_HOLD_LOCKED|1));
3885 if ((pmap = pv->pv_pmap) != NULL) {
3886 spin_lock(&pmap->pm_spin);
3887 KKASSERT(pv->pv_pmap == pmap);
3888 if (pmap->pm_pvhint_pt == pv)
3889 pmap->pm_pvhint_pt = NULL;
3890 if (pmap->pm_pvhint_unused == pv)
3891 pmap->pm_pvhint_unused = NULL;
3892 pv_entry_rb_tree_RB_REMOVE(&pmap->pm_pvroot, pv);
3893 atomic_add_long(&pmap->pm_stats.resident_count, -1);
3894 pv->pv_pmap = NULL;
3895 pv->pv_pindex = 0;
3896 spin_unlock(&pmap->pm_spin);
3897
3898 /*
3899 * Try to shortcut three atomic ops, otherwise fall through
3900 * and do it normally. Drop two refs and the lock all in
3901 * one go.
3902 */
3903 if (pvp) {
3904 if (vm_page_unwire_quick(pvp->pv_m))
3905 panic("_pv_free: bad wirecount on pvp");
3906 }
3907 if (atomic_cmpset_int(&pv->pv_hold, PV_HOLD_LOCKED | 2, 0)) {
3908 #ifdef PMAP_DEBUG2
3909 if (pmap_enter_debug > 0) {
3910 --pmap_enter_debug;
3911 kprintf("pv_free: free pv %p\n", pv);
3912 }
3913 #endif
3914 zfree(pvzone, pv);
3915 return;
3916 }
3917 pv_drop(pv); /* ref for pv_pmap */
3918 }
3919 pv_unlock(pv);
3920 pv_drop(pv);
3921 }
3922
3923 /*
3924 * This routine is very drastic, but can save the system
3925 * in a pinch.
3926 */
3927 void
3928 pmap_collect(void)
3929 {
3930 int i;
3931 vm_page_t m;
3932 static int warningdone = 0;
3933
3934 if (pmap_pagedaemon_waken == 0)
3935 return;
3936 pmap_pagedaemon_waken = 0;
3937 if (warningdone < 5) {
3938 kprintf("pmap_collect: pv_entries exhausted -- "
3939 "suggest increasing vm.pmap_pv_entries above %ld\n",
3940 vm_pmap_pv_entries);
3941 warningdone++;
3942 }
3943
3944 for (i = 0; i < vm_page_array_size; i++) {
3945 m = &vm_page_array[i];
3946 if (m->wire_count || m->hold_count)
3947 continue;
3948 if (vm_page_busy_try(m, TRUE) == 0) {
3949 if (m->wire_count == 0 && m->hold_count == 0) {
3950 pmap_remove_all(m);
3951 }
3952 vm_page_wakeup(m);
3953 }
3954 }
3955 }
3956
3957 /*
3958 * Scan the pmap for active page table entries and issue a callback.
3959 * The callback must dispose of pte_pv, whose PTE is at *ptep in
3960 * its parent page table.
3961 *
3962 * pte_pv will be NULL if the page or page table is unmanaged.
3963 * pt_pv will point to the page table page containing the pte for the page.
3964 *
3965 * NOTE! If we come across an unmanaged page TABLE (versus an unmanaged page),
3966 * we pass a NULL pte_pv and we pass a pt_pv pointing to the passed
3967 * process pmap's PD and page to the callback function.
3968 * confusing because the pt_pv is really a pd_pv, and the target page
3969 * table page is simply aliased by the pmap and not owned by it.
3970 *
3971 * It is assumed that the start and end are properly rounded to the page size.
3972 *
3973 * It is assumed that PD pages and above are managed and thus in the RB tree,
3974 * allowing us to use RB_SCAN from the PD pages down for ranged scans.
3975 */
3976 struct pmap_scan_info {
3977 struct pmap *pmap;
3978 vm_offset_t sva;
3979 vm_offset_t eva;
3980 vm_pindex_t sva_pd_pindex;
3981 vm_pindex_t eva_pd_pindex;
3982 void (*func)(pmap_t, struct pmap_scan_info *,
3983 vm_pindex_t *, pv_entry_t, vm_offset_t,
3984 pt_entry_t *, void *);
3985 void *arg;
3986 pmap_inval_bulk_t bulk_core;
3987 pmap_inval_bulk_t *bulk;
3988 int count;
3989 int stop;
3990 };
3991
3992 static int pmap_scan_cmp(pv_entry_t pv, void *data);
3993 static int pmap_scan_callback(pv_entry_t pv, void *data);
3994
3995 static void
3996 pmap_scan(struct pmap_scan_info *info, int smp_inval)
3997 {
3998 struct pmap *pmap = info->pmap;
3999 pv_entry_t pt_pv; /* A page table PV */
4000 pv_entry_t pte_pv; /* A page table entry PV */
4001 vm_pindex_t *pte_placemark;
4002 vm_pindex_t *pt_placemark;
4003 pt_entry_t *ptep;
4004 pt_entry_t oldpte;
4005 struct pv_entry dummy_pv;
4006
4007 info->stop = 0;
4008 if (pmap == NULL)
4009 return;
4010 if (info->sva == info->eva)
4011 return;
4012 if (smp_inval) {
4013 info->bulk = &info->bulk_core;
4014 pmap_inval_bulk_init(&info->bulk_core, pmap);
4015 } else {
4016 info->bulk = NULL;
4017 }
4018
4019 /*
4020 * Hold the token for stability; if the pmap is empty we have nothing
4021 * to do.
4022 */
4023 #if 0
4024 if (pmap->pm_stats.resident_count == 0) {
4025 return;
4026 }
4027 #endif
4028
4029 info->count = 0;
4030
4031 /*
4032 * Special handling for scanning one page, which is a very common
4033 * operation (it is?).
4034 *
4035 * NOTE: Locks must be ordered bottom-up. pte,pt,pd,pdp,pml4
4036 */
4037 if (info->sva + PAGE_SIZE == info->eva) {
4038 if (info->sva >= VM_MAX_USER_ADDRESS) {
4039 /*
4040 * Kernel mappings do not track wire counts on
4041 * page table pages and only maintain pd_pv and
4042 * pte_pv levels so pmap_scan() works.
4043 */
4044 pt_pv = NULL;
4045 pte_pv = pv_get(pmap, pmap_pte_pindex(info->sva),
4046 &pte_placemark);
4047 KKASSERT(pte_pv == NULL);
4048 ptep = vtopte(info->sva);
4049 } else {
4050 /*
4051 * We hold pte_placemark across the operation for
4052 * unmanaged pages.
4053 *
4054 * WARNING! We must hold pt_placemark across the
4055 * *ptep test to prevent misinterpreting
4056 * a non-zero *ptep as a shared page
4057 * table page. Hold it across the function
4058 * callback as well for SMP safety.
4059 */ 4060 pte_pv = pv_get(pmap, pmap_pte_pindex(info->sva), 4061 &pte_placemark); 4062 KKASSERT(pte_pv == NULL); 4063 pt_pv = pv_get(pmap, pmap_pt_pindex(info->sva), 4064 &pt_placemark); 4065 if (pt_pv == NULL) { 4066 #if 0 4067 KKASSERT(0); 4068 pd_pv = pv_get(pmap, 4069 pmap_pd_pindex(info->sva), 4070 NULL); 4071 if (pd_pv) { 4072 ptep = pv_pte_lookup(pd_pv, 4073 pmap_pt_index(info->sva)); 4074 if (*ptep) { 4075 info->func(pmap, info, 4076 pt_placemark, pd_pv, 4077 info->sva, ptep, 4078 info->arg); 4079 } else { 4080 pv_placemarker_wakeup(pmap, 4081 pt_placemark); 4082 } 4083 pv_put(pd_pv); 4084 } else { 4085 pv_placemarker_wakeup(pmap, 4086 pt_placemark); 4087 } 4088 #else 4089 pv_placemarker_wakeup(pmap, pt_placemark); 4090 #endif 4091 pv_placemarker_wakeup(pmap, pte_placemark); 4092 goto fast_skip; 4093 } 4094 ptep = pv_pte_lookup(pt_pv, pmap_pte_index(info->sva)); 4095 } 4096 4097 /* 4098 * NOTE: *ptep can't be ripped out from under us if we hold 4099 * pte_pv (or pte_placemark) locked, but bits can 4100 * change. 4101 */ 4102 oldpte = *ptep; 4103 cpu_ccfence(); 4104 if (oldpte == 0) { 4105 KKASSERT(pte_pv == NULL); 4106 pv_placemarker_wakeup(pmap, pte_placemark); 4107 } else { 4108 KASSERT((oldpte & pmap->pmap_bits[PG_V_IDX]) == 4109 pmap->pmap_bits[PG_V_IDX], 4110 ("badB *ptep %016lx/%016lx sva %016lx pte_pv NULL", 4111 *ptep, oldpte, info->sva)); 4112 info->func(pmap, info, pte_placemark, pt_pv, 4113 info->sva, ptep, info->arg); 4114 } 4115 if (pt_pv) 4116 pv_put(pt_pv); 4117 fast_skip: 4118 pmap_inval_bulk_flush(info->bulk); 4119 return; 4120 } 4121 4122 /* 4123 * Nominal scan case, RB_SCAN() for PD pages and iterate from 4124 * there. 4125 * 4126 * WARNING! eva can overflow our standard ((N + mask) >> bits) 4127 * bounds, resulting in a pd_pindex of 0. To solve the 4128 * problem we use an inclusive range. 4129 */ 4130 info->sva_pd_pindex = pmap_pd_pindex(info->sva); 4131 info->eva_pd_pindex = pmap_pd_pindex(info->eva - PAGE_SIZE); 4132 4133 if (info->sva >= VM_MAX_USER_ADDRESS) { 4134 /* 4135 * The kernel does not currently maintain any pv_entry's for 4136 * higher-level page tables. 4137 */ 4138 bzero(&dummy_pv, sizeof(dummy_pv)); 4139 dummy_pv.pv_pindex = info->sva_pd_pindex; 4140 spin_lock(&pmap->pm_spin); 4141 while (dummy_pv.pv_pindex <= info->eva_pd_pindex) { 4142 pmap_scan_callback(&dummy_pv, info); 4143 ++dummy_pv.pv_pindex; 4144 if (dummy_pv.pv_pindex < info->sva_pd_pindex) /*wrap*/ 4145 break; 4146 } 4147 spin_unlock(&pmap->pm_spin); 4148 } else { 4149 /* 4150 * User page tables maintain local PML4, PDP, PD, and PT 4151 * pv_entry's. pv_entry's are not used for PTEs. 4152 */ 4153 spin_lock(&pmap->pm_spin); 4154 pv_entry_rb_tree_RB_SCAN(&pmap->pm_pvroot, pmap_scan_cmp, 4155 pmap_scan_callback, info); 4156 spin_unlock(&pmap->pm_spin); 4157 } 4158 pmap_inval_bulk_flush(info->bulk); 4159 } 4160 4161 /* 4162 * WARNING! pmap->pm_spin held 4163 * 4164 * WARNING! eva can overflow our standard ((N + mask) >> bits) 4165 * bounds, resulting in a pd_pindex of 0. To solve the 4166 * problem we use an inclusive range. 4167 */ 4168 static int 4169 pmap_scan_cmp(pv_entry_t pv, void *data) 4170 { 4171 struct pmap_scan_info *info = data; 4172 if (pv->pv_pindex < info->sva_pd_pindex) 4173 return(-1); 4174 if (pv->pv_pindex > info->eva_pd_pindex) 4175 return(1); 4176 return(0); 4177 } 4178 4179 /* 4180 * pmap_scan() by PDs 4181 * 4182 * WARNING! 
pmap->pm_spin held
4183 */
4184 static int
4185 pmap_scan_callback(pv_entry_t pv, void *data)
4186 {
4187 struct pmap_scan_info *info = data;
4188 struct pmap *pmap = info->pmap;
4189 pv_entry_t pd_pv; /* A page directory PV */
4190 pv_entry_t pt_pv; /* A page table PV */
4191 vm_pindex_t *pt_placemark;
4192 pt_entry_t *ptep;
4193 pt_entry_t oldpte;
4194 vm_offset_t sva;
4195 vm_offset_t eva;
4196 vm_offset_t va_next;
4197 vm_pindex_t pd_pindex;
4198 int error;
4199
4200 /*
4201 * Stop if requested
4202 */
4203 if (info->stop)
4204 return -1;
4205
4206 /*
4207 * Pull the PD pindex from the pv before releasing the spinlock.
4208 *
4209 * WARNING: pv is faked for kernel pmap scans.
4210 */
4211 pd_pindex = pv->pv_pindex;
4212 spin_unlock(&pmap->pm_spin);
4213 pv = NULL; /* invalid after spinlock unlocked */
4214
4215 /*
4216 * Calculate the page range within the PD. SIMPLE pmaps are
4217 * direct-mapped for the entire 2^64 address space. Normal pmaps
4218 * reflect the user and kernel address space which requires
4219 * canonicalization with regard to converting pd_pindex's back
4220 * into addresses.
4221 */
4222 sva = (pd_pindex - pmap_pd_pindex(0)) << PDPSHIFT;
4223 if ((pmap->pm_flags & PMAP_FLAG_SIMPLE) == 0 &&
4224 (sva & PML4_SIGNMASK)) {
4225 sva |= PML4_SIGNMASK;
4226 }
4227 eva = sva + NBPDP; /* can overflow */
4228 if (sva < info->sva)
4229 sva = info->sva;
4230 if (eva < info->sva || eva > info->eva)
4231 eva = info->eva;
4232
4233 /*
4234 * NOTE: kernel mappings do not track page table pages, only
4235 * terminal pages.
4236 *
4237 * NOTE: Locks must be ordered bottom-up. pte,pt,pd,pdp,pml4.
4238 * However, for the scan to be efficient we try to
4239 * cache items top-down.
4240 */
4241 pd_pv = NULL;
4242 pt_pv = NULL;
4243
4244 for (; sva < eva; sva = va_next) {
4245 if (info->stop)
4246 break;
4247 if (sva >= VM_MAX_USER_ADDRESS) {
4248 if (pt_pv) {
4249 pv_put(pt_pv);
4250 pt_pv = NULL;
4251 }
4252 goto kernel_skip;
4253 }
4254
4255 /*
4256 * PD cache, scan shortcut if it doesn't exist.
4257 */
4258 if (pd_pv == NULL) {
4259 pd_pv = pv_get(pmap, pmap_pd_pindex(sva), NULL);
4260 } else if (pd_pv->pv_pmap != pmap ||
4261 pd_pv->pv_pindex != pmap_pd_pindex(sva)) {
4262 pv_put(pd_pv);
4263 pd_pv = pv_get(pmap, pmap_pd_pindex(sva), NULL);
4264 }
4265 if (pd_pv == NULL) {
4266 va_next = (sva + NBPDP) & ~PDPMASK;
4267 if (va_next < sva)
4268 va_next = eva;
4269 continue;
4270 }
4271
4272 /*
4273 * PT cache
4274 *
4275 * NOTE: The cached pt_pv can be removed from the pmap when
4276 * pmap_dynamic_delete is enabled.
4277 */
4278 if (pt_pv && (pt_pv->pv_pmap != pmap ||
4279 pt_pv->pv_pindex != pmap_pt_pindex(sva))) {
4280 pv_put(pt_pv);
4281 pt_pv = NULL;
4282 }
4283 if (pt_pv == NULL) {
4284 pt_pv = pv_get_try(pmap, pmap_pt_pindex(sva),
4285 &pt_placemark, &error);
4286 if (error) {
4287 pv_put(pd_pv); /* lock order */
4288 pd_pv = NULL;
4289 if (pt_pv) {
4290 pv_lock(pt_pv);
4291 pv_put(pt_pv);
4292 pt_pv = NULL;
4293 } else {
4294 pv_placemarker_wait(pmap, pt_placemark);
4295 }
4296 va_next = sva;
4297 continue;
4298 }
4299 /* may have to re-check later if pt_pv is NULL here */
4300 }
4301
4302 /*
4303 * If pt_pv is NULL we either have a shared page table
4304 * page (NOT IMPLEMENTED XXX) and must issue a callback
4305 * specific to that case, or there is no page table page.
4306 *
4307 * Either way we can skip the page table page.
4308 *
4309 * WARNING! pt_pv can also be NULL due to a pv creation
4310 * race where we find it to be NULL and then
4311 * later see a pte_pv. But it's possible the pt_pv
4312 * got created in between the two operations, so
4313 * we must check.
4314 *
4315 * XXX This should no longer be the case because
4316 * we have pt_placemark.
4317 */
4318 if (pt_pv == NULL) {
4319 #if 0
4320 /* XXX REMOVED */
4321 /*
4322 * Possible unmanaged (shared from another pmap)
4323 * page table page.
4324 *
4325 * WARNING! We must hold pt_placemark across the
4326 * *ptep test to prevent misinterpreting
4327 * a non-zero *ptep as a shared page
4328 * table page. Hold it across the function
4329 * callback as well for SMP safety.
4330 */
4331 KKASSERT(0);
4332 ptep = pv_pte_lookup(pd_pv, pmap_pt_index(sva));
4333 if (*ptep & pmap->pmap_bits[PG_V_IDX]) {
4334 info->func(pmap, info, pt_placemark, pd_pv,
4335 sva, ptep, info->arg);
4336 } else {
4337 pv_placemarker_wakeup(pmap, pt_placemark);
4338 }
4339 #else
4340 pv_placemarker_wakeup(pmap, pt_placemark);
4341 #endif
4342
4343 /*
4344 * Done, move to next page table page.
4345 */
4346 va_next = (sva + NBPDR) & ~PDRMASK;
4347 if (va_next < sva)
4348 va_next = eva;
4349 continue;
4350 }
4351
4352 /*
4353 * From this point in the loop testing pt_pv for non-NULL
4354 * means we are in UVM, else if it is NULL we are in KVM.
4355 *
4356 * Limit our scan to either the end of the va represented
4357 * by the current page table page, or to the end of the
4358 * range being removed.
4359 */
4360 kernel_skip:
4361 va_next = (sva + NBPDR) & ~PDRMASK;
4362 if (va_next < sva)
4363 va_next = eva;
4364 if (va_next > eva)
4365 va_next = eva;
4366
4367 /*
4368 * Scan the page table for pages. Some pages may not be
4369 * managed (might not have a pv_entry).
4370 *
4371 * There is no page table management for kernel pages so
4372 * pt_pv will be NULL in that case, but otherwise pt_pv
4373 * is non-NULL, locked, and referenced.
4374 */
4375
4376 /*
4377 * At this point a non-NULL pt_pv means a UVA, and a NULL
4378 * pt_pv means a KVA.
4379 */
4380 if (pt_pv)
4381 ptep = pv_pte_lookup(pt_pv, pmap_pte_index(sva));
4382 else
4383 ptep = vtopte(sva);
4384
4385 while (sva < va_next) {
4386 vm_pindex_t *pte_placemark;
4387 pv_entry_t pte_pv;
4388
4389 /*
4390 * Yield every 64 pages, stop if requested.
4391 */
4392 if ((++info->count & 63) == 0)
4393 lwkt_user_yield();
4394 if (info->stop)
4395 break;
4396
4397 /*
4398 * We can shortcut our scan if *ptep == 0. This is
4399 * an unlocked check.
4400 */
4401 if (*ptep == 0) {
4402 sva += PAGE_SIZE;
4403 ++ptep;
4404 continue;
4405 }
4406 cpu_ccfence();
4407
4408 /*
4409 * Acquire the pte_placemark. pte_pv's won't exist
4410 * for leaf pages.
4411 *
4412 * A multitude of races are possible here so if we
4413 * cannot lock definite state we clean out our cache
4414 * and break the inner while() loop to force a loop
4415 * up to the top of the for().
4416 *
4417 * XXX unlock/relock pd_pv, pt_pv, and re-test their
4418 * validity instead of looping up?
4419 */
4420 pte_pv = pv_get_try(pmap, pmap_pte_pindex(sva),
4421 &pte_placemark, &error);
4422 KKASSERT(pte_pv == NULL);
4423 if (error) {
4424 if (pd_pv) {
4425 pv_put(pd_pv); /* lock order */
4426 pd_pv = NULL;
4427 }
4428 if (pt_pv) {
4429 pv_put(pt_pv); /* lock order */
4430 pt_pv = NULL;
4431 }
4432 pv_placemarker_wait(pmap, pte_placemark);
4433 va_next = sva; /* retry */
4434 break;
4435 }
4436
4437 /*
4438 * Reload *ptep after successfully locking the
4439 * pindex.
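 * The earlier *ptep != 0 check was unlocked; with the
 * placemarker held the pte can no longer be ripped out
 * from under us, but its bits may have changed, so the
 * value must be re-read.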
4440 */ 4441 cpu_ccfence(); 4442 oldpte = *ptep; 4443 if (oldpte == 0) { 4444 pv_placemarker_wakeup(pmap, pte_placemark); 4445 sva += PAGE_SIZE; 4446 ++ptep; 4447 continue; 4448 } 4449 4450 /* 4451 * We can't hold pd_pv across the callback (because 4452 * we don't pass it to the callback and the callback 4453 * might deadlock) 4454 */ 4455 if (pd_pv) { 4456 vm_page_wire_quick(pd_pv->pv_m); 4457 pv_unlock(pd_pv); 4458 } 4459 4460 /* 4461 * Ready for the callback. The locked placemarker 4462 * is consumed by the callback. 4463 */ 4464 if (oldpte & pmap->pmap_bits[PG_MANAGED_IDX]) { 4465 /* 4466 * Managed pte 4467 */ 4468 KASSERT((oldpte & pmap->pmap_bits[PG_V_IDX]), 4469 ("badC *ptep %016lx/%016lx sva %016lx", 4470 *ptep, oldpte, sva)); 4471 /* 4472 * We must unlock pd_pv across the callback 4473 * to avoid deadlocks on any recursive 4474 * disposal. Re-check that it still exists 4475 * after re-locking. 4476 * 4477 * Call target disposes of pte_placemark 4478 * and may destroy but will not dispose 4479 * of pt_pv. 4480 */ 4481 info->func(pmap, info, pte_placemark, pt_pv, 4482 sva, ptep, info->arg); 4483 } else { 4484 /* 4485 * Unmanaged pte 4486 * 4487 * We must unlock pd_pv across the callback 4488 * to avoid deadlocks on any recursive 4489 * disposal. Re-check that it still exists 4490 * after re-locking. 4491 * 4492 * Call target disposes of pte_placemark 4493 * and may destroy but will not dispose 4494 * of pt_pv. 4495 */ 4496 KASSERT((oldpte & pmap->pmap_bits[PG_V_IDX]), 4497 ("badD *ptep %016lx/%016lx sva %016lx ", 4498 *ptep, oldpte, sva)); 4499 info->func(pmap, info, pte_placemark, pt_pv, 4500 sva, ptep, info->arg); 4501 } 4502 if (pd_pv) { 4503 pv_lock(pd_pv); 4504 if (vm_page_unwire_quick(pd_pv->pv_m)) { 4505 panic("pmap_scan_callback: " 4506 "bad wirecount on pd_pv"); 4507 } 4508 if (pd_pv->pv_pmap == NULL) { 4509 va_next = sva; /* retry */ 4510 break; 4511 } 4512 } 4513 4514 /* 4515 * NOTE: The cached pt_pv can be removed from the 4516 * pmap when pmap_dynamic_delete is enabled, 4517 * which will cause ptep to become stale. 4518 * 4519 * This also means that no pages remain under 4520 * the PT, so we can just break out of the inner 4521 * loop and let the outer loop clean everything 4522 * up. 4523 */ 4524 if (pt_pv && pt_pv->pv_pmap != pmap) 4525 break; 4526 sva += PAGE_SIZE; 4527 ++ptep; 4528 } 4529 } 4530 if (pd_pv) { 4531 pv_put(pd_pv); 4532 pd_pv = NULL; 4533 } 4534 if (pt_pv) { 4535 pv_put(pt_pv); 4536 pt_pv = NULL; 4537 } 4538 if ((++info->count & 7) == 0) 4539 lwkt_user_yield(); 4540 4541 /* 4542 * Relock before returning. 
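 * pmap_scan_callback() is invoked via RB_SCAN with pm_spin held
 * and the iterator expects the spinlock to still be held when we
 * return.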
4543 */ 4544 spin_lock(&pmap->pm_spin); 4545 return (0); 4546 } 4547 4548 void 4549 pmap_remove(struct pmap *pmap, vm_offset_t sva, vm_offset_t eva) 4550 { 4551 struct pmap_scan_info info; 4552 4553 info.pmap = pmap; 4554 info.sva = sva; 4555 info.eva = eva; 4556 info.func = pmap_remove_callback; 4557 info.arg = NULL; 4558 pmap_scan(&info, 1); 4559 #if 0 4560 cpu_invltlb(); 4561 if (eva - sva < 1024*1024) { 4562 while (sva < eva) { 4563 cpu_invlpg((void *)sva); 4564 sva += PAGE_SIZE; 4565 } 4566 } 4567 #endif 4568 } 4569 4570 static void 4571 pmap_remove_noinval(struct pmap *pmap, vm_offset_t sva, vm_offset_t eva) 4572 { 4573 struct pmap_scan_info info; 4574 4575 info.pmap = pmap; 4576 info.sva = sva; 4577 info.eva = eva; 4578 info.func = pmap_remove_callback; 4579 info.arg = NULL; 4580 pmap_scan(&info, 0); 4581 } 4582 4583 static void 4584 pmap_remove_callback(pmap_t pmap, struct pmap_scan_info *info, 4585 vm_pindex_t *pte_placemark, pv_entry_t pt_pv, 4586 vm_offset_t va, pt_entry_t *ptep, void *arg __unused) 4587 { 4588 pt_entry_t pte; 4589 vm_page_t oldm; 4590 4591 /* 4592 * Managed or unmanaged pte (pte_placemark is non-NULL) 4593 * 4594 * pt_pv's wire_count is still bumped by unmanaged pages 4595 * so we must decrement it manually. 4596 * 4597 * We have to unwire the target page table page. 4598 */ 4599 pte = *ptep; 4600 if (pte & pmap->pmap_bits[PG_MANAGED_IDX]) { 4601 oldm = PHYS_TO_VM_PAGE(pte & PG_FRAME); 4602 atomic_add_long(&oldm->md.interlock_count, 1); 4603 } else { 4604 oldm = NULL; 4605 } 4606 4607 pte = pmap_inval_bulk(info->bulk, va, ptep, 0); 4608 if (pte & pmap->pmap_bits[PG_MANAGED_IDX]) { 4609 vm_page_t p; 4610 4611 p = PHYS_TO_VM_PAGE(pte & PG_FRAME); 4612 KKASSERT(pte & pmap->pmap_bits[PG_V_IDX]); 4613 if (pte & pmap->pmap_bits[PG_M_IDX]) 4614 vm_page_dirty(p); 4615 if (pte & pmap->pmap_bits[PG_A_IDX]) 4616 vm_page_flag_set(p, PG_REFERENCED); 4617 4618 /* 4619 * (p) is not hard-busied. 4620 * 4621 * We can safely clear PG_MAPPED and PG_WRITEABLE only 4622 * if PG_MAPPEDMULTI is not set, atomically. 4623 */ 4624 pmap_removed_pte(p, pte); 4625 } 4626 if (pte & pmap->pmap_bits[PG_V_IDX]) { 4627 atomic_add_long(&pmap->pm_stats.resident_count, -1); 4628 if (pt_pv && vm_page_unwire_quick(pt_pv->pv_m)) 4629 panic("pmap_remove: insufficient wirecount"); 4630 } 4631 if (pte & pmap->pmap_bits[PG_W_IDX]) 4632 atomic_add_long(&pmap->pm_stats.wired_count, -1); 4633 if (pte & pmap->pmap_bits[PG_G_IDX]) 4634 cpu_invlpg((void *)va); 4635 pv_placemarker_wakeup(pmap, pte_placemark); 4636 if (oldm) { 4637 if ((atomic_fetchadd_long(&oldm->md.interlock_count, -1) & 4638 0x7FFFFFFFFFFFFFFFLU) == 0x4000000000000001LU) { 4639 atomic_clear_long(&oldm->md.interlock_count, 4640 0x4000000000000000LU); 4641 wakeup(&oldm->md.interlock_count); 4642 } 4643 } 4644 } 4645 4646 /* 4647 * Removes this physical page from all physical maps in which it resides. 4648 * Reflects back modify bits to the pager. 4649 * 4650 * This routine may not be called from an interrupt. 4651 * 4652 * The page must be busied by its caller, preventing new ptes from being 4653 * installed. This allows us to assert that pmap_count is zero and safely 4654 * clear the MAPPED and WRITEABLE bits upon completion. 4655 */ 4656 static 4657 void 4658 pmap_remove_all(vm_page_t m) 4659 { 4660 long icount; 4661 int retry; 4662 4663 if (__predict_false(!pmap_initialized)) 4664 return; 4665 4666 /* 4667 * pmap_count doesn't cover fictitious pages, but PG_MAPPED does 4668 * (albeit without certain race protections). 
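 * Hence the quick-out below tests PG_MAPPED rather than the old
 * pmap_count test (which is retained under #if 0).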
4669 */ 4670 #if 0 4671 if (m->md.pmap_count == 0) 4672 return; 4673 #endif 4674 if ((m->flags & PG_MAPPED) == 0) 4675 return; 4676 4677 retry = ticks + hz * 60; 4678 again: 4679 PMAP_PAGE_BACKING_SCAN(m, NULL, ipmap, iptep, ipte, iva) { 4680 if (!pmap_inval_smp_cmpset(ipmap, iva, iptep, ipte, 0)) 4681 PMAP_PAGE_BACKING_RETRY; 4682 if (ipte & ipmap->pmap_bits[PG_MANAGED_IDX]) { 4683 if (ipte & ipmap->pmap_bits[PG_M_IDX]) 4684 vm_page_dirty(m); 4685 if (ipte & ipmap->pmap_bits[PG_A_IDX]) 4686 vm_page_flag_set(m, PG_REFERENCED); 4687 4688 /* 4689 * NOTE: m is not hard-busied so it is not safe to 4690 * clear PG_MAPPED and PG_WRITEABLE on the 1->0 4691 * transition against them being set in 4692 * pmap_enter(). 4693 */ 4694 pmap_removed_pte(m, ipte); 4695 } 4696 4697 /* 4698 * Cleanup various tracking counters. pt_pv can't go away 4699 * due to our wired ref. 4700 */ 4701 if (ipmap != &kernel_pmap) { 4702 pv_entry_t pt_pv; 4703 4704 spin_lock_shared(&ipmap->pm_spin); 4705 pt_pv = pv_entry_lookup(ipmap, pmap_pt_pindex(iva)); 4706 spin_unlock_shared(&ipmap->pm_spin); 4707 4708 if (pt_pv) { 4709 if (vm_page_unwire_quick(pt_pv->pv_m)) { 4710 panic("pmap_remove_all: bad " 4711 "wire_count on pt_pv"); 4712 } 4713 atomic_add_long( 4714 &ipmap->pm_stats.resident_count, -1); 4715 } 4716 } 4717 if (ipte & ipmap->pmap_bits[PG_W_IDX]) 4718 atomic_add_long(&ipmap->pm_stats.wired_count, -1); 4719 if (ipte & ipmap->pmap_bits[PG_G_IDX]) 4720 cpu_invlpg((void *)iva); 4721 } PMAP_PAGE_BACKING_DONE; 4722 4723 /* 4724 * If our scan lost a pte swap race oldm->md.interlock_count might 4725 * be set from the pmap_enter() code. If so sleep a little and try 4726 * again. 4727 */ 4728 icount = atomic_fetchadd_long(&m->md.interlock_count, 4729 0x8000000000000000LU) + 4730 0x8000000000000000LU; 4731 cpu_ccfence(); 4732 while (icount & 0x3FFFFFFFFFFFFFFFLU) { 4733 tsleep_interlock(&m->md.interlock_count, 0); 4734 if (atomic_fcmpset_long(&m->md.interlock_count, &icount, 4735 icount | 0x4000000000000000LU)) { 4736 tsleep(&m->md.interlock_count, PINTERLOCKED, 4737 "pgunm", 1); 4738 icount = m->md.interlock_count; 4739 if (retry - ticks > 0) 4740 goto again; 4741 panic("pmap_remove_all: cannot return interlock_count " 4742 "to 0 (%p, %ld)", 4743 m, m->md.interlock_count); 4744 } 4745 } 4746 vm_page_flag_clear(m, PG_MAPPED | PG_MAPPEDMULTI | PG_WRITEABLE); 4747 } 4748 4749 /* 4750 * Removes the page from a particular pmap. 4751 * 4752 * The page must be busied by the caller. 4753 */ 4754 void 4755 pmap_remove_specific(pmap_t pmap_match, vm_page_t m) 4756 { 4757 if (__predict_false(!pmap_initialized)) 4758 return; 4759 4760 /* 4761 * PG_MAPPED test works for both non-fictitious and fictitious pages. 4762 */ 4763 if ((m->flags & PG_MAPPED) == 0) 4764 return; 4765 4766 PMAP_PAGE_BACKING_SCAN(m, pmap_match, ipmap, iptep, ipte, iva) { 4767 if (!pmap_inval_smp_cmpset(ipmap, iva, iptep, ipte, 0)) 4768 PMAP_PAGE_BACKING_RETRY; 4769 if (ipte & ipmap->pmap_bits[PG_MANAGED_IDX]) { 4770 if (ipte & ipmap->pmap_bits[PG_M_IDX]) 4771 vm_page_dirty(m); 4772 if (ipte & ipmap->pmap_bits[PG_A_IDX]) 4773 vm_page_flag_set(m, PG_REFERENCED); 4774 4775 /* 4776 * NOTE: m is not hard-busied so it is not safe to 4777 * clear PG_MAPPED and PG_WRITEABLE on the 1->0 4778 * transition against them being set in 4779 * pmap_enter(). 4780 */ 4781 pmap_removed_pte(m, ipte); 4782 } 4783 4784 /* 4785 * Cleanup various tracking counters. pt_pv can't go away 4786 * due to our wired ref. 
4787 */ 4788 if (ipmap != &kernel_pmap) { 4789 pv_entry_t pt_pv; 4790 4791 spin_lock_shared(&ipmap->pm_spin); 4792 pt_pv = pv_entry_lookup(ipmap, pmap_pt_pindex(iva)); 4793 spin_unlock_shared(&ipmap->pm_spin); 4794 4795 if (pt_pv) { 4796 atomic_add_long( 4797 &ipmap->pm_stats.resident_count, -1); 4798 if (vm_page_unwire_quick(pt_pv->pv_m)) { 4799 panic("pmap_remove_specific: bad " 4800 "wire_count on pt_pv"); 4801 } 4802 } 4803 } 4804 if (ipte & ipmap->pmap_bits[PG_W_IDX]) 4805 atomic_add_long(&ipmap->pm_stats.wired_count, -1); 4806 if (ipte & ipmap->pmap_bits[PG_G_IDX]) 4807 cpu_invlpg((void *)iva); 4808 } PMAP_PAGE_BACKING_DONE; 4809 } 4810 4811 /* 4812 * Set the physical protection on the specified range of this map 4813 * as requested. This function is typically only used for debug watchpoints 4814 * and COW pages. 4815 * 4816 * This function may not be called from an interrupt if the map is 4817 * not the kernel_pmap. 4818 * 4819 * NOTE! For shared page table pages we just unmap the page. 4820 */ 4821 void 4822 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) 4823 { 4824 struct pmap_scan_info info; 4825 /* JG review for NX */ 4826 4827 if (pmap == NULL) 4828 return; 4829 if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == VM_PROT_NONE) { 4830 pmap_remove(pmap, sva, eva); 4831 return; 4832 } 4833 if (prot & VM_PROT_WRITE) 4834 return; 4835 info.pmap = pmap; 4836 info.sva = sva; 4837 info.eva = eva; 4838 info.func = pmap_protect_callback; 4839 info.arg = &prot; 4840 pmap_scan(&info, 1); 4841 } 4842 4843 static 4844 void 4845 pmap_protect_callback(pmap_t pmap, struct pmap_scan_info *info, 4846 vm_pindex_t *pte_placemark, 4847 pv_entry_t pt_pv, vm_offset_t va, 4848 pt_entry_t *ptep, void *arg __unused) 4849 { 4850 pt_entry_t pbits; 4851 pt_entry_t cbits; 4852 vm_page_t m; 4853 4854 again: 4855 pbits = *ptep; 4856 cpu_ccfence(); 4857 cbits = pbits; 4858 if (pbits & pmap->pmap_bits[PG_MANAGED_IDX]) { 4859 cbits &= ~pmap->pmap_bits[PG_A_IDX]; 4860 cbits &= ~pmap->pmap_bits[PG_M_IDX]; 4861 } 4862 /* else unmanaged page, adjust bits, no wire changes */ 4863 4864 if (ptep) { 4865 cbits &= ~pmap->pmap_bits[PG_RW_IDX]; 4866 #ifdef PMAP_DEBUG2 4867 if (pmap_enter_debug > 0) { 4868 --pmap_enter_debug; 4869 kprintf("pmap_protect va=%lx ptep=%p " 4870 "pt_pv=%p cbits=%08lx\n", 4871 va, ptep, pt_pv, cbits 4872 ); 4873 } 4874 #endif 4875 if (pbits != cbits) { 4876 if (!pmap_inval_smp_cmpset(pmap, va, 4877 ptep, pbits, cbits)) { 4878 goto again; 4879 } 4880 } 4881 if (pbits & pmap->pmap_bits[PG_MANAGED_IDX]) { 4882 m = PHYS_TO_VM_PAGE(pbits & PG_FRAME); 4883 if (pbits & pmap->pmap_bits[PG_A_IDX]) 4884 vm_page_flag_set(m, PG_REFERENCED); 4885 if (pbits & pmap->pmap_bits[PG_M_IDX]) 4886 vm_page_dirty(m); 4887 } 4888 } 4889 pv_placemarker_wakeup(pmap, pte_placemark); 4890 } 4891 4892 /* 4893 * Insert the vm_page (m) at the virtual address (va), replacing any prior 4894 * mapping at that address. Set protection and wiring as requested. 4895 * 4896 * If entry is non-NULL we check to see if the SEG_SIZE optimization is 4897 * possible. If it is we enter the page into the appropriate shared pmap 4898 * hanging off the related VM object instead of the passed pmap, then we 4899 * share the page table page from the VM object's pmap into the current pmap. 4900 * 4901 * NOTE: This routine MUST insert the page into the pmap now, it cannot 4902 * lazy-evaluate. 
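 *
 * A minimal call sketch, mirroring the call made from
 * pmap_object_init_pt_callback() later in this file (currently
 * compiled out under #if 0; the va/prot arguments are illustrative
 * only):
 *
 *	pmap_enter(info->pmap, info->addr + x86_64_ptob(rel_index), p,
 *		   VM_PROT_READ, FALSE, info->entry);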
4903 */
4904 void
4905 pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
4906 boolean_t wired, vm_map_entry_t entry)
4907 {
4908 pv_entry_t pt_pv; /* page table */
4909 pv_entry_t pte_pv; /* page table entry */
4910 vm_pindex_t *pte_placemark;
4911 pt_entry_t *ptep;
4912 pt_entry_t origpte;
4913 vm_paddr_t opa;
4914 vm_page_t oldm;
4915 pt_entry_t newpte;
4916 vm_paddr_t pa;
4917 int flags;
4918 int nflags;
4919
4920 if (pmap == NULL)
4921 return;
4922 va = trunc_page(va);
4923 #ifdef PMAP_DIAGNOSTIC
4924 if (va >= KvaEnd)
4925 panic("pmap_enter: toobig");
4926 if ((va >= UPT_MIN_ADDRESS) && (va < UPT_MAX_ADDRESS))
4927 panic("pmap_enter: invalid to pmap_enter page table "
4928 "pages (va: 0x%lx)", va);
4929 #endif
4930 if (va < UPT_MAX_ADDRESS && pmap == &kernel_pmap) {
4931 kprintf("Warning: pmap_enter called on UVA with "
4932 "kernel_pmap\n");
4933 #ifdef DDB
4934 db_print_backtrace();
4935 #endif
4936 }
4937 if (va >= UPT_MAX_ADDRESS && pmap != &kernel_pmap) {
4938 kprintf("Warning: pmap_enter called on KVA without "
4939 "kernel_pmap\n");
4940 #ifdef DDB
4941 db_print_backtrace();
4942 #endif
4943 }
4944
4945 /*
4946 * Get the locked page table page (pt_pv) for our new page table
4947 * entry, allocating it if necessary.
4948 *
4949 * There is no pte_pv for a terminal pte so the terminal pte will
4950 * be locked via pte_placemark.
4951 *
4952 * Only MMU actions by the CPU itself can modify the ptep out from
4953 * under us.
4954 *
4955 * If the pmap is still being initialized we assume existing
4956 * page tables.
4957 *
4958 * NOTE: Kernel mappings do not track page table pages
4959 * (i.e. there is no pt_pv structure).
4960 *
4961 * NOTE: origpte here is 'tentative', used only to check for
4962 * the degenerate case where the entry already exists and
4963 * matches.
4964 */
4965 if (__predict_false(pmap_initialized == FALSE)) {
4966 pte_pv = NULL;
4967 pt_pv = NULL;
4968 pte_placemark = NULL;
4969 ptep = vtopte(va);
4970 origpte = *ptep;
4971 } else {
4972 pte_pv = pv_get(pmap, pmap_pte_pindex(va), &pte_placemark);
4973 KKASSERT(pte_pv == NULL);
4974 if (va >= VM_MAX_USER_ADDRESS) {
4975 pt_pv = NULL;
4976 ptep = vtopte(va);
4977 } else {
4978 pt_pv = pmap_allocpte(pmap, pmap_pt_pindex(va), NULL);
4979 ptep = pv_pte_lookup(pt_pv, pmap_pte_index(va));
4980 }
4981 origpte = *ptep;
4982 cpu_ccfence();
4983 }
4984
4985 pa = VM_PAGE_TO_PHYS(m);
4986
4987 /*
4988 * Calculate the new PTE.
4989 */
4990 newpte = (pt_entry_t)(pa | pte_prot(pmap, prot) |
4991 pmap->pmap_bits[PG_V_IDX] | pmap->pmap_bits[PG_A_IDX]);
4992 if (wired)
4993 newpte |= pmap->pmap_bits[PG_W_IDX];
4994 if (va < VM_MAX_USER_ADDRESS)
4995 newpte |= pmap->pmap_bits[PG_U_IDX];
4996 if ((m->flags & PG_FICTITIOUS) == 0)
4997 newpte |= pmap->pmap_bits[PG_MANAGED_IDX];
4998 // if (pmap == &kernel_pmap)
4999 // newpte |= pgeflag;
5000 newpte |= pmap->pmap_cache_bits_pte[m->pat_mode];
5001
5002 /*
5003 * It is possible for multiple faults to occur in threaded
5004 * environments; the existing pte might already be correct.
5005 */
5006 if (((origpte ^ newpte) &
5007 ~(pt_entry_t)(pmap->pmap_bits[PG_M_IDX] |
5008 pmap->pmap_bits[PG_A_IDX])) == 0) {
5009 goto done;
5010 }
5011
5012 /*
5013 * Adjust page flags. The page is soft-busied or hard-busied, we
5014 * should be able to safely set PG_* flag bits even with the (shared)
5015 * soft-busy.
5016 *
5017 * The pmap_count and writeable_count are only tracked for
5018 * non-fictitious pages. As a bit of a safety, bump pmap_count
5019 * and set the PG_* bits before mapping the page. If another part
5020 * of the system does not properly hard-busy the page (against our
5021 * soft-busy or hard-busy) in order to remove mappings it might not
5022 * see the pte that we are about to add and thus will not be able to
5023 * drop pmap_count to 0.
5024 *
5025 * The PG_MAPPED and PG_WRITEABLE flags are set for any type of page.
5026 *
5027 * NOTE! PG_MAPPED and PG_WRITEABLE can only be cleared when
5028 * the page is hard-busied AND pmap_count is 0. This
5029 * interlocks our setting of the flags here.
5030 */
5031 /*vm_page_spin_lock(m);*/
5032
5033 /*
5034 * In advanced mode we keep track of single mappings versus
5035 * multiple mappings in order to avoid unnecessary vm_page_protect()
5036 * calls (particularly on the kernel_map).
5037 *
5038 * In non-advanced mode we track the mapping count for similar effect.
5039 *
5040 * Avoid modifying the vm_page as much as possible, conditionalize
5041 * updates to reduce cache line ping-ponging.
5042 */
5043 flags = m->flags;
5044 cpu_ccfence();
5045 for (;;) {
5046 nflags = PG_MAPPED;
5047 if (newpte & pmap->pmap_bits[PG_RW_IDX])
5048 nflags |= PG_WRITEABLE;
5049 if (flags & PG_MAPPED)
5050 nflags |= PG_MAPPEDMULTI;
5051 if (flags == (flags | nflags))
5052 break;
5053 if (atomic_fcmpset_int(&m->flags, &flags, flags | nflags))
5054 break;
5055 }
5056 /*vm_page_spin_unlock(m);*/
5057
5058 /*
5059 * A race can develop when replacing an existing mapping. The new
5060 * page has been busied and the pte is placemark-locked, but the
5061 * old page could be ripped out from under us at any time by
5062 * a backing scan.
5063 *
5064 * If we do nothing, a concurrent backing scan may clear
5065 * PG_WRITEABLE and PG_MAPPED before we can act on oldm.
5066 */
5067 opa = origpte & PG_FRAME;
5068 if (opa && (origpte & pmap->pmap_bits[PG_MANAGED_IDX])) {
5069 oldm = PHYS_TO_VM_PAGE(opa);
5070 KKASSERT(opa == oldm->phys_addr);
5071 KKASSERT(entry != NULL);
5072 atomic_add_long(&oldm->md.interlock_count, 1);
5073 } else {
5074 oldm = NULL;
5075 }
5076
5077 /*
5078 * Swap the new and old PTEs and perform any necessary SMP
5079 * synchronization.
5080 */
5081 if ((prot & VM_PROT_NOSYNC) || (opa == 0 && pt_pv != NULL)) {
5082 /*
5083 * Explicitly permitted to avoid pmap cpu mask synchronization
5084 * or the prior content of a non-kernel-related pmap was
5085 * invalid.
5086 */
5087 origpte = atomic_swap_long(ptep, newpte);
5088 if (opa)
5089 cpu_invlpg((void *)va);
5090 } else {
5091 /*
5092 * Not permitted to avoid pmap cpu mask synchronization,
5093 * or the prior content is being replaced, or this is a
5094 * kernel related pmap.
5095 *
5096 * Due to other kernel optimizations, we cannot assume a
5097 * 0->non_zero transition of *ptep can be done with a swap.
5098 */
5099 origpte = pmap_inval_smp(pmap, va, 1, ptep, newpte);
5100 }
5101 opa = origpte & PG_FRAME;
5102
5103 #ifdef PMAP_DEBUG2
5104 if (pmap_enter_debug > 0) {
5105 --pmap_enter_debug;
5106 kprintf("pmap_enter: va=%lx m=%p origpte=%lx newpte=%lx ptep=%p"
5107 " pte_pv=%p pt_pv=%p opa=%lx prot=%02x\n",
5108 va, m,
5109 origpte, newpte, ptep,
5110 pte_pv, pt_pv, opa, prot);
5111 }
5112 #endif
5113
5114 /*
5115 * Account for the changes in the pt_pv and pmap.
5116 *
5117 * Retain the same wiring count due to replacing an existing page,
5118 * or bump the wiring count for a new page.
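 * That is, only a 0 -> non-zero pte transition (opa == 0) wires
 * the page table page; replacing an existing mapping leaves
 * pt_pv->pv_m's wire_count as-is.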
5119 */ 5120 if (pt_pv && opa == 0) { 5121 vm_page_wire_quick(pt_pv->pv_m); 5122 atomic_add_long(&pt_pv->pv_pmap->pm_stats.resident_count, 1); 5123 } 5124 if (wired && (origpte & pmap->pmap_bits[PG_W_IDX]) == 0) 5125 atomic_add_long(&pmap->pm_stats.wired_count, 1); 5126 5127 /* 5128 * Account for the removal of the old page. pmap and pt_pv stats 5129 * have already been fully adjusted for both. 5130 * 5131 * WARNING! oldm is not soft or hard-busied. The pte at worst can 5132 * only be removed out from under us since we hold the 5133 * placemarker. So if it is still there, it must not have 5134 * changed. 5135 * 5136 * WARNING! A backing scan can clear PG_WRITEABLE and/or PG_MAPPED 5137 * and rip oldm away from us, possibly even freeing or 5138 * paging it, and not setting our dirtying below. 5139 * 5140 * To deal with this, oldm->md.interlock_count is bumped 5141 * to indicate that we might (only might) have won the pte 5142 * swap race, and then released below. 5143 */ 5144 if (opa && (origpte & pmap->pmap_bits[PG_MANAGED_IDX])) { 5145 KKASSERT(oldm == PHYS_TO_VM_PAGE(opa)); 5146 if (origpte & pmap->pmap_bits[PG_M_IDX]) 5147 vm_page_dirty(oldm); 5148 if (origpte & pmap->pmap_bits[PG_A_IDX]) 5149 vm_page_flag_set(oldm, PG_REFERENCED); 5150 5151 /* 5152 * NOTE: oldm is not hard-busied so it is not safe to 5153 * clear PG_MAPPED and PG_WRITEABLE on the 1->0 5154 * transition against them being set in 5155 * pmap_enter(). 5156 */ 5157 pmap_removed_pte(oldm, origpte); 5158 } 5159 if (oldm) { 5160 if ((atomic_fetchadd_long(&oldm->md.interlock_count, -1) & 5161 0x7FFFFFFFFFFFFFFFLU) == 0x4000000000000001LU) { 5162 atomic_clear_long(&oldm->md.interlock_count, 5163 0x4000000000000000LU); 5164 wakeup(&oldm->md.interlock_count); 5165 } 5166 } 5167 5168 done: 5169 KKASSERT((newpte & pmap->pmap_bits[PG_MANAGED_IDX]) == 0 || 5170 (m->flags & PG_MAPPED)); 5171 5172 /* 5173 * Cleanup the pv entry, allowing other accessors. If the new page 5174 * is not managed but we have a pte_pv (which was locking our 5175 * operation), we can free it now. pte_pv->pv_m should be NULL. 5176 */ 5177 if (pte_placemark) 5178 pv_placemarker_wakeup(pmap, pte_placemark); 5179 if (pt_pv) 5180 pv_put(pt_pv); 5181 } 5182 5183 /* 5184 * Make a temporary mapping for a physical address. This is only intended 5185 * to be used for panic dumps. 5186 * 5187 * The caller is responsible for calling smp_invltlb(). 5188 */ 5189 void * 5190 pmap_kenter_temporary(vm_paddr_t pa, long i) 5191 { 5192 pmap_kenter_quick((vm_offset_t)crashdumpmap + (i * PAGE_SIZE), pa); 5193 return ((void *)crashdumpmap); 5194 } 5195 5196 #if 0 5197 #define MAX_INIT_PT (96) 5198 5199 /* 5200 * This routine preloads the ptes for a given object into the specified pmap. 5201 * This eliminates the blast of soft faults on process startup and 5202 * immediately after an mmap. 5203 */ 5204 static int pmap_object_init_pt_callback(vm_page_t p, void *data); 5205 #endif 5206 5207 void 5208 pmap_object_init_pt(pmap_t pmap, vm_map_entry_t entry, 5209 vm_offset_t addr, vm_size_t size, int limit) 5210 { 5211 #if 0 5212 vm_prot_t prot = entry->protection; 5213 vm_object_t object = entry->ba.object; 5214 vm_pindex_t pindex = atop(entry->ba.offset + (addr - entry->ba.start)); 5215 struct rb_vm_page_scan_info info; 5216 struct lwp *lp; 5217 vm_size_t psize; 5218 5219 /* 5220 * We can't preinit if read access isn't set or there is no pmap 5221 * or object. 
5222 */ 5223 if ((prot & VM_PROT_READ) == 0 || pmap == NULL || object == NULL) 5224 return; 5225 5226 /* 5227 * We can't preinit if the pmap is not the current pmap 5228 */ 5229 lp = curthread->td_lwp; 5230 if (lp == NULL || pmap != vmspace_pmap(lp->lwp_vmspace)) 5231 return; 5232 5233 /* 5234 * Misc additional checks 5235 */ 5236 psize = x86_64_btop(size); 5237 5238 if ((object->type != OBJT_VNODE) || 5239 ((limit & MAP_PREFAULT_PARTIAL) && (psize > MAX_INIT_PT) && 5240 (object->resident_page_count > MAX_INIT_PT))) { 5241 return; 5242 } 5243 5244 if (pindex + psize > object->size) { 5245 if (object->size < pindex) 5246 return; 5247 psize = object->size - pindex; 5248 } 5249 5250 if (psize == 0) 5251 return; 5252 5253 /* 5254 * If everything is segment-aligned do not pre-init here. Instead 5255 * allow the normal vm_fault path to pass a segment hint to 5256 * pmap_enter() which will then use an object-referenced shared 5257 * page table page. 5258 */ 5259 if ((addr & SEG_MASK) == 0 && 5260 (ctob(psize) & SEG_MASK) == 0 && 5261 (ctob(pindex) & SEG_MASK) == 0) { 5262 return; 5263 } 5264 5265 /* 5266 * Use a red-black scan to traverse the requested range and load 5267 * any valid pages found into the pmap. 5268 * 5269 * We cannot safely scan the object's memq without holding the 5270 * object token. 5271 */ 5272 info.start_pindex = pindex; 5273 info.end_pindex = pindex + psize - 1; 5274 info.limit = limit; 5275 info.mpte = NULL; 5276 info.addr = addr; 5277 info.pmap = pmap; 5278 info.object = object; 5279 info.entry = entry; 5280 5281 /* 5282 * By using the NOLK scan, the callback function must be sure 5283 * to return -1 if the VM page falls out of the object. 5284 */ 5285 vm_object_hold_shared(object); 5286 vm_page_rb_tree_RB_SCAN_NOLK(&object->rb_memq, rb_vm_page_scancmp, 5287 pmap_object_init_pt_callback, &info); 5288 vm_object_drop(object); 5289 #endif 5290 } 5291 5292 #if 0 5293 5294 static 5295 int 5296 pmap_object_init_pt_callback(vm_page_t p, void *data) 5297 { 5298 struct rb_vm_page_scan_info *info = data; 5299 vm_pindex_t rel_index; 5300 int hard_busy; 5301 5302 /* 5303 * don't allow an madvise to blow away our really 5304 * free pages allocating pv entries. 5305 */ 5306 if ((info->limit & MAP_PREFAULT_MADVISE) && 5307 vmstats.v_free_count < vmstats.v_free_reserved) { 5308 return(-1); 5309 } 5310 5311 /* 5312 * Ignore list markers and ignore pages we cannot instantly 5313 * busy (while holding the object token). 5314 */ 5315 if (p->flags & PG_MARKER) 5316 return 0; 5317 hard_busy = 0; 5318 again: 5319 if (hard_busy) { 5320 if (vm_page_busy_try(p, TRUE)) 5321 return 0; 5322 } else { 5323 if (vm_page_sbusy_try(p)) 5324 return 0; 5325 } 5326 if (((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) && 5327 (p->flags & PG_FICTITIOUS) == 0) { 5328 if ((p->queue - p->pc) == PQ_CACHE) { 5329 if (hard_busy == 0) { 5330 vm_page_sbusy_drop(p); 5331 hard_busy = 1; 5332 goto again; 5333 } 5334 vm_page_deactivate(p); 5335 } 5336 rel_index = p->pindex - info->start_pindex; 5337 pmap_enter(info->pmap, info->addr + x86_64_ptob(rel_index), p, 5338 VM_PROT_READ, FALSE, info->entry); 5339 } 5340 if (hard_busy) 5341 vm_page_wakeup(p); 5342 else 5343 vm_page_sbusy_drop(p); 5344 5345 /* 5346 * We are using an unlocked scan (that is, the scan expects its 5347 * current element to remain in the tree on return). So we have 5348 * to check here and abort the scan if it isn't. 
5349 */ 5350 if (p->object != info->object) 5351 return -1; 5352 lwkt_yield(); 5353 return(0); 5354 } 5355 5356 #endif 5357 5358 /* 5359 * Return TRUE if the pmap is in shape to trivially pre-fault the specified 5360 * address. 5361 * 5362 * Returns FALSE if it would be non-trivial or if a pte is already loaded 5363 * into the slot. 5364 * 5365 * The address must reside within a vm_map mapped range to ensure that the 5366 * page table doesn't get ripped out from under us. 5367 * 5368 * XXX This is safe only because page table pages are not freed. 5369 */ 5370 int 5371 pmap_prefault_ok(pmap_t pmap, vm_offset_t addr) 5372 { 5373 pt_entry_t *pte; 5374 5375 /*spin_lock(&pmap->pm_spin);*/ 5376 if ((pte = pmap_pte(pmap, addr)) != NULL) { 5377 if (*pte & pmap->pmap_bits[PG_V_IDX]) { 5378 /*spin_unlock(&pmap->pm_spin);*/ 5379 return FALSE; 5380 } 5381 } 5382 /*spin_unlock(&pmap->pm_spin);*/ 5383 return TRUE; 5384 } 5385 5386 /* 5387 * Change the wiring attribute for a pmap/va pair. The mapping must already 5388 * exist in the pmap. The mapping may or may not be managed. The wiring in 5389 * the page is not changed, the page is returned so the caller can adjust 5390 * its wiring (the page is not locked in any way). 5391 * 5392 * Wiring is not a hardware characteristic so there is no need to invalidate 5393 * TLB. However, in an SMP environment we must use a locked bus cycle to 5394 * update the pte (if we are not using the pmap_inval_*() API that is)... 5395 * it's ok to do this for simple wiring changes. 5396 */ 5397 vm_page_t 5398 pmap_unwire(pmap_t pmap, vm_offset_t va) 5399 { 5400 pt_entry_t *ptep; 5401 pv_entry_t pt_pv; 5402 vm_paddr_t pa; 5403 vm_page_t m; 5404 5405 if (pmap == NULL) 5406 return NULL; 5407 5408 /* 5409 * Assume elements in the kernel pmap are stable 5410 */ 5411 if (pmap == &kernel_pmap) { 5412 if (pmap_pt(pmap, va) == 0) 5413 return NULL; 5414 ptep = pmap_pte_quick(pmap, va); 5415 if (pmap_pte_v(pmap, ptep)) { 5416 if (pmap_pte_w(pmap, ptep)) 5417 atomic_add_long(&pmap->pm_stats.wired_count,-1); 5418 atomic_clear_long(ptep, pmap->pmap_bits[PG_W_IDX]); 5419 pa = *ptep & PG_FRAME; 5420 m = PHYS_TO_VM_PAGE(pa); 5421 } else { 5422 m = NULL; 5423 } 5424 } else { 5425 /* 5426 * We can only [un]wire pmap-local pages (we cannot wire 5427 * shared pages) 5428 */ 5429 pt_pv = pv_get(pmap, pmap_pt_pindex(va), NULL); 5430 if (pt_pv == NULL) 5431 return NULL; 5432 5433 ptep = pv_pte_lookup(pt_pv, pmap_pte_index(va)); 5434 if ((*ptep & pmap->pmap_bits[PG_V_IDX]) == 0) { 5435 pv_put(pt_pv); 5436 return NULL; 5437 } 5438 5439 if (pmap_pte_w(pmap, ptep)) { 5440 atomic_add_long(&pt_pv->pv_pmap->pm_stats.wired_count, 5441 -1); 5442 } 5443 /* XXX else return NULL so caller doesn't unwire m ? */ 5444 5445 atomic_clear_long(ptep, pmap->pmap_bits[PG_W_IDX]); 5446 5447 pa = *ptep & PG_FRAME; 5448 m = PHYS_TO_VM_PAGE(pa); /* held by wired count */ 5449 pv_put(pt_pv); 5450 } 5451 return m; 5452 } 5453 5454 /* 5455 * Copy the range specified by src_addr/len from the source map to 5456 * the range dst_addr/len in the destination map. 5457 * 5458 * This routine is only advisory and need not do anything. 5459 */ 5460 void 5461 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, 5462 vm_size_t len, vm_offset_t src_addr) 5463 { 5464 } 5465 5466 /* 5467 * pmap_zero_page: 5468 * 5469 * Zero the specified physical page. 5470 * 5471 * This function may be called from an interrupt and no locking is 5472 * required. 
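 *
 * The page is zeroed through its DMAP alias so no temporary
 * mapping is needed, e.g. (sketch):
 *
 *	pmap_zero_page(VM_PAGE_TO_PHYS(m));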
5473 */
5474 void
5475 pmap_zero_page(vm_paddr_t phys)
5476 {
5477 vm_offset_t va = PHYS_TO_DMAP(phys);
5478
5479 pagezero((void *)va);
5480 }
5481
5482 /*
5483 * pmap_zero_page_area:
5484 *
5485 * Zero part of a physical page by mapping it into memory and clearing
5486 * its contents with bzero.
5487 *
5488 * off and size may not cover an area beyond a single hardware page.
5489 */
5490 void
5491 pmap_zero_page_area(vm_paddr_t phys, int off, int size)
5492 {
5493 vm_offset_t virt = PHYS_TO_DMAP(phys);
5494
5495 bzero((char *)virt + off, size);
5496 }
5497
5498 /*
5499 * pmap_copy_page:
5500 *
5501 * Copy the physical page from the source PA to the target PA.
5502 * This function may be called from an interrupt. No locking
5503 * is required.
5504 */
5505 void
5506 pmap_copy_page(vm_paddr_t src, vm_paddr_t dst)
5507 {
5508 vm_offset_t src_virt, dst_virt;
5509
5510 src_virt = PHYS_TO_DMAP(src);
5511 dst_virt = PHYS_TO_DMAP(dst);
5512 bcopy((void *)src_virt, (void *)dst_virt, PAGE_SIZE);
5513 }
5514
5515 /*
5516 * pmap_copy_page_frag:
5517 *
5518 * Copy a fragment of the physical page from the source PA to the
5519 * target PA. This function may be called from an interrupt. No
5520 * locking is required.
5521 */
5522 void
5523 pmap_copy_page_frag(vm_paddr_t src, vm_paddr_t dst, size_t bytes)
5524 {
5525 vm_offset_t src_virt, dst_virt;
5526
5527 src_virt = PHYS_TO_DMAP(src);
5528 dst_virt = PHYS_TO_DMAP(dst);
5529
5530 bcopy((char *)src_virt + (src & PAGE_MASK),
5531 (char *)dst_virt + (dst & PAGE_MASK),
5532 bytes);
5533 }
5534
5535 /*
5536 * Remove all pages from the specified address space; this aids process
5537 * exit speed. Also, this code may be special cased for the current
5538 * process only.
5539 */
5540 void
5541 pmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
5542 {
5543 pmap_remove_noinval(pmap, sva, eva);
5544 cpu_invltlb();
5545 }
5546
5547 /*
5548 * pmap_testbit tests bits in pte's. Note that the testbit/clearbit
5549 * routines are inline, and a lot of things compile-time evaluate.
5550 *
5551 * Currently only used to test the 'M'odified bit. If the page
5552 * is not PG_WRITEABLE, the 'M'odified bit cannot be set and we
5553 * return immediately. Fictitious pages do not track this bit.
5554 */
5555 static
5556 boolean_t
5557 pmap_testbit(vm_page_t m, int bit)
5558 {
5559 int res = FALSE;
5560
5561 if (__predict_false(!pmap_initialized || (m->flags & PG_FICTITIOUS)))
5562 return FALSE;
5563 /*
5564 * Nothing to do if all the mappings are already read-only.
5565 * The page's [M]odify bits have already been synchronized
5566 * to the vm_page_t and cleaned out.
5567 */
5568 if (bit == PG_M_IDX && (m->flags & PG_WRITEABLE) == 0)
5569 return FALSE;
5570
5571 /*
5572 * Iterate the mappings
5573 */
5574 PMAP_PAGE_BACKING_SCAN(m, NULL, ipmap, iptep, ipte, iva) {
5575 if (ipte & ipmap->pmap_bits[bit]) {
5576 res = TRUE;
5577 break;
5578 }
5579 } PMAP_PAGE_BACKING_DONE;
5580 return res;
5581 }
5582
5583 /*
5584 * This routine is used to modify bits in ptes. Only one bit should be
5585 * specified. PG_RW requires special handling. This call works with
5586 * any sort of mapped page. PG_FICTITIOUS pages might not be optimal.
5587 * 5588 * Caller must NOT hold any spin locks 5589 * Caller must hold (m) hard-busied 5590 * 5591 * NOTE: When clearing PG_M we could also (not implemented) drop 5592 * through to the PG_RW code and clear PG_RW too, forcing 5593 * a fault on write to redetect PG_M for virtual kernels, but 5594 * it isn't necessary since virtual kernels invalidate the 5595 * pte when they clear the VPTE_M bit in their virtual page 5596 * tables. 5597 * 5598 * NOTE: Does not re-dirty the page when clearing only PG_M. 5599 * 5600 * NOTE: Because we do not lock the pv, *pte can be in a state of 5601 * flux. Despite this the value of *pte is still somewhat 5602 * related while we hold the vm_page spin lock. 5603 * 5604 * *pte can be zero due to this race. Since we are clearing 5605 * bits we basically do no harm when this race occurs. 5606 */ 5607 static __inline 5608 void 5609 pmap_clearbit(vm_page_t m, int bit_index) 5610 { 5611 pt_entry_t npte; 5612 int retry; 5613 long icount; 5614 5615 /* 5616 * Too early in the boot 5617 */ 5618 if (__predict_false(!pmap_initialized)) { 5619 if (bit_index == PG_RW_IDX) 5620 vm_page_flag_clear(m, PG_WRITEABLE); 5621 return; 5622 } 5623 if ((m->flags & (PG_MAPPED | PG_WRITEABLE)) == 0) 5624 return; 5625 5626 /* 5627 * Being asked to clear other random bits, we don't track them 5628 * so we have to iterate. 5629 * 5630 * pmap_clear_reference() is called (into here) with the page 5631 * hard-busied to check whether the page is still mapped and 5632 * will clear PG_MAPPED and PG_WRITEABLE if it isn't. 5633 */ 5634 if (bit_index != PG_RW_IDX) { 5635 #if 0 5636 long icount; 5637 5638 icount = 0; 5639 #endif 5640 PMAP_PAGE_BACKING_SCAN(m, NULL, ipmap, iptep, ipte, iva) { 5641 #if 0 5642 ++icount; 5643 #endif 5644 if (ipte & ipmap->pmap_bits[bit_index]) { 5645 atomic_clear_long(iptep, 5646 ipmap->pmap_bits[bit_index]); 5647 } 5648 } PMAP_PAGE_BACKING_DONE; 5649 #if 0 5650 if (icount == 0) { 5651 icount = atomic_fetchadd_long(&m->md.interlock_count, 5652 0x8000000000000000LU); 5653 if ((icount & 0x3FFFFFFFFFFFFFFFLU) == 0) { 5654 vm_page_flag_clear(m, PG_MAPPED | 5655 PG_MAPPEDMULTI | 5656 PG_WRITEABLE); 5657 } 5658 } 5659 #endif 5660 return; 5661 } 5662 5663 /* 5664 * Being asked to clear the RW bit. 5665 * 5666 * Nothing to do if all the mappings are already read-only 5667 */ 5668 if ((m->flags & PG_WRITEABLE) == 0) 5669 return; 5670 5671 /* 5672 * Iterate the mappings and check. 5673 */ 5674 retry = ticks + hz * 60; 5675 again: 5676 /* 5677 * Clear PG_RW. This also clears PG_M and marks the page dirty if 5678 * PG_M was set. 5679 * 5680 * Since the caller holds the page hard-busied we can safely clear 5681 * PG_WRITEABLE, and callers expect us to for the PG_RW_IDX path. 5682 */ 5683 PMAP_PAGE_BACKING_SCAN(m, NULL, ipmap, iptep, ipte, iva) { 5684 #if 0 5685 if ((ipte & ipmap->pmap_bits[PG_MANAGED_IDX]) == 0) 5686 continue; 5687 #endif 5688 if ((ipte & ipmap->pmap_bits[PG_RW_IDX]) == 0) 5689 continue; 5690 npte = ipte & ~(ipmap->pmap_bits[PG_RW_IDX] | 5691 ipmap->pmap_bits[PG_M_IDX]); 5692 if (!pmap_inval_smp_cmpset(ipmap, iva, iptep, ipte, npte)) 5693 PMAP_PAGE_BACKING_RETRY; 5694 if (ipte & ipmap->pmap_bits[PG_M_IDX]) 5695 vm_page_dirty(m); 5696 5697 /* 5698 * NOTE: m is not hard-busied so it is not safe to 5699 * clear PG_WRITEABLE on the 1->0 transition 5700 * against it being set in pmap_enter(). 
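 *	 (The interlock_count handshake performed after this
 *	 scan closes that race.)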
5701 * 5702 * pmap_count and writeable_count are only applicable 5703 * to non-fictitious pages (PG_MANAGED_IDX from pte) 5704 */ 5705 } PMAP_PAGE_BACKING_DONE; 5706 5707 /* 5708 * If our scan lost a pte swap race oldm->md.interlock_count might 5709 * be set from the pmap_enter() code. If so sleep a little and try 5710 * again. 5711 * 5712 * Use an atomic op to access interlock_count to ensure ordering. 5713 */ 5714 icount = atomic_fetchadd_long(&m->md.interlock_count, 5715 0x8000000000000000LU) + 5716 0x8000000000000000LU; 5717 cpu_ccfence(); 5718 while (icount & 0x3FFFFFFFFFFFFFFFLU) { 5719 tsleep_interlock(&m->md.interlock_count, 0); 5720 if (atomic_fcmpset_long(&m->md.interlock_count, &icount, 5721 icount | 0x4000000000000000LU)) { 5722 tsleep(&m->md.interlock_count, PINTERLOCKED, 5723 "pgunm", 1); 5724 icount = m->md.interlock_count; 5725 if (retry - ticks > 0) 5726 goto again; 5727 panic("pmap_clearbit: cannot return interlock_count " 5728 "to 0 (%p, %ld)", 5729 m, m->md.interlock_count); 5730 } 5731 } 5732 vm_page_flag_clear(m, PG_WRITEABLE); 5733 } 5734 5735 /* 5736 * Lower the permission for all mappings to a given page. 5737 * 5738 * Page must be hard-busied by caller. Because the page is busied by the 5739 * caller, this should not be able to race a pmap_enter(). 5740 */ 5741 void 5742 pmap_page_protect(vm_page_t m, vm_prot_t prot) 5743 { 5744 /* JG NX support? */ 5745 if ((prot & VM_PROT_WRITE) == 0) { 5746 if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) { 5747 /* 5748 * NOTE: pmap_clearbit(.. PG_RW) also clears 5749 * the PG_WRITEABLE flag in (m). 5750 */ 5751 pmap_clearbit(m, PG_RW_IDX); 5752 } else { 5753 pmap_remove_all(m); 5754 } 5755 } 5756 } 5757 5758 vm_paddr_t 5759 pmap_phys_address(vm_pindex_t ppn) 5760 { 5761 return (x86_64_ptob(ppn)); 5762 } 5763 5764 /* 5765 * Return a count of reference bits for a page, clearing those bits. 5766 * It is not necessary for every reference bit to be cleared, but it 5767 * is necessary that 0 only be returned when there are truly no 5768 * reference bits set. 5769 * 5770 * XXX: The exact number of bits to check and clear is a matter that 5771 * should be tested and standardized at some point in the future for 5772 * optimal aging of shared pages. 5773 * 5774 * This routine may not block. 5775 */ 5776 int 5777 pmap_ts_referenced(vm_page_t m) 5778 { 5779 int rval = 0; 5780 pt_entry_t npte; 5781 5782 if (__predict_false(!pmap_initialized || (m->flags & PG_FICTITIOUS))) 5783 return rval; 5784 PMAP_PAGE_BACKING_SCAN(m, NULL, ipmap, iptep, ipte, iva) { 5785 if (ipte & ipmap->pmap_bits[PG_A_IDX]) { 5786 npte = ipte & ~ipmap->pmap_bits[PG_A_IDX]; 5787 if (!atomic_cmpset_long(iptep, ipte, npte)) 5788 PMAP_PAGE_BACKING_RETRY; 5789 ++rval; 5790 if (rval > 4) 5791 break; 5792 } 5793 } PMAP_PAGE_BACKING_DONE; 5794 return rval; 5795 } 5796 5797 /* 5798 * pmap_is_modified: 5799 * 5800 * Return whether or not the specified physical page was modified 5801 * in any physical maps. 5802 */ 5803 boolean_t 5804 pmap_is_modified(vm_page_t m) 5805 { 5806 boolean_t res; 5807 5808 res = pmap_testbit(m, PG_M_IDX); 5809 return (res); 5810 } 5811 5812 /* 5813 * Clear the modify bit on the vm_page. 5814 * 5815 * The page must be hard-busied. 5816 */ 5817 void 5818 pmap_clear_modify(vm_page_t m) 5819 { 5820 pmap_clearbit(m, PG_M_IDX); 5821 } 5822 5823 /* 5824 * pmap_clear_reference: 5825 * 5826 * Clear the reference bit on the specified physical page. 
5827 */ 5828 void 5829 pmap_clear_reference(vm_page_t m) 5830 { 5831 pmap_clearbit(m, PG_A_IDX); 5832 } 5833 5834 /* 5835 * Miscellaneous support routines follow 5836 */ 5837 5838 static 5839 void 5840 x86_64_protection_init(void) 5841 { 5842 uint64_t *kp; 5843 int prot; 5844 5845 /* 5846 * NX supported? (boot time loader.conf override only) 5847 * 5848 * -1 Automatic (sets mode 1) 5849 * 0 Disabled 5850 * 1 NX implemented, differentiates PROT_READ vs PROT_READ|PROT_EXEC 5851 * 2 NX implemented for all cases 5852 */ 5853 TUNABLE_INT_FETCH("machdep.pmap_nx_enable", &pmap_nx_enable); 5854 if ((amd_feature & AMDID_NX) == 0) { 5855 pmap_bits_default[PG_NX_IDX] = 0; 5856 pmap_nx_enable = 0; 5857 } else if (pmap_nx_enable < 0) { 5858 pmap_nx_enable = 1; /* default to mode 1 (READ) */ 5859 } 5860 5861 /* 5862 * 0 is basically read-only access, but also set the NX (no-execute) 5863 * bit when VM_PROT_EXECUTE is not specified. 5864 */ 5865 kp = protection_codes; 5866 for (prot = 0; prot < PROTECTION_CODES_SIZE; prot++) { 5867 switch (prot) { 5868 case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE: 5869 /* 5870 * This case handled elsewhere 5871 */ 5872 *kp = 0; 5873 break; 5874 case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE: 5875 /* 5876 * Read-only is 0|NX (pmap_nx_enable mode >= 1) 5877 */ 5878 if (pmap_nx_enable >= 1) 5879 *kp = pmap_bits_default[PG_NX_IDX]; 5880 break; 5881 case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE: 5882 case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE: 5883 /* 5884 * Execute requires read access 5885 */ 5886 *kp = 0; 5887 break; 5888 case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE: 5889 case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE: 5890 /* 5891 * Write without execute is RW|NX 5892 * (pmap_nx_enable mode >= 2) 5893 */ 5894 *kp = pmap_bits_default[PG_RW_IDX]; 5895 if (pmap_nx_enable >= 2) 5896 *kp |= pmap_bits_default[PG_NX_IDX]; 5897 break; 5898 case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE: 5899 case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE: 5900 /* 5901 * Write with execute is RW 5902 */ 5903 *kp = pmap_bits_default[PG_RW_IDX]; 5904 break; 5905 } 5906 ++kp; 5907 } 5908 } 5909 5910 /* 5911 * Map a set of physical memory pages into the kernel virtual 5912 * address space. Return a pointer to where it is mapped. This 5913 * routine is intended to be used for mapping device memory, 5914 * NOT real memory. 5915 * 5916 * NOTE: We can't use pgeflag unless we invalidate the pages one at 5917 * a time. 5918 * 5919 * NOTE: The PAT attributes {WRITE_BACK, WRITE_THROUGH, UNCACHED, UNCACHEABLE} 5920 * work whether the cpu supports PAT or not. The remaining PAT 5921 * attributes {WRITE_PROTECTED, WRITE_COMBINING} only work if the cpu 5922 * supports PAT. 5923 */ 5924 void * 5925 pmap_mapdev(vm_paddr_t pa, vm_size_t size) 5926 { 5927 return(pmap_mapdev_attr(pa, size, PAT_WRITE_BACK)); 5928 } 5929 5930 void * 5931 pmap_mapdev_uncacheable(vm_paddr_t pa, vm_size_t size) 5932 { 5933 return(pmap_mapdev_attr(pa, size, PAT_UNCACHEABLE)); 5934 } 5935 5936 void * 5937 pmap_mapbios(vm_paddr_t pa, vm_size_t size) 5938 { 5939 return (pmap_mapdev_attr(pa, size, PAT_WRITE_BACK)); 5940 } 5941 5942 /* 5943 * Map a set of physical memory pages into the kernel virtual 5944 * address space. Return a pointer to where it is mapped. This 5945 * routine is intended to be used for mapping device memory, 5946 * NOT real memory. 
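 *
 * For example (sketch; the device addresses are illustrative only,
 * and WRITE_COMBINING requires PAT support per the note above):
 *
 *	regs = pmap_mapdev_attr(bar_pa, bar_size, PAT_UNCACHEABLE);
 *	fb = pmap_mapdev_attr(fb_pa, fb_size, PAT_WRITE_COMBINING);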
5947 */ 5948 void * 5949 pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode) 5950 { 5951 vm_offset_t va, tmpva, offset; 5952 pt_entry_t *pte; 5953 vm_size_t tmpsize; 5954 5955 offset = pa & PAGE_MASK; 5956 size = roundup(offset + size, PAGE_SIZE); 5957 5958 va = kmem_alloc_nofault(&kernel_map, size, VM_SUBSYS_MAPDEV, PAGE_SIZE); 5959 if (va == 0) 5960 panic("pmap_mapdev: Couldn't alloc kernel virtual memory"); 5961 5962 pa = pa & ~PAGE_MASK; 5963 for (tmpva = va, tmpsize = size; tmpsize > 0;) { 5964 pte = vtopte(tmpva); 5965 *pte = pa | 5966 kernel_pmap.pmap_bits[PG_RW_IDX] | 5967 kernel_pmap.pmap_bits[PG_V_IDX] | /* pgeflag | */ 5968 kernel_pmap.pmap_cache_bits_pte[mode]; 5969 tmpsize -= PAGE_SIZE; 5970 tmpva += PAGE_SIZE; 5971 pa += PAGE_SIZE; 5972 } 5973 pmap_invalidate_range(&kernel_pmap, va, va + size); 5974 pmap_invalidate_cache_range(va, va + size); 5975 5976 return ((void *)(va + offset)); 5977 } 5978 5979 void 5980 pmap_unmapdev(vm_offset_t va, vm_size_t size) 5981 { 5982 vm_offset_t base, offset; 5983 5984 base = va & ~PAGE_MASK; 5985 offset = va & PAGE_MASK; 5986 size = roundup(offset + size, PAGE_SIZE); 5987 pmap_qremove(va, size >> PAGE_SHIFT); 5988 kmem_free(&kernel_map, base, size); 5989 } 5990 5991 /* 5992 * Sets the memory attribute for the specified page. 5993 */ 5994 void 5995 pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma) 5996 { 5997 5998 m->pat_mode = ma; 5999 6000 /* 6001 * If "m" is a normal page, update its direct mapping. This update 6002 * can be relied upon to perform any cache operations that are 6003 * required for data coherence. 6004 */ 6005 if ((m->flags & PG_FICTITIOUS) == 0) 6006 pmap_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)), 1, m->pat_mode); 6007 } 6008 6009 /* 6010 * Change the PAT attribute on an existing kernel memory map. Caller 6011 * must ensure that the virtual memory in question is not accessed 6012 * during the adjustment. 6013 * 6014 * If the va is within the DMAP we cannot use vtopte() because the DMAP 6015 * utilizes 2MB or 1GB pages. 2MB is forced atm so calculate the pd_entry 6016 * pointer based on that. 6017 */ 6018 void 6019 pmap_change_attr(vm_offset_t va, vm_size_t count, int mode) 6020 { 6021 pt_entry_t *pte; 6022 vm_offset_t base; 6023 int changed = 0; 6024 6025 if (va == 0) 6026 panic("pmap_change_attr: va is NULL"); 6027 base = trunc_page(va); 6028 6029 if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) { 6030 pd_entry_t *pd; 6031 6032 KKASSERT(va < DMapMaxAddress); 6033 pd = (pd_entry_t *)PHYS_TO_DMAP(DMPDphys); 6034 pd += (va - DMAP_MIN_ADDRESS) >> PDRSHIFT; 6035 6036 while ((long)count > 0) { 6037 *pd = 6038 (*pd & ~(pd_entry_t)(kernel_pmap.pmap_cache_mask_pde)) | 6039 kernel_pmap.pmap_cache_bits_pde[mode]; 6040 count -= NBPDR / PAGE_SIZE; 6041 va += NBPDR; 6042 ++pd; 6043 } 6044 } else { 6045 while (count) { 6046 pte = vtopte(va); 6047 *pte = 6048 (*pte & ~(pt_entry_t)(kernel_pmap.pmap_cache_mask_pte)) | 6049 kernel_pmap.pmap_cache_bits_pte[mode]; 6050 --count; 6051 va += PAGE_SIZE; 6052 } 6053 } 6054 6055 changed = 1; /* XXX: not optimal */ 6056 6057 /* 6058 * Flush CPU caches if required to make sure any data isn't cached that 6059 * shouldn't be, etc. 
/*
 * Perform the pmap work for mincore().
 */
int
pmap_mincore(pmap_t pmap, vm_offset_t addr)
{
	pt_entry_t *ptep, pte;
	vm_page_t m;
	int val = 0;

	ptep = pmap_pte(pmap, addr);

	if (ptep && (pte = *ptep) != 0) {
		vm_offset_t pa;

		val = MINCORE_INCORE;
		pa = pte & PG_FRAME;
		if (pte & pmap->pmap_bits[PG_MANAGED_IDX])
			m = PHYS_TO_VM_PAGE(pa);
		else
			m = NULL;

		/*
		 * Modified by us
		 */
		if (pte & pmap->pmap_bits[PG_M_IDX])
			val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER;

		/*
		 * Modified by someone else
		 */
		else if (m && (m->dirty || pmap_is_modified(m)))
			val |= MINCORE_MODIFIED_OTHER;

		/*
		 * Referenced by us, or someone else.
		 */
		if (pte & pmap->pmap_bits[PG_A_IDX]) {
			val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER;
		} else if (m && ((m->flags & PG_REFERENCED) ||
				 pmap_ts_referenced(m))) {
			val |= MINCORE_REFERENCED_OTHER;
			vm_page_flag_set(m, PG_REFERENCED);
		}
	}
	return val;
}

/*
 * Replace p->p_vmspace with a new one.  If adjrefs is non-zero the new
 * vmspace will be ref'd and the old one will be deref'd.
 *
 * The vmspace for all lwps associated with the process will be adjusted
 * and cr3 will be reloaded if any lwp is the current lwp.
 *
 * The process must hold the vmspace->vm_map.token for both oldvm and newvm.
 */
void
pmap_replacevm(struct proc *p, struct vmspace *newvm, int adjrefs)
{
	struct vmspace *oldvm;
	struct lwp *lp;

	oldvm = p->p_vmspace;
	if (oldvm != newvm) {
		if (adjrefs)
			vmspace_ref(newvm);
		p->p_vmspace = newvm;
		KKASSERT(p->p_nthreads == 1);
		lp = RB_ROOT(&p->p_lwp_tree);
		pmap_setlwpvm(lp, newvm);
		if (adjrefs)
			vmspace_rel(oldvm);
	}
}
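/*
 * Hypothetical caller sketch for pmap_replacevm() (illustrative only;
 * error handling elided).  The required tokens are taken around the call:
 *
 *	lwkt_gettoken(&oldvm->vm_map.token);
 *	lwkt_gettoken(&newvm->vm_map.token);
 *	pmap_replacevm(p, newvm, 1);	(ref newvm, deref oldvm)
 *	lwkt_reltoken(&newvm->vm_map.token);
 *	lwkt_reltoken(&oldvm->vm_map.token);
 */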
/*
 * Set the vmspace for a LWP.  The vmspace is almost universally set the
 * same as the process vmspace, but virtual kernels need to swap out contexts
 * on a per-lwp basis.
 *
 * Caller does not necessarily hold any vmspace tokens.  Caller must control
 * the lwp (typically be in the context of the lwp).  We use a critical
 * section to protect against statclock and hardclock (statistics collection).
 */
void
pmap_setlwpvm(struct lwp *lp, struct vmspace *newvm)
{
	struct vmspace *oldvm;
	struct pmap *pmap;
	thread_t td;

	oldvm = lp->lwp_vmspace;

	if (oldvm != newvm) {
		crit_enter();
		td = curthread;
		KKASSERT((newvm->vm_refcnt & VM_REF_DELETED) == 0);
		lp->lwp_vmspace = newvm;
		if (td->td_lwp == lp) {
			pmap = vmspace_pmap(newvm);
			ATOMIC_CPUMASK_ORBIT(pmap->pm_active, mycpu->gd_cpuid);
			if (pmap->pm_active_lock & CPULOCK_EXCL)
				pmap_interlock_wait(newvm);
#if defined(SWTCH_OPTIM_STATS)
			tlb_flush_count++;
#endif
			if (pmap->pmap_bits[TYPE_IDX] == REGULAR_PMAP) {
				td->td_pcb->pcb_cr3 = vtophys(pmap->pm_pml4);
				if (meltdown_mitigation && pmap->pm_pmlpv_iso) {
					td->td_pcb->pcb_cr3_iso =
						vtophys(pmap->pm_pml4_iso);
					td->td_pcb->pcb_flags |= PCB_ISOMMU;
				} else {
					td->td_pcb->pcb_cr3_iso = 0;
					td->td_pcb->pcb_flags &= ~PCB_ISOMMU;
				}
			} else if (pmap->pmap_bits[TYPE_IDX] == EPT_PMAP) {
				td->td_pcb->pcb_cr3 = KPML4phys;
				td->td_pcb->pcb_cr3_iso = 0;
				td->td_pcb->pcb_flags &= ~PCB_ISOMMU;
			} else {
				panic("pmap_setlwpvm: unknown pmap type");
			}

			/*
			 * The MMU separation fields need to be updated.
			 * (The trampoline code cannot access the pcb
			 * directly from the restricted user pmap.)
			 */
			{
				struct trampframe *tramp;

				tramp = &pscpu->trampoline;
				tramp->tr_pcb_cr3 = td->td_pcb->pcb_cr3;
				tramp->tr_pcb_cr3_iso = td->td_pcb->pcb_cr3_iso;
				tramp->tr_pcb_flags = td->td_pcb->pcb_flags;
				tramp->tr_pcb_rsp = (register_t)td->td_pcb;
				/* tr_pcb_rsp does not change after this */
			}

			/*
			 * In kernel-land we always use the normal PML4E
			 * so the kernel is fully mapped and can also access
			 * user memory.
			 */
			load_cr3(td->td_pcb->pcb_cr3);
			pmap = vmspace_pmap(oldvm);
			ATOMIC_CPUMASK_NANDBIT(pmap->pm_active,
					       mycpu->gd_cpuid);
		}
		crit_exit();
	}
}

/*
 * Called when switching to a locked pmap, used to interlock against pmaps
 * undergoing modifications to prevent us from activating the MMU for the
 * target pmap until all such modifications have completed.  We have to do
 * this because the thread making the modifications has already set up its
 * SMP synchronization mask.
 *
 * This function cannot sleep!
 *
 * No requirements.
 */
void
pmap_interlock_wait(struct vmspace *vm)
{
	struct pmap *pmap = &vm->vm_pmap;

	if (pmap->pm_active_lock & CPULOCK_EXCL) {
		crit_enter();
		KKASSERT(curthread->td_critcount >= 2);
		DEBUG_PUSH_INFO("pmap_interlock_wait");
		while (pmap->pm_active_lock & CPULOCK_EXCL) {
			cpu_ccfence();
			lwkt_process_ipiq();
		}
		DEBUG_POP_INFO();
		crit_exit();
	}
}

/*
 * Return an address hint which aligns large device mappings to a 2MB
 * (NBPDR) boundary so they can take advantage of large pages.
 */
vm_offset_t
pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size)
{
	if ((obj == NULL) || (size < NBPDR) ||
	    ((obj->type != OBJT_DEVICE) && (obj->type != OBJT_MGTDEVICE))) {
		return addr;
	}

	addr = roundup2(addr, NBPDR);
	return addr;
}

/*
 * Used by kmalloc/kfree: the page already exists at va, return its
 * vm_page_t.
 */
vm_page_t
pmap_kvtom(vm_offset_t va)
{
	pt_entry_t *ptep = vtopte(va);

	return (PHYS_TO_VM_PAGE(*ptep & PG_FRAME));
}
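/*
 * Example of the hint math in pmap_addr_hint() above (illustrative values
 * only): with NBPDR == 2MB, roundup2(0x301000, NBPDR) == 0x400000, so a
 * large device mapping proposed at 0x301000 is pushed up to the next 2MB
 * boundary where it can be backed by large pages.
 */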
/*
 * Initialize machine-specific shared page directory support.  This
 * is executed when a VM object is created.
 */
void
pmap_object_init(vm_object_t object)
{
}

/*
 * Clean up machine-specific shared page directory support.  This
 * is executed when a VM object is destroyed.
 */
void
pmap_object_free(vm_object_t object)
{
}

/*
 * pmap_pgscan_callback - Used by pmap_pgscan to acquire the related
 *			  VM page and issue a pginfo->callback.
 */
static
void
pmap_pgscan_callback(pmap_t pmap, struct pmap_scan_info *info,
		     vm_pindex_t *pte_placemark,
		     pv_entry_t pt_pv, vm_offset_t va,
		     pt_entry_t *ptep, void *arg)
{
	struct pmap_pgscan_info *pginfo = arg;
	vm_page_t m;
	pt_entry_t pte;

	pte = *ptep;
	cpu_ccfence();

	if (pte & pmap->pmap_bits[PG_MANAGED_IDX]) {
		/*
		 * Try to busy the page while we hold the pte_placemark
		 * locked.
		 */
		m = PHYS_TO_VM_PAGE(*ptep & PG_FRAME);
		if (vm_page_busy_try(m, TRUE) == 0) {
			if (m == PHYS_TO_VM_PAGE(*ptep & PG_FRAME)) {
				/*
				 * The callback is issued with the pt_pv
				 * unlocked.
				 */
				pv_placemarker_wakeup(pmap, pte_placemark);
				if (pt_pv) {
					vm_page_wire_quick(pt_pv->pv_m);
					pv_unlock(pt_pv);
				}
				if (pginfo->callback(pginfo, va, m) < 0)
					info->stop = 1;
				if (pt_pv) {
					pv_lock(pt_pv);
					if (vm_page_unwire_quick(pt_pv->pv_m)) {
						panic("pmap_pgscan: bad wire_"
						      "count on pt_pv");
					}
				}
			} else {
				/*
				 * The pte changed out from under us while
				 * we were busying the page, don't issue
				 * the callback.
				 */
				vm_page_wakeup(m);
				pv_placemarker_wakeup(pmap, pte_placemark);
			}
		} else {
			++pginfo->busycount;
			pv_placemarker_wakeup(pmap, pte_placemark);
		}
	} else {
		/*
		 * Shared page table or unmanaged page (sharept or !sharept)
		 */
		pv_placemarker_wakeup(pmap, pte_placemark);
	}
}

void
pmap_pgscan(struct pmap_pgscan_info *pginfo)
{
	struct pmap_scan_info info;

	pginfo->offset = pginfo->beg_addr;
	info.pmap = pginfo->pmap;
	info.sva = pginfo->beg_addr;
	info.eva = pginfo->end_addr;
	info.func = pmap_pgscan_callback;
	info.arg = pginfo;
	pmap_scan(&info, 0);
	if (info.stop == 0)
		pginfo->offset = pginfo->end_addr;
}
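/*
 * Hypothetical pmap_pgscan() consumer sketch ('my_cb' is illustrative,
 * not part of this file).  The callback receives the busied page and is
 * expected to dispose of it; returning < 0 stops the scan:
 *
 *	static int
 *	my_cb(struct pmap_pgscan_info *pginfo, vm_offset_t va, vm_page_t m)
 *	{
 *		...
 *		vm_page_wakeup(m);
 *		return 0;
 *	}
 */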
/*
 * Wait for a placemarker that we do not own to clear.  The placemarker
 * in question is not necessarily set to the pindex we want; we may have
 * to wait on the element because we want to reserve it ourselves.
 *
 * NOTE: PM_PLACEMARK_WAKEUP sets a bit which is already set in
 *	 PM_NOPLACEMARK, so it does not interfere with placemarks
 *	 which have already been woken up.
 *
 * NOTE: This routine is called without the pmap spin-lock and so can
 *	 race changes to *pmark.  Due to the sensitivity of the routine
 *	 to possible MULTIPLE interactions from other cpus, and the
 *	 overloading of the WAKEUP bit on PM_NOPLACEMARK, we have to
 *	 use a cmpset loop to avoid a race that might cause the WAKEUP
 *	 bit to be lost.
 *
 * Caller is expected to retry its operation upon return.
 */
static
void
pv_placemarker_wait(pmap_t pmap, vm_pindex_t *pmark)
{
	vm_pindex_t mark;

	mark = *pmark;
	cpu_ccfence();
	while (mark != PM_NOPLACEMARK) {
		tsleep_interlock(pmark, 0);
		if (atomic_fcmpset_long(pmark, &mark,
					mark | PM_PLACEMARK_WAKEUP)) {
			tsleep(pmark, PINTERLOCKED, "pvplw", 0);
			break;
		}
	}
}

/*
 * Wakeup a placemarker that we own.  Replace the entry with
 * PM_NOPLACEMARK and issue a wakeup() if necessary.
 */
static
void
pv_placemarker_wakeup(pmap_t pmap, vm_pindex_t *pmark)
{
	vm_pindex_t pindex;

	pindex = atomic_swap_long(pmark, PM_NOPLACEMARK);
	KKASSERT(pindex != PM_NOPLACEMARK);
	if (pindex & PM_PLACEMARK_WAKEUP)
		wakeup(pmark);
}
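/*
 * Illustrative pairing of the two placemarker routines above under
 * contention (hypothetical interleaving):
 *
 *	cpu A (owner)			cpu B (waiter)
 *	-------------			--------------
 *	*pmark = pindex			pv_placemarker_wait()
 *					  fcmpset ORs in
 *					  PM_PLACEMARK_WAKEUP,
 *					  then tsleep()s
 *	pv_placemarker_wakeup()
 *	  swaps in PM_NOPLACEMARK,
 *	  sees the WAKEUP bit, and
 *	  wakeup()s cpu B
 */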