1 /* 2 * Copyright (c) 1991 Regents of the University of California. 3 * Copyright (c) 1994 John S. Dyson 4 * Copyright (c) 1994 David Greenman 5 * Copyright (c) 2003 Peter Wemm 6 * Copyright (c) 2005-2008 Alan L. Cox <alc@cs.rice.edu> 7 * Copyright (c) 2008, 2009 The DragonFly Project. 8 * Copyright (c) 2008, 2009 Jordan Gordeev. 9 * Copyright (c) 2011-2012 Matthew Dillon 10 * All rights reserved. 11 * 12 * This code is derived from software contributed to Berkeley by 13 * the Systems Programming Group of the University of Utah Computer 14 * Science Department and William Jolitz of UUNET Technologies Inc. 15 * 16 * Redistribution and use in source and binary forms, with or without 17 * modification, are permitted provided that the following conditions 18 * are met: 19 * 1. Redistributions of source code must retain the above copyright 20 * notice, this list of conditions and the following disclaimer. 21 * 2. Redistributions in binary form must reproduce the above copyright 22 * notice, this list of conditions and the following disclaimer in the 23 * documentation and/or other materials provided with the distribution. 24 * 3. All advertising materials mentioning features or use of this software 25 * must display the following acknowledgement: 26 * This product includes software developed by the University of 27 * California, Berkeley and its contributors. 28 * 4. Neither the name of the University nor the names of its contributors 29 * may be used to endorse or promote products derived from this software 30 * without specific prior written permission. 31 * 32 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 33 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 34 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 35 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 36 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 37 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 38 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 39 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 40 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 41 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 42 * SUCH DAMAGE. 43 */ 44 /* 45 * Manage physical address maps for x86-64 systems. 
46 */ 47 48 #if 0 /* JG */ 49 #include "opt_disable_pse.h" 50 #include "opt_pmap.h" 51 #endif 52 #include "opt_msgbuf.h" 53 54 #include <sys/param.h> 55 #include <sys/kernel.h> 56 #include <sys/proc.h> 57 #include <sys/msgbuf.h> 58 #include <sys/vmmeter.h> 59 #include <sys/mman.h> 60 #include <sys/systm.h> 61 62 #include <vm/vm.h> 63 #include <vm/vm_param.h> 64 #include <sys/sysctl.h> 65 #include <sys/lock.h> 66 #include <vm/vm_kern.h> 67 #include <vm/vm_page.h> 68 #include <vm/vm_map.h> 69 #include <vm/vm_object.h> 70 #include <vm/vm_extern.h> 71 #include <vm/vm_pageout.h> 72 #include <vm/vm_pager.h> 73 #include <vm/vm_zone.h> 74 75 #include <sys/user.h> 76 #include <sys/thread2.h> 77 #include <sys/sysref2.h> 78 #include <sys/spinlock2.h> 79 #include <vm/vm_page2.h> 80 81 #include <machine/cputypes.h> 82 #include <machine/md_var.h> 83 #include <machine/specialreg.h> 84 #include <machine/smp.h> 85 #include <machine_base/apic/apicreg.h> 86 #include <machine/globaldata.h> 87 #include <machine/pmap.h> 88 #include <machine/pmap_inval.h> 89 #include <machine/inttypes.h> 90 91 #include <ddb/ddb.h> 92 93 #define PMAP_KEEP_PDIRS 94 #ifndef PMAP_SHPGPERPROC 95 #define PMAP_SHPGPERPROC 2000 96 #endif 97 98 #if defined(DIAGNOSTIC) 99 #define PMAP_DIAGNOSTIC 100 #endif 101 102 #define MINPV 2048 103 104 /* 105 * pmap debugging will report who owns a pv lock when blocking. 106 */ 107 #ifdef PMAP_DEBUG 108 109 #define PMAP_DEBUG_DECL ,const char *func, int lineno 110 #define PMAP_DEBUG_ARGS , __func__, __LINE__ 111 #define PMAP_DEBUG_COPY , func, lineno 112 113 #define pv_get(pmap, pindex) _pv_get(pmap, pindex \ 114 PMAP_DEBUG_ARGS) 115 #define pv_lock(pv) _pv_lock(pv \ 116 PMAP_DEBUG_ARGS) 117 #define pv_hold_try(pv) _pv_hold_try(pv \ 118 PMAP_DEBUG_ARGS) 119 #define pv_alloc(pmap, pindex, isnewp) _pv_alloc(pmap, pindex, isnewp \ 120 PMAP_DEBUG_ARGS) 121 122 #else 123 124 #define PMAP_DEBUG_DECL 125 #define PMAP_DEBUG_ARGS 126 #define PMAP_DEBUG_COPY 127 128 #define pv_get(pmap, pindex) _pv_get(pmap, pindex) 129 #define pv_lock(pv) _pv_lock(pv) 130 #define pv_hold_try(pv) _pv_hold_try(pv) 131 #define pv_alloc(pmap, pindex, isnewp) _pv_alloc(pmap, pindex, isnewp) 132 133 #endif 134 135 /* 136 * Get PDEs and PTEs for user/kernel address space 137 */ 138 #define pdir_pde(m, v) (m[(vm_offset_t)(v) >> PDRSHIFT]) 139 140 #define pmap_pde_v(pmap, pte) ((*(pd_entry_t *)pte & pmap->pmap_bits[PG_V_IDX]) != 0) 141 #define pmap_pte_w(pmap, pte) ((*(pt_entry_t *)pte & pmap->pmap_bits[PG_W_IDX]) != 0) 142 #define pmap_pte_m(pmap, pte) ((*(pt_entry_t *)pte & pmap->pmap_bits[PG_M_IDX]) != 0) 143 #define pmap_pte_u(pmap, pte) ((*(pt_entry_t *)pte & pmap->pmap_bits[PG_U_IDX]) != 0) 144 #define pmap_pte_v(pmap, pte) ((*(pt_entry_t *)pte & pmap->pmap_bits[PG_V_IDX]) != 0) 145 146 /* 147 * Given a map and a machine independent protection code, 148 * convert to a vax protection code. 
149 */ 150 #define pte_prot(m, p) \ 151 (m->protection_codes[p & (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE)]) 152 static int protection_codes[PROTECTION_CODES_SIZE]; 153 154 struct pmap kernel_pmap; 155 static TAILQ_HEAD(,pmap) pmap_list = TAILQ_HEAD_INITIALIZER(pmap_list); 156 157 MALLOC_DEFINE(M_OBJPMAP, "objpmap", "pmaps associated with VM objects"); 158 159 vm_paddr_t avail_start; /* PA of first available physical page */ 160 vm_paddr_t avail_end; /* PA of last available physical page */ 161 vm_offset_t virtual2_start; /* cutout free area prior to kernel start */ 162 vm_offset_t virtual2_end; 163 vm_offset_t virtual_start; /* VA of first avail page (after kernel bss) */ 164 vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */ 165 vm_offset_t KvaStart; /* VA start of KVA space */ 166 vm_offset_t KvaEnd; /* VA end of KVA space (non-inclusive) */ 167 vm_offset_t KvaSize; /* max size of kernel virtual address space */ 168 static boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */ 169 //static int pgeflag; /* PG_G or-in */ 170 //static int pseflag; /* PG_PS or-in */ 171 uint64_t PatMsr; 172 173 static int ndmpdp; 174 static vm_paddr_t dmaplimit; 175 static int nkpt; 176 vm_offset_t kernel_vm_end = VM_MIN_KERNEL_ADDRESS; 177 178 static pt_entry_t pat_pte_index[PAT_INDEX_SIZE]; /* PAT -> PG_ bits */ 179 /*static pt_entry_t pat_pde_index[PAT_INDEX_SIZE];*/ /* PAT -> PG_ bits */ 180 181 static uint64_t KPTbase; 182 static uint64_t KPTphys; 183 static uint64_t KPDphys; /* phys addr of kernel level 2 */ 184 static uint64_t KPDbase; /* phys addr of kernel level 2 @ KERNBASE */ 185 uint64_t KPDPphys; /* phys addr of kernel level 3 */ 186 uint64_t KPML4phys; /* phys addr of kernel level 4 */ 187 188 static uint64_t DMPDphys; /* phys addr of direct mapped level 2 */ 189 static uint64_t DMPDPphys; /* phys addr of direct mapped level 3 */ 190 191 /* 192 * Data for the pv entry allocation mechanism 193 */ 194 static vm_zone_t pvzone; 195 static struct vm_zone pvzone_store; 196 static struct vm_object pvzone_obj; 197 static int pv_entry_max=0, pv_entry_high_water=0; 198 static int pmap_pagedaemon_waken = 0; 199 static struct pv_entry *pvinit; 200 201 /* 202 * All those kernel PT submaps that BSD is so fond of 203 */ 204 pt_entry_t *CMAP1 = NULL, *ptmmap; 205 caddr_t CADDR1 = NULL, ptvmmap = NULL; 206 static pt_entry_t *msgbufmap; 207 struct msgbuf *msgbufp=NULL; 208 209 /* 210 * PMAP default PG_* bits. Needed to be able to add 211 * EPT/NPT pagetable pmap_bits for the VMM module 212 */ 213 uint64_t pmap_bits_default[] = { 214 REGULAR_PMAP, /* TYPE_IDX 0 */ 215 X86_PG_V, /* PG_V_IDX 1 */ 216 X86_PG_RW, /* PG_RW_IDX 2 */ 217 X86_PG_U, /* PG_U_IDX 3 */ 218 X86_PG_A, /* PG_A_IDX 4 */ 219 X86_PG_M, /* PG_M_IDX 5 */ 220 X86_PG_PS, /* PG_PS_IDX3 6 */ 221 X86_PG_G, /* PG_G_IDX 7 */ 222 X86_PG_AVAIL1, /* PG_AVAIL1_IDX 8 */ 223 X86_PG_AVAIL2, /* PG_AVAIL2_IDX 9 */ 224 X86_PG_AVAIL3, /* PG_AVAIL3_IDX 10 */ 225 X86_PG_NC_PWT | X86_PG_NC_PCD, /* PG_N_IDX 11 */ 226 }; 227 /* 228 * Crashdump maps. 
 */
static pt_entry_t *pt_crashdumpmap;
static caddr_t crashdumpmap;

#ifdef PMAP_DEBUG2
static int pmap_enter_debug = 0;
SYSCTL_INT(_machdep, OID_AUTO, pmap_enter_debug, CTLFLAG_RW,
    &pmap_enter_debug, 0, "Debug pmap_enter's");
#endif
static int pmap_yield_count = 64;
SYSCTL_INT(_machdep, OID_AUTO, pmap_yield_count, CTLFLAG_RW,
    &pmap_yield_count, 0, "Yield during init_pt/release");
static int pmap_mmu_optimize = 0;
SYSCTL_INT(_machdep, OID_AUTO, pmap_mmu_optimize, CTLFLAG_RW,
    &pmap_mmu_optimize, 0, "Share page table pages when possible");
int pmap_fast_kernel_cpusync = 0;
SYSCTL_INT(_machdep, OID_AUTO, pmap_fast_kernel_cpusync, CTLFLAG_RW,
    &pmap_fast_kernel_cpusync, 0, "Allow fast kernel pmap cpusyncs");

#define DISABLE_PSE

/* Standard user access functions */
extern int std_copyinstr (const void *udaddr, void *kaddr, size_t len,
    size_t *lencopied);
extern int std_copyin (const void *udaddr, void *kaddr, size_t len);
extern int std_copyout (const void *kaddr, void *udaddr, size_t len);
extern int std_fubyte (const void *base);
extern int std_subyte (void *base, int byte);
extern long std_fuword (const void *base);
extern int std_suword (void *base, long word);
extern int std_suword32 (void *base, int word);

static void pv_hold(pv_entry_t pv);
static int _pv_hold_try(pv_entry_t pv
    PMAP_DEBUG_DECL);
static void pv_drop(pv_entry_t pv);
static void _pv_lock(pv_entry_t pv
    PMAP_DEBUG_DECL);
static void pv_unlock(pv_entry_t pv);
static pv_entry_t _pv_alloc(pmap_t pmap, vm_pindex_t pindex, int *isnew
    PMAP_DEBUG_DECL);
static pv_entry_t _pv_get(pmap_t pmap, vm_pindex_t pindex
    PMAP_DEBUG_DECL);
static pv_entry_t pv_get_try(pmap_t pmap, vm_pindex_t pindex, int *errorp);
static pv_entry_t pv_find(pmap_t pmap, vm_pindex_t pindex);
static void pv_put(pv_entry_t pv);
static void pv_free(pv_entry_t pv);
static void *pv_pte_lookup(pv_entry_t pv, vm_pindex_t pindex);
static pv_entry_t pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex,
    pv_entry_t *pvpp);
static pv_entry_t pmap_allocpte_seg(pmap_t pmap, vm_pindex_t ptepindex,
    pv_entry_t *pvpp, vm_map_entry_t entry, vm_offset_t va);
static void pmap_remove_pv_pte(pv_entry_t pv, pv_entry_t pvp,
    struct pmap_inval_info *info);
static vm_page_t pmap_remove_pv_page(pv_entry_t pv);
static int pmap_release_pv(struct pmap_inval_info *info,
    pv_entry_t pv, pv_entry_t pvp);

struct pmap_scan_info;
static void pmap_remove_callback(pmap_t pmap, struct pmap_scan_info *info,
    pv_entry_t pte_pv, pv_entry_t pt_pv, int sharept,
    vm_offset_t va, pt_entry_t *ptep, void *arg __unused);
static void pmap_protect_callback(pmap_t pmap, struct pmap_scan_info *info,
    pv_entry_t pte_pv, pv_entry_t pt_pv, int sharept,
    vm_offset_t va, pt_entry_t *ptep, void *arg __unused);

static void i386_protection_init (void);
static void create_pagetables(vm_paddr_t *firstaddr);
static void pmap_remove_all (vm_page_t m);
static boolean_t pmap_testbit (vm_page_t m, int bit);

static pt_entry_t *pmap_pte_quick (pmap_t pmap, vm_offset_t va);
static vm_offset_t pmap_kmem_choose(vm_offset_t addr);

static void pmap_pinit_defaults(struct pmap *pmap);

static unsigned pdir4mb;

static int
pv_entry_compare(pv_entry_t pv1, pv_entry_t pv2)
{
        if (pv1->pv_pindex < pv2->pv_pindex)
                return(-1);
312 if (pv1->pv_pindex > pv2->pv_pindex) 313 return(1); 314 return(0); 315 } 316 317 RB_GENERATE2(pv_entry_rb_tree, pv_entry, pv_entry, 318 pv_entry_compare, vm_pindex_t, pv_pindex); 319 320 static __inline 321 void 322 pmap_page_stats_adding(vm_page_t m) 323 { 324 globaldata_t gd = mycpu; 325 326 if (TAILQ_EMPTY(&m->md.pv_list)) { 327 ++gd->gd_vmtotal.t_arm; 328 } else if (TAILQ_FIRST(&m->md.pv_list) == 329 TAILQ_LAST(&m->md.pv_list, md_page_pv_list)) { 330 ++gd->gd_vmtotal.t_armshr; 331 ++gd->gd_vmtotal.t_avmshr; 332 } else { 333 ++gd->gd_vmtotal.t_avmshr; 334 } 335 } 336 337 static __inline 338 void 339 pmap_page_stats_deleting(vm_page_t m) 340 { 341 globaldata_t gd = mycpu; 342 343 if (TAILQ_EMPTY(&m->md.pv_list)) { 344 --gd->gd_vmtotal.t_arm; 345 } else if (TAILQ_FIRST(&m->md.pv_list) == 346 TAILQ_LAST(&m->md.pv_list, md_page_pv_list)) { 347 --gd->gd_vmtotal.t_armshr; 348 --gd->gd_vmtotal.t_avmshr; 349 } else { 350 --gd->gd_vmtotal.t_avmshr; 351 } 352 } 353 354 /* 355 * Move the kernel virtual free pointer to the next 356 * 2MB. This is used to help improve performance 357 * by using a large (2MB) page for much of the kernel 358 * (.text, .data, .bss) 359 */ 360 static 361 vm_offset_t 362 pmap_kmem_choose(vm_offset_t addr) 363 { 364 vm_offset_t newaddr = addr; 365 366 newaddr = roundup2(addr, NBPDR); 367 return newaddr; 368 } 369 370 /* 371 * pmap_pte_quick: 372 * 373 * Super fast pmap_pte routine best used when scanning the pv lists. 374 * This eliminates many course-grained invltlb calls. Note that many of 375 * the pv list scans are across different pmaps and it is very wasteful 376 * to do an entire invltlb when checking a single mapping. 377 */ 378 static __inline pt_entry_t *pmap_pte(pmap_t pmap, vm_offset_t va); 379 380 static 381 pt_entry_t * 382 pmap_pte_quick(pmap_t pmap, vm_offset_t va) 383 { 384 return pmap_pte(pmap, va); 385 } 386 387 /* 388 * Returns the pindex of a page table entry (representing a terminal page). 389 * There are NUPTE_TOTAL page table entries possible (a huge number) 390 * 391 * x86-64 has a 48-bit address space, where bit 47 is sign-extended out. 392 * We want to properly translate negative KVAs. 393 */ 394 static __inline 395 vm_pindex_t 396 pmap_pte_pindex(vm_offset_t va) 397 { 398 return ((va >> PAGE_SHIFT) & (NUPTE_TOTAL - 1)); 399 } 400 401 /* 402 * Returns the pindex of a page table. 403 */ 404 static __inline 405 vm_pindex_t 406 pmap_pt_pindex(vm_offset_t va) 407 { 408 return (NUPTE_TOTAL + ((va >> PDRSHIFT) & (NUPT_TOTAL - 1))); 409 } 410 411 /* 412 * Returns the pindex of a page directory. 413 */ 414 static __inline 415 vm_pindex_t 416 pmap_pd_pindex(vm_offset_t va) 417 { 418 return (NUPTE_TOTAL + NUPT_TOTAL + 419 ((va >> PDPSHIFT) & (NUPD_TOTAL - 1))); 420 } 421 422 static __inline 423 vm_pindex_t 424 pmap_pdp_pindex(vm_offset_t va) 425 { 426 return (NUPTE_TOTAL + NUPT_TOTAL + NUPD_TOTAL + 427 ((va >> PML4SHIFT) & (NUPDP_TOTAL - 1))); 428 } 429 430 static __inline 431 vm_pindex_t 432 pmap_pml4_pindex(void) 433 { 434 return (NUPTE_TOTAL + NUPT_TOTAL + NUPD_TOTAL + NUPDP_TOTAL); 435 } 436 437 /* 438 * Return various clipped indexes for a given VA 439 * 440 * Returns the index of a pte in a page table, representing a terminal 441 * page. 442 */ 443 static __inline 444 vm_pindex_t 445 pmap_pte_index(vm_offset_t va) 446 { 447 return ((va >> PAGE_SHIFT) & ((1ul << NPTEPGSHIFT) - 1)); 448 } 449 450 /* 451 * Returns the index of a pt in a page directory, representing a page 452 * table. 
453 */ 454 static __inline 455 vm_pindex_t 456 pmap_pt_index(vm_offset_t va) 457 { 458 return ((va >> PDRSHIFT) & ((1ul << NPDEPGSHIFT) - 1)); 459 } 460 461 /* 462 * Returns the index of a pd in a page directory page, representing a page 463 * directory. 464 */ 465 static __inline 466 vm_pindex_t 467 pmap_pd_index(vm_offset_t va) 468 { 469 return ((va >> PDPSHIFT) & ((1ul << NPDPEPGSHIFT) - 1)); 470 } 471 472 /* 473 * Returns the index of a pdp in the pml4 table, representing a page 474 * directory page. 475 */ 476 static __inline 477 vm_pindex_t 478 pmap_pdp_index(vm_offset_t va) 479 { 480 return ((va >> PML4SHIFT) & ((1ul << NPML4EPGSHIFT) - 1)); 481 } 482 483 /* 484 * Generic procedure to index a pte from a pt, pd, or pdp. 485 * 486 * NOTE: Normally passed pindex as pmap_xx_index(). pmap_xx_pindex() is NOT 487 * a page table page index but is instead of PV lookup index. 488 */ 489 static 490 void * 491 pv_pte_lookup(pv_entry_t pv, vm_pindex_t pindex) 492 { 493 pt_entry_t *pte; 494 495 pte = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pv->pv_m)); 496 return(&pte[pindex]); 497 } 498 499 /* 500 * Return pointer to PDP slot in the PML4 501 */ 502 static __inline 503 pml4_entry_t * 504 pmap_pdp(pmap_t pmap, vm_offset_t va) 505 { 506 return (&pmap->pm_pml4[pmap_pdp_index(va)]); 507 } 508 509 /* 510 * Return pointer to PD slot in the PDP given a pointer to the PDP 511 */ 512 static __inline 513 pdp_entry_t * 514 pmap_pdp_to_pd(pml4_entry_t pdp_pte, vm_offset_t va) 515 { 516 pdp_entry_t *pd; 517 518 pd = (pdp_entry_t *)PHYS_TO_DMAP(pdp_pte & PG_FRAME); 519 return (&pd[pmap_pd_index(va)]); 520 } 521 522 /* 523 * Return pointer to PD slot in the PDP. 524 */ 525 static __inline 526 pdp_entry_t * 527 pmap_pd(pmap_t pmap, vm_offset_t va) 528 { 529 pml4_entry_t *pdp; 530 531 pdp = pmap_pdp(pmap, va); 532 if ((*pdp & pmap->pmap_bits[PG_V_IDX]) == 0) 533 return NULL; 534 return (pmap_pdp_to_pd(*pdp, va)); 535 } 536 537 /* 538 * Return pointer to PT slot in the PD given a pointer to the PD 539 */ 540 static __inline 541 pd_entry_t * 542 pmap_pd_to_pt(pdp_entry_t pd_pte, vm_offset_t va) 543 { 544 pd_entry_t *pt; 545 546 pt = (pd_entry_t *)PHYS_TO_DMAP(pd_pte & PG_FRAME); 547 return (&pt[pmap_pt_index(va)]); 548 } 549 550 /* 551 * Return pointer to PT slot in the PD 552 * 553 * SIMPLE PMAP NOTE: Simple pmaps (embedded in objects) do not have PDPs, 554 * so we cannot lookup the PD via the PDP. Instead we 555 * must look it up via the pmap. 
 */
static __inline
pd_entry_t *
pmap_pt(pmap_t pmap, vm_offset_t va)
{
        pdp_entry_t *pd;
        pv_entry_t pv;
        vm_pindex_t pd_pindex;

        if (pmap->pm_flags & PMAP_FLAG_SIMPLE) {
                pd_pindex = pmap_pd_pindex(va);
                spin_lock(&pmap->pm_spin);
                pv = pv_entry_rb_tree_RB_LOOKUP(&pmap->pm_pvroot, pd_pindex);
                spin_unlock(&pmap->pm_spin);
                if (pv == NULL || pv->pv_m == NULL)
                        return NULL;
                return (pmap_pd_to_pt(VM_PAGE_TO_PHYS(pv->pv_m), va));
        } else {
                pd = pmap_pd(pmap, va);
                if (pd == NULL || (*pd & pmap->pmap_bits[PG_V_IDX]) == 0)
                        return NULL;
                return (pmap_pd_to_pt(*pd, va));
        }
}

/*
 * Return pointer to PTE slot in the PT given a pointer to the PT
 */
static __inline
pt_entry_t *
pmap_pt_to_pte(pd_entry_t pt_pte, vm_offset_t va)
{
        pt_entry_t *pte;

        pte = (pt_entry_t *)PHYS_TO_DMAP(pt_pte & PG_FRAME);
        return (&pte[pmap_pte_index(va)]);
}

/*
 * Return pointer to PTE slot in the PT
 */
static __inline
pt_entry_t *
pmap_pte(pmap_t pmap, vm_offset_t va)
{
        pd_entry_t *pt;

        pt = pmap_pt(pmap, va);
        if (pt == NULL || (*pt & pmap->pmap_bits[PG_V_IDX]) == 0)
                return NULL;
        if ((*pt & pmap->pmap_bits[PG_PS_IDX]) != 0)
                return ((pt_entry_t *)pt);
        return (pmap_pt_to_pte(*pt, va));
}

/*
 * Of all the layers (PTE, PT, PD, PDP, PML4) the best one to cache is
 * the PT layer.  This will speed up core pmap operations considerably.
 *
 * NOTE: The pmap spinlock does not need to be held but the passed-in pv
 *       must be in a known associated state (typically by being locked when
 *       the pmap spinlock isn't held).  We allow the race for that case.
 */
static __inline
void
pv_cache(pv_entry_t pv, vm_pindex_t pindex)
{
        if (pindex >= pmap_pt_pindex(0) && pindex <= pmap_pd_pindex(0))
                pv->pv_pmap->pm_pvhint = pv;
}

/*
 * Return address of PT slot in PD (KVM only)
 *
 * Cannot be used for user page tables because it might interfere with
 * the shared page-table-page optimization (pmap_mmu_optimize).
 */
static __inline
pd_entry_t *
vtopt(vm_offset_t va)
{
        uint64_t mask = ((1ul << (NPDEPGSHIFT + NPDPEPGSHIFT +
                                  NPML4EPGSHIFT)) - 1);

        return (PDmap + ((va >> PDRSHIFT) & mask));
}

/*
 * KVM - return address of PTE slot in PT
 */
static __inline
pt_entry_t *
vtopte(vm_offset_t va)
{
        uint64_t mask = ((1ul << (NPTEPGSHIFT + NPDEPGSHIFT +
                                  NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1);

        return (PTmap + ((va >> PAGE_SHIFT) & mask));
}

static uint64_t
allocpages(vm_paddr_t *firstaddr, long n)
{
        uint64_t ret;

        ret = *firstaddr;
        bzero((void *)ret, n * PAGE_SIZE);
        *firstaddr += n * PAGE_SIZE;
        return (ret);
}

static
void
create_pagetables(vm_paddr_t *firstaddr)
{
        long i;         /* must be 64 bits */
        long nkpt_base;
        long nkpt_phys;
        int j;

        /*
         * We are running (mostly) V=P at this point
         *
         * Calculate NKPT - number of kernel page tables.  We have to
         * accommodate preallocation of the vm_page_array, dump bitmap,
         * MSGBUF_SIZE, and other stuff.  Be generous.
         *
         * Maxmem is in pages.
         *
         * ndmpdp is the number of 1GB pages we wish to map.
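         *
         * Illustrative example: a machine with 16GB of RAM has
         * ptoa(Maxmem) = 16GB, so the calculation below yields
         * ndmpdp = 16 (one 1GB direct-map slot per GB of RAM);
         * machines with less than 4GB are rounded up to the
         * 4GB minimum.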
687 */ 688 ndmpdp = (ptoa(Maxmem) + NBPDP - 1) >> PDPSHIFT; 689 if (ndmpdp < 4) /* Minimum 4GB of dirmap */ 690 ndmpdp = 4; 691 KKASSERT(ndmpdp <= NKPDPE * NPDEPG); 692 693 /* 694 * Starting at the beginning of kvm (not KERNBASE). 695 */ 696 nkpt_phys = (Maxmem * sizeof(struct vm_page) + NBPDR - 1) / NBPDR; 697 nkpt_phys += (Maxmem * sizeof(struct pv_entry) + NBPDR - 1) / NBPDR; 698 nkpt_phys += ((nkpt + nkpt + 1 + NKPML4E + NKPDPE + NDMPML4E + 699 ndmpdp) + 511) / 512; 700 nkpt_phys += 128; 701 702 /* 703 * Starting at KERNBASE - map 2G worth of page table pages. 704 * KERNBASE is offset -2G from the end of kvm. 705 */ 706 nkpt_base = (NPDPEPG - KPDPI) * NPTEPG; /* typically 2 x 512 */ 707 708 /* 709 * Allocate pages 710 */ 711 KPTbase = allocpages(firstaddr, nkpt_base); 712 KPTphys = allocpages(firstaddr, nkpt_phys); 713 KPML4phys = allocpages(firstaddr, 1); 714 KPDPphys = allocpages(firstaddr, NKPML4E); 715 KPDphys = allocpages(firstaddr, NKPDPE); 716 717 /* 718 * Calculate the page directory base for KERNBASE, 719 * that is where we start populating the page table pages. 720 * Basically this is the end - 2. 721 */ 722 KPDbase = KPDphys + ((NKPDPE - (NPDPEPG - KPDPI)) << PAGE_SHIFT); 723 724 DMPDPphys = allocpages(firstaddr, NDMPML4E); 725 if ((amd_feature & AMDID_PAGE1GB) == 0) 726 DMPDphys = allocpages(firstaddr, ndmpdp); 727 dmaplimit = (vm_paddr_t)ndmpdp << PDPSHIFT; 728 729 /* 730 * Fill in the underlying page table pages for the area around 731 * KERNBASE. This remaps low physical memory to KERNBASE. 732 * 733 * Read-only from zero to physfree 734 * XXX not fully used, underneath 2M pages 735 */ 736 for (i = 0; (i << PAGE_SHIFT) < *firstaddr; i++) { 737 ((pt_entry_t *)KPTbase)[i] = i << PAGE_SHIFT; 738 ((pt_entry_t *)KPTbase)[i] |= 739 pmap_bits_default[PG_RW_IDX] | 740 pmap_bits_default[PG_V_IDX] | 741 pmap_bits_default[PG_G_IDX]; 742 } 743 744 /* 745 * Now map the initial kernel page tables. One block of page 746 * tables is placed at the beginning of kernel virtual memory, 747 * and another block is placed at KERNBASE to map the kernel binary, 748 * data, bss, and initial pre-allocations. 749 */ 750 for (i = 0; i < nkpt_base; i++) { 751 ((pd_entry_t *)KPDbase)[i] = KPTbase + (i << PAGE_SHIFT); 752 ((pd_entry_t *)KPDbase)[i] |= 753 pmap_bits_default[PG_RW_IDX] | 754 pmap_bits_default[PG_V_IDX]; 755 } 756 for (i = 0; i < nkpt_phys; i++) { 757 ((pd_entry_t *)KPDphys)[i] = KPTphys + (i << PAGE_SHIFT); 758 ((pd_entry_t *)KPDphys)[i] |= 759 pmap_bits_default[PG_RW_IDX] | 760 pmap_bits_default[PG_V_IDX]; 761 } 762 763 /* 764 * Map from zero to end of allocations using 2M pages as an 765 * optimization. This will bypass some of the KPTBase pages 766 * above in the KERNBASE area. 767 */ 768 for (i = 0; (i << PDRSHIFT) < *firstaddr; i++) { 769 ((pd_entry_t *)KPDbase)[i] = i << PDRSHIFT; 770 ((pd_entry_t *)KPDbase)[i] |= 771 pmap_bits_default[PG_RW_IDX] | 772 pmap_bits_default[PG_V_IDX] | 773 pmap_bits_default[PG_PS_IDX] | 774 pmap_bits_default[PG_G_IDX]; 775 } 776 777 /* 778 * And connect up the PD to the PDP. The kernel pmap is expected 779 * to pre-populate all of its PDs. See NKPDPE in vmparam.h. 
780 */ 781 for (i = 0; i < NKPDPE; i++) { 782 ((pdp_entry_t *)KPDPphys)[NPDPEPG - NKPDPE + i] = 783 KPDphys + (i << PAGE_SHIFT); 784 ((pdp_entry_t *)KPDPphys)[NPDPEPG - NKPDPE + i] |= 785 pmap_bits_default[PG_RW_IDX] | 786 pmap_bits_default[PG_V_IDX] | 787 pmap_bits_default[PG_U_IDX]; 788 } 789 790 /* 791 * Now set up the direct map space using either 2MB or 1GB pages 792 * Preset PG_M and PG_A because demotion expects it. 793 * 794 * When filling in entries in the PD pages make sure any excess 795 * entries are set to zero as we allocated enough PD pages 796 */ 797 if ((amd_feature & AMDID_PAGE1GB) == 0) { 798 for (i = 0; i < NPDEPG * ndmpdp; i++) { 799 ((pd_entry_t *)DMPDphys)[i] = i << PDRSHIFT; 800 ((pd_entry_t *)DMPDphys)[i] |= 801 pmap_bits_default[PG_RW_IDX] | 802 pmap_bits_default[PG_V_IDX] | 803 pmap_bits_default[PG_PS_IDX] | 804 pmap_bits_default[PG_G_IDX] | 805 pmap_bits_default[PG_M_IDX] | 806 pmap_bits_default[PG_A_IDX]; 807 } 808 809 /* 810 * And the direct map space's PDP 811 */ 812 for (i = 0; i < ndmpdp; i++) { 813 ((pdp_entry_t *)DMPDPphys)[i] = DMPDphys + 814 (i << PAGE_SHIFT); 815 ((pdp_entry_t *)DMPDPphys)[i] |= 816 pmap_bits_default[PG_RW_IDX] | 817 pmap_bits_default[PG_V_IDX] | 818 pmap_bits_default[PG_U_IDX]; 819 } 820 } else { 821 for (i = 0; i < ndmpdp; i++) { 822 ((pdp_entry_t *)DMPDPphys)[i] = 823 (vm_paddr_t)i << PDPSHIFT; 824 ((pdp_entry_t *)DMPDPphys)[i] |= 825 pmap_bits_default[PG_RW_IDX] | 826 pmap_bits_default[PG_V_IDX] | 827 pmap_bits_default[PG_PS_IDX] | 828 pmap_bits_default[PG_G_IDX] | 829 pmap_bits_default[PG_M_IDX] | 830 pmap_bits_default[PG_A_IDX]; 831 } 832 } 833 834 /* And recursively map PML4 to itself in order to get PTmap */ 835 ((pdp_entry_t *)KPML4phys)[PML4PML4I] = KPML4phys; 836 ((pdp_entry_t *)KPML4phys)[PML4PML4I] |= 837 pmap_bits_default[PG_RW_IDX] | 838 pmap_bits_default[PG_V_IDX] | 839 pmap_bits_default[PG_U_IDX]; 840 841 /* 842 * Connect the Direct Map slots up to the PML4 843 */ 844 for (j = 0; j < NDMPML4E; ++j) { 845 ((pdp_entry_t *)KPML4phys)[DMPML4I + j] = 846 (DMPDPphys + ((vm_paddr_t)j << PML4SHIFT)) | 847 pmap_bits_default[PG_RW_IDX] | 848 pmap_bits_default[PG_V_IDX] | 849 pmap_bits_default[PG_U_IDX]; 850 } 851 852 /* 853 * Connect the KVA slot up to the PML4 854 */ 855 ((pdp_entry_t *)KPML4phys)[KPML4I] = KPDPphys; 856 ((pdp_entry_t *)KPML4phys)[KPML4I] |= 857 pmap_bits_default[PG_RW_IDX] | 858 pmap_bits_default[PG_V_IDX] | 859 pmap_bits_default[PG_U_IDX]; 860 } 861 862 /* 863 * Bootstrap the system enough to run with virtual memory. 864 * 865 * On the i386 this is called after mapping has already been enabled 866 * and just syncs the pmap module with what has already been done. 867 * [We can't call it easily with mapping off since the kernel is not 868 * mapped with PA == VA, hence we would have to relocate every address 869 * from the linked base (virtual) address "KERNBASE" to the actual 870 * (physical) address starting relative to 0] 871 */ 872 void 873 pmap_bootstrap(vm_paddr_t *firstaddr) 874 { 875 vm_offset_t va; 876 pt_entry_t *pte; 877 878 KvaStart = VM_MIN_KERNEL_ADDRESS; 879 KvaEnd = VM_MAX_KERNEL_ADDRESS; 880 KvaSize = KvaEnd - KvaStart; 881 882 avail_start = *firstaddr; 883 884 /* 885 * Create an initial set of page tables to run the kernel in. 
         */
        create_pagetables(firstaddr);

        virtual2_start = KvaStart;
        virtual2_end = PTOV_OFFSET;

        virtual_start = (vm_offset_t) PTOV_OFFSET + *firstaddr;
        virtual_start = pmap_kmem_choose(virtual_start);

        virtual_end = VM_MAX_KERNEL_ADDRESS;

        /* XXX do %cr0 as well */
        load_cr4(rcr4() | CR4_PGE | CR4_PSE);
        load_cr3(KPML4phys);

        /*
         * Initialize protection array.
         */
        i386_protection_init();

        /*
         * The kernel's pmap is statically allocated so we don't have to use
         * pmap_create, which is unlikely to work correctly at this part of
         * the boot sequence (XXX and which no longer exists).
         */
        kernel_pmap.pm_pml4 = (pdp_entry_t *) (PTOV_OFFSET + KPML4phys);
        kernel_pmap.pm_count = 1;
        CPUMASK_ASSALLONES(kernel_pmap.pm_active);
        RB_INIT(&kernel_pmap.pm_pvroot);
        spin_init(&kernel_pmap.pm_spin, "pmapbootstrap");
        lwkt_token_init(&kernel_pmap.pm_token, "kpmap_tok");

        /*
         * Reserve some special page table entries/VA space for temporary
         * mapping of pages.
         */
#define SYSMAP(c, p, v, n)      \
        v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);

        va = virtual_start;
        pte = vtopte(va);

        /*
         * CMAP1/CMAP2 are used for zeroing and copying pages.
         */
        SYSMAP(caddr_t, CMAP1, CADDR1, 1)

        /*
         * Crashdump maps.
         */
        SYSMAP(caddr_t, pt_crashdumpmap, crashdumpmap, MAXDUMPPGS);

        /*
         * ptvmmap is used for reading arbitrary physical pages via
         * /dev/mem.
         */
        SYSMAP(caddr_t, ptmmap, ptvmmap, 1)

        /*
         * msgbufp is used to map the system message buffer.
         * XXX msgbufmap is not used.
         */
        SYSMAP(struct msgbuf *, msgbufmap, msgbufp,
               atop(round_page(MSGBUF_SIZE)))

        virtual_start = va;
        virtual_start = pmap_kmem_choose(virtual_start);

        *CMAP1 = 0;

        /*
         * PG_G is terribly broken on SMP because we IPI invltlb's in some
         * cases rather than invlpg.  Actually, it is not clear why it even
         * works under UP, given the self-referential page table mappings.
         */
//      pgeflag = 0;

        /*
         * Initialize the 4MB page size flag
         */
//      pseflag = 0;
        /*
         * The 4MB page version of the initial
         * kernel page mapping.
         */
        pdir4mb = 0;

#if !defined(DISABLE_PSE)
        if (cpu_feature & CPUID_PSE) {
                pt_entry_t ptditmp;
                /*
                 * Note that we have enabled PSE mode
                 */
//              pseflag = kernel_pmap.pmap_bits[PG_PS_IDX];
                ptditmp = *(PTmap + x86_64_btop(KERNBASE));
                ptditmp &= ~(NBPDR - 1);
                ptditmp |= pmap_bits_default[PG_V_IDX] |
                           pmap_bits_default[PG_RW_IDX] |
                           pmap_bits_default[PG_PS_IDX] |
                           pmap_bits_default[PG_U_IDX];
//                         pgeflag;
                pdir4mb = ptditmp;
        }
#endif
        cpu_invltlb();

        /* Initialize the PAT MSR */
        pmap_init_pat();
        pmap_pinit_defaults(&kernel_pmap);

        TUNABLE_INT_FETCH("machdep.pmap_fast_kernel_cpusync",
                          &pmap_fast_kernel_cpusync);

}

/*
 * Set up the PAT MSR.
 */
void
pmap_init_pat(void)
{
        uint64_t pat_msr;
        u_long cr0, cr4;

        /*
         * Default values mapping PATi,PCD,PWT bits at system reset.
         * The default values effectively ignore the PATi bit by
         * repeating the encodings for 0-3 in 4-7, and map the PCD
         * and PWT bit combinations to the expected PAT types.
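         *
         * For reference, the layout programmed just below is:
         *
         *      index 0 (PAT=0,PCD=0,PWT=0)  WB     index 4  WB
         *      index 1 (PWT=1)              WT     index 5  WT
         *      index 2 (PCD=1)              UC-    index 6  UC-
         *      index 3 (PCD=1,PWT=1)        UC     index 7  UC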
1015 */ 1016 pat_msr = PAT_VALUE(0, PAT_WRITE_BACK) | /* 000 */ 1017 PAT_VALUE(1, PAT_WRITE_THROUGH) | /* 001 */ 1018 PAT_VALUE(2, PAT_UNCACHED) | /* 010 */ 1019 PAT_VALUE(3, PAT_UNCACHEABLE) | /* 011 */ 1020 PAT_VALUE(4, PAT_WRITE_BACK) | /* 100 */ 1021 PAT_VALUE(5, PAT_WRITE_THROUGH) | /* 101 */ 1022 PAT_VALUE(6, PAT_UNCACHED) | /* 110 */ 1023 PAT_VALUE(7, PAT_UNCACHEABLE); /* 111 */ 1024 pat_pte_index[PAT_WRITE_BACK] = 0; 1025 pat_pte_index[PAT_WRITE_THROUGH]= 0 | X86_PG_NC_PWT; 1026 pat_pte_index[PAT_UNCACHED] = X86_PG_NC_PCD; 1027 pat_pte_index[PAT_UNCACHEABLE] = X86_PG_NC_PCD | X86_PG_NC_PWT; 1028 pat_pte_index[PAT_WRITE_PROTECTED] = pat_pte_index[PAT_UNCACHEABLE]; 1029 pat_pte_index[PAT_WRITE_COMBINING] = pat_pte_index[PAT_UNCACHEABLE]; 1030 1031 if (cpu_feature & CPUID_PAT) { 1032 /* 1033 * If we support the PAT then set-up entries for 1034 * WRITE_PROTECTED and WRITE_COMBINING using bit patterns 1035 * 4 and 5. 1036 */ 1037 pat_msr = (pat_msr & ~PAT_MASK(4)) | 1038 PAT_VALUE(4, PAT_WRITE_PROTECTED); 1039 pat_msr = (pat_msr & ~PAT_MASK(5)) | 1040 PAT_VALUE(5, PAT_WRITE_COMBINING); 1041 pat_pte_index[PAT_WRITE_PROTECTED] = X86_PG_PTE_PAT | 0; 1042 pat_pte_index[PAT_WRITE_COMBINING] = X86_PG_PTE_PAT | X86_PG_NC_PWT; 1043 1044 /* 1045 * Then enable the PAT 1046 */ 1047 1048 /* Disable PGE. */ 1049 cr4 = rcr4(); 1050 load_cr4(cr4 & ~CR4_PGE); 1051 1052 /* Disable caches (CD = 1, NW = 0). */ 1053 cr0 = rcr0(); 1054 load_cr0((cr0 & ~CR0_NW) | CR0_CD); 1055 1056 /* Flushes caches and TLBs. */ 1057 wbinvd(); 1058 cpu_invltlb(); 1059 1060 /* Update PAT and index table. */ 1061 wrmsr(MSR_PAT, pat_msr); 1062 1063 /* Flush caches and TLBs again. */ 1064 wbinvd(); 1065 cpu_invltlb(); 1066 1067 /* Restore caches and PGE. */ 1068 load_cr0(cr0); 1069 load_cr4(cr4); 1070 PatMsr = pat_msr; 1071 } 1072 } 1073 1074 /* 1075 * Set 4mb pdir for mp startup 1076 */ 1077 void 1078 pmap_set_opt(void) 1079 { 1080 if (cpu_feature & CPUID_PSE) { 1081 load_cr4(rcr4() | CR4_PSE); 1082 if (pdir4mb && mycpu->gd_cpuid == 0) { /* only on BSP */ 1083 cpu_invltlb(); 1084 } 1085 } 1086 } 1087 1088 /* 1089 * Initialize the pmap module. 1090 * Called by vm_init, to initialize any structures that the pmap 1091 * system needs to map virtual memory. 1092 * pmap_init has been enhanced to support in a fairly consistant 1093 * way, discontiguous physical memory. 1094 */ 1095 void 1096 pmap_init(void) 1097 { 1098 int i; 1099 int initial_pvs; 1100 1101 /* 1102 * Allocate memory for random pmap data structures. Includes the 1103 * pv_head_table. 1104 */ 1105 1106 for (i = 0; i < vm_page_array_size; i++) { 1107 vm_page_t m; 1108 1109 m = &vm_page_array[i]; 1110 TAILQ_INIT(&m->md.pv_list); 1111 } 1112 1113 /* 1114 * init the pv free list 1115 */ 1116 initial_pvs = vm_page_array_size; 1117 if (initial_pvs < MINPV) 1118 initial_pvs = MINPV; 1119 pvzone = &pvzone_store; 1120 pvinit = (void *)kmem_alloc(&kernel_map, 1121 initial_pvs * sizeof (struct pv_entry)); 1122 zbootinit(pvzone, "PV ENTRY", sizeof (struct pv_entry), 1123 pvinit, initial_pvs); 1124 1125 /* 1126 * Now it is safe to enable pv_table recording. 1127 */ 1128 pmap_initialized = TRUE; 1129 } 1130 1131 /* 1132 * Initialize the address space (zone) for the pv_entries. Set a 1133 * high water mark so that the system can recover from excessive 1134 * numbers of pv entries. 
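 *
 * Sizing example (mirroring the code below): with the default
 * PMAP_SHPGPERPROC of 2000, pv_entry_max = 2000 * maxproc +
 * vm_page_array_size, overridable via the vm.pmap.shpgperproc and
 * vm.pmap.pv_entries tunables; the high water mark is 90% of that.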
1135 */ 1136 void 1137 pmap_init2(void) 1138 { 1139 int shpgperproc = PMAP_SHPGPERPROC; 1140 int entry_max; 1141 1142 TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc); 1143 pv_entry_max = shpgperproc * maxproc + vm_page_array_size; 1144 TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max); 1145 pv_entry_high_water = 9 * (pv_entry_max / 10); 1146 1147 /* 1148 * Subtract out pages already installed in the zone (hack) 1149 */ 1150 entry_max = pv_entry_max - vm_page_array_size; 1151 if (entry_max <= 0) 1152 entry_max = 1; 1153 1154 zinitna(pvzone, &pvzone_obj, NULL, 0, entry_max, ZONE_INTERRUPT, 1); 1155 } 1156 1157 /* 1158 * Typically used to initialize a fictitious page by vm/device_pager.c 1159 */ 1160 void 1161 pmap_page_init(struct vm_page *m) 1162 { 1163 vm_page_init(m); 1164 TAILQ_INIT(&m->md.pv_list); 1165 } 1166 1167 /*************************************************** 1168 * Low level helper routines..... 1169 ***************************************************/ 1170 1171 /* 1172 * this routine defines the region(s) of memory that should 1173 * not be tested for the modified bit. 1174 */ 1175 static __inline 1176 int 1177 pmap_track_modified(vm_pindex_t pindex) 1178 { 1179 vm_offset_t va = (vm_offset_t)pindex << PAGE_SHIFT; 1180 if ((va < clean_sva) || (va >= clean_eva)) 1181 return 1; 1182 else 1183 return 0; 1184 } 1185 1186 /* 1187 * Extract the physical page address associated with the map/VA pair. 1188 * The page must be wired for this to work reliably. 1189 * 1190 * XXX for the moment we're using pv_find() instead of pv_get(), as 1191 * callers might be expecting non-blocking operation. 1192 */ 1193 vm_paddr_t 1194 pmap_extract(pmap_t pmap, vm_offset_t va) 1195 { 1196 vm_paddr_t rtval; 1197 pv_entry_t pt_pv; 1198 pt_entry_t *ptep; 1199 1200 rtval = 0; 1201 if (va >= VM_MAX_USER_ADDRESS) { 1202 /* 1203 * Kernel page directories might be direct-mapped and 1204 * there is typically no PV tracking of pte's 1205 */ 1206 pd_entry_t *pt; 1207 1208 pt = pmap_pt(pmap, va); 1209 if (pt && (*pt & pmap->pmap_bits[PG_V_IDX])) { 1210 if (*pt & pmap->pmap_bits[PG_PS_IDX]) { 1211 rtval = *pt & PG_PS_FRAME; 1212 rtval |= va & PDRMASK; 1213 } else { 1214 ptep = pmap_pt_to_pte(*pt, va); 1215 if (*pt & pmap->pmap_bits[PG_V_IDX]) { 1216 rtval = *ptep & PG_FRAME; 1217 rtval |= va & PAGE_MASK; 1218 } 1219 } 1220 } 1221 } else { 1222 /* 1223 * User pages currently do not direct-map the page directory 1224 * and some pages might not used managed PVs. But all PT's 1225 * will have a PV. 1226 */ 1227 pt_pv = pv_find(pmap, pmap_pt_pindex(va)); 1228 if (pt_pv) { 1229 ptep = pv_pte_lookup(pt_pv, pmap_pte_index(va)); 1230 if (*ptep & pmap->pmap_bits[PG_V_IDX]) { 1231 rtval = *ptep & PG_FRAME; 1232 rtval |= va & PAGE_MASK; 1233 } 1234 pv_drop(pt_pv); 1235 } 1236 } 1237 return rtval; 1238 } 1239 1240 /* 1241 * Similar to extract but checks protections, SMP-friendly short-cut for 1242 * vm_fault_page[_quick](). Can return NULL to cause the caller to 1243 * fall-through to the real fault code. 1244 * 1245 * The returned page, if not NULL, is held (and not busied). 
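 *
 * Typical usage: the quick fault path tries this first and falls back
 * to the full fault code when NULL is returned; on success the caller
 * is responsible for releasing the hold (e.g. vm_page_unhold()) when
 * it is done with the page.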
1246 */ 1247 vm_page_t 1248 pmap_fault_page_quick(pmap_t pmap, vm_offset_t va, vm_prot_t prot) 1249 { 1250 if (pmap && va < VM_MAX_USER_ADDRESS) { 1251 pv_entry_t pt_pv; 1252 pv_entry_t pte_pv; 1253 pt_entry_t *ptep; 1254 pt_entry_t req; 1255 vm_page_t m; 1256 int error; 1257 1258 req = pmap->pmap_bits[PG_V_IDX] | 1259 pmap->pmap_bits[PG_U_IDX]; 1260 if (prot & VM_PROT_WRITE) 1261 req |= pmap->pmap_bits[PG_RW_IDX]; 1262 1263 pt_pv = pv_find(pmap, pmap_pt_pindex(va)); 1264 if (pt_pv == NULL) 1265 return (NULL); 1266 ptep = pv_pte_lookup(pt_pv, pmap_pte_index(va)); 1267 if ((*ptep & req) != req) { 1268 pv_drop(pt_pv); 1269 return (NULL); 1270 } 1271 pte_pv = pv_get_try(pmap, pmap_pte_pindex(va), &error); 1272 if (pte_pv && error == 0) { 1273 m = pte_pv->pv_m; 1274 vm_page_hold(m); 1275 if (prot & VM_PROT_WRITE) 1276 vm_page_dirty(m); 1277 pv_put(pte_pv); 1278 } else if (pte_pv) { 1279 pv_drop(pte_pv); 1280 m = NULL; 1281 } else { 1282 m = NULL; 1283 } 1284 pv_drop(pt_pv); 1285 return(m); 1286 } else { 1287 return(NULL); 1288 } 1289 } 1290 1291 /* 1292 * Extract the physical page address associated kernel virtual address. 1293 */ 1294 vm_paddr_t 1295 pmap_kextract(vm_offset_t va) 1296 { 1297 pd_entry_t pt; /* pt entry in pd */ 1298 vm_paddr_t pa; 1299 1300 if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) { 1301 pa = DMAP_TO_PHYS(va); 1302 } else { 1303 pt = *vtopt(va); 1304 if (pt & kernel_pmap.pmap_bits[PG_PS_IDX]) { 1305 pa = (pt & PG_PS_FRAME) | (va & PDRMASK); 1306 } else { 1307 /* 1308 * Beware of a concurrent promotion that changes the 1309 * PDE at this point! For example, vtopte() must not 1310 * be used to access the PTE because it would use the 1311 * new PDE. It is, however, safe to use the old PDE 1312 * because the page table page is preserved by the 1313 * promotion. 1314 */ 1315 pa = *pmap_pt_to_pte(pt, va); 1316 pa = (pa & PG_FRAME) | (va & PAGE_MASK); 1317 } 1318 } 1319 return pa; 1320 } 1321 1322 /*************************************************** 1323 * Low level mapping routines..... 1324 ***************************************************/ 1325 1326 /* 1327 * Routine: pmap_kenter 1328 * Function: 1329 * Add a wired page to the KVA 1330 * NOTE! note that in order for the mapping to take effect -- you 1331 * should do an invltlb after doing the pmap_kenter(). 1332 */ 1333 void 1334 pmap_kenter(vm_offset_t va, vm_paddr_t pa) 1335 { 1336 pt_entry_t *pte; 1337 pt_entry_t npte; 1338 pmap_inval_info info; 1339 1340 pmap_inval_init(&info); /* XXX remove */ 1341 npte = pa | 1342 kernel_pmap.pmap_bits[PG_RW_IDX] | 1343 kernel_pmap.pmap_bits[PG_V_IDX]; 1344 // pgeflag; 1345 pte = vtopte(va); 1346 pmap_inval_interlock(&info, &kernel_pmap, va); /* XXX remove */ 1347 *pte = npte; 1348 pmap_inval_deinterlock(&info, &kernel_pmap); /* XXX remove */ 1349 pmap_inval_done(&info); /* XXX remove */ 1350 } 1351 1352 /* 1353 * Routine: pmap_kenter_quick 1354 * Function: 1355 * Similar to pmap_kenter(), except we only invalidate the 1356 * mapping on the current CPU. 
1357 */ 1358 void 1359 pmap_kenter_quick(vm_offset_t va, vm_paddr_t pa) 1360 { 1361 pt_entry_t *pte; 1362 pt_entry_t npte; 1363 1364 npte = pa | 1365 kernel_pmap.pmap_bits[PG_RW_IDX] | 1366 kernel_pmap.pmap_bits[PG_V_IDX]; 1367 // pgeflag; 1368 pte = vtopte(va); 1369 *pte = npte; 1370 cpu_invlpg((void *)va); 1371 } 1372 1373 void 1374 pmap_kenter_sync(vm_offset_t va) 1375 { 1376 pmap_inval_info info; 1377 1378 pmap_inval_init(&info); 1379 pmap_inval_interlock(&info, &kernel_pmap, va); 1380 pmap_inval_deinterlock(&info, &kernel_pmap); 1381 pmap_inval_done(&info); 1382 } 1383 1384 void 1385 pmap_kenter_sync_quick(vm_offset_t va) 1386 { 1387 cpu_invlpg((void *)va); 1388 } 1389 1390 /* 1391 * remove a page from the kernel pagetables 1392 */ 1393 void 1394 pmap_kremove(vm_offset_t va) 1395 { 1396 pt_entry_t *pte; 1397 pmap_inval_info info; 1398 1399 pmap_inval_init(&info); 1400 pte = vtopte(va); 1401 pmap_inval_interlock(&info, &kernel_pmap, va); 1402 (void)pte_load_clear(pte); 1403 pmap_inval_deinterlock(&info, &kernel_pmap); 1404 pmap_inval_done(&info); 1405 } 1406 1407 void 1408 pmap_kremove_quick(vm_offset_t va) 1409 { 1410 pt_entry_t *pte; 1411 pte = vtopte(va); 1412 (void)pte_load_clear(pte); 1413 cpu_invlpg((void *)va); 1414 } 1415 1416 /* 1417 * XXX these need to be recoded. They are not used in any critical path. 1418 */ 1419 void 1420 pmap_kmodify_rw(vm_offset_t va) 1421 { 1422 atomic_set_long(vtopte(va), kernel_pmap.pmap_bits[PG_RW_IDX]); 1423 cpu_invlpg((void *)va); 1424 } 1425 1426 /* NOT USED 1427 void 1428 pmap_kmodify_nc(vm_offset_t va) 1429 { 1430 atomic_set_long(vtopte(va), PG_N); 1431 cpu_invlpg((void *)va); 1432 } 1433 */ 1434 1435 /* 1436 * Used to map a range of physical addresses into kernel virtual 1437 * address space during the low level boot, typically to map the 1438 * dump bitmap, message buffer, and vm_page_array. 1439 * 1440 * These mappings are typically made at some pointer after the end of the 1441 * kernel text+data. 1442 * 1443 * We could return PHYS_TO_DMAP(start) here and not allocate any 1444 * via (*virtp), but then kmem from userland and kernel dumps won't 1445 * have access to the related pointers. 1446 */ 1447 vm_offset_t 1448 pmap_map(vm_offset_t *virtp, vm_paddr_t start, vm_paddr_t end, int prot) 1449 { 1450 vm_offset_t va; 1451 vm_offset_t va_start; 1452 1453 /*return PHYS_TO_DMAP(start);*/ 1454 1455 va_start = *virtp; 1456 va = va_start; 1457 1458 while (start < end) { 1459 pmap_kenter_quick(va, start); 1460 va += PAGE_SIZE; 1461 start += PAGE_SIZE; 1462 } 1463 *virtp = va; 1464 return va_start; 1465 } 1466 1467 #define PMAP_CLFLUSH_THRESHOLD (2 * 1024 * 1024) 1468 1469 /* 1470 * Remove the specified set of pages from the data and instruction caches. 1471 * 1472 * In contrast to pmap_invalidate_cache_range(), this function does not 1473 * rely on the CPU's self-snoop feature, because it is intended for use 1474 * when moving pages into a different cache domain. 
1475 */ 1476 void 1477 pmap_invalidate_cache_pages(vm_page_t *pages, int count) 1478 { 1479 vm_offset_t daddr, eva; 1480 int i; 1481 1482 if (count >= PMAP_CLFLUSH_THRESHOLD / PAGE_SIZE || 1483 (cpu_feature & CPUID_CLFSH) == 0) 1484 wbinvd(); 1485 else { 1486 cpu_mfence(); 1487 for (i = 0; i < count; i++) { 1488 daddr = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pages[i])); 1489 eva = daddr + PAGE_SIZE; 1490 for (; daddr < eva; daddr += cpu_clflush_line_size) 1491 clflush(daddr); 1492 } 1493 cpu_mfence(); 1494 } 1495 } 1496 1497 void 1498 pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva) 1499 { 1500 KASSERT((sva & PAGE_MASK) == 0, 1501 ("pmap_invalidate_cache_range: sva not page-aligned")); 1502 KASSERT((eva & PAGE_MASK) == 0, 1503 ("pmap_invalidate_cache_range: eva not page-aligned")); 1504 1505 if (cpu_feature & CPUID_SS) { 1506 ; /* If "Self Snoop" is supported, do nothing. */ 1507 } else { 1508 /* Globally invalidate caches */ 1509 cpu_wbinvd_on_all_cpus(); 1510 } 1511 } 1512 void 1513 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 1514 { 1515 smp_invlpg_range(pmap->pm_active, sva, eva); 1516 } 1517 1518 /* 1519 * Add a list of wired pages to the kva 1520 * this routine is only used for temporary 1521 * kernel mappings that do not need to have 1522 * page modification or references recorded. 1523 * Note that old mappings are simply written 1524 * over. The page *must* be wired. 1525 */ 1526 void 1527 pmap_qenter(vm_offset_t va, vm_page_t *m, int count) 1528 { 1529 vm_offset_t end_va; 1530 1531 end_va = va + count * PAGE_SIZE; 1532 1533 while (va < end_va) { 1534 pt_entry_t *pte; 1535 1536 pte = vtopte(va); 1537 *pte = VM_PAGE_TO_PHYS(*m) | 1538 kernel_pmap.pmap_bits[PG_RW_IDX] | 1539 kernel_pmap.pmap_bits[PG_V_IDX] | 1540 kernel_pmap.pmap_cache_bits[(*m)->pat_mode]; 1541 // pgeflag; 1542 cpu_invlpg((void *)va); 1543 va += PAGE_SIZE; 1544 m++; 1545 } 1546 smp_invltlb(); 1547 } 1548 1549 /* 1550 * This routine jerks page mappings from the 1551 * kernel -- it is meant only for temporary mappings. 1552 * 1553 * MPSAFE, INTERRUPT SAFE (cluster callback) 1554 */ 1555 void 1556 pmap_qremove(vm_offset_t va, int count) 1557 { 1558 vm_offset_t end_va; 1559 1560 end_va = va + count * PAGE_SIZE; 1561 1562 while (va < end_va) { 1563 pt_entry_t *pte; 1564 1565 pte = vtopte(va); 1566 (void)pte_load_clear(pte); 1567 cpu_invlpg((void *)va); 1568 va += PAGE_SIZE; 1569 } 1570 smp_invltlb(); 1571 } 1572 1573 /* 1574 * Create a new thread and optionally associate it with a (new) process. 1575 * NOTE! the new thread's cpu may not equal the current cpu. 1576 */ 1577 void 1578 pmap_init_thread(thread_t td) 1579 { 1580 /* enforce pcb placement & alignment */ 1581 td->td_pcb = (struct pcb *)(td->td_kstack + td->td_kstack_size) - 1; 1582 td->td_pcb = (struct pcb *)((intptr_t)td->td_pcb & ~(intptr_t)0xF); 1583 td->td_savefpu = &td->td_pcb->pcb_save; 1584 td->td_sp = (char *)td->td_pcb; /* no -16 */ 1585 } 1586 1587 /* 1588 * This routine directly affects the fork perf for a process. 
1589 */ 1590 void 1591 pmap_init_proc(struct proc *p) 1592 { 1593 } 1594 1595 static void 1596 pmap_pinit_defaults(struct pmap *pmap) 1597 { 1598 bcopy(pmap_bits_default, pmap->pmap_bits, 1599 sizeof(pmap_bits_default)); 1600 bcopy(protection_codes, pmap->protection_codes, 1601 sizeof(protection_codes)); 1602 bcopy(pat_pte_index, pmap->pmap_cache_bits, 1603 sizeof(pat_pte_index)); 1604 pmap->pmap_cache_mask = X86_PG_NC_PWT | X86_PG_NC_PCD | X86_PG_PTE_PAT; 1605 pmap->copyinstr = std_copyinstr; 1606 pmap->copyin = std_copyin; 1607 pmap->copyout = std_copyout; 1608 pmap->fubyte = std_fubyte; 1609 pmap->subyte = std_subyte; 1610 pmap->fuword = std_fuword; 1611 pmap->suword = std_suword; 1612 pmap->suword32 = std_suword32; 1613 } 1614 /* 1615 * Initialize pmap0/vmspace0. This pmap is not added to pmap_list because 1616 * it, and IdlePTD, represents the template used to update all other pmaps. 1617 * 1618 * On architectures where the kernel pmap is not integrated into the user 1619 * process pmap, this pmap represents the process pmap, not the kernel pmap. 1620 * kernel_pmap should be used to directly access the kernel_pmap. 1621 */ 1622 void 1623 pmap_pinit0(struct pmap *pmap) 1624 { 1625 pmap->pm_pml4 = (pml4_entry_t *)(PTOV_OFFSET + KPML4phys); 1626 pmap->pm_count = 1; 1627 CPUMASK_ASSZERO(pmap->pm_active); 1628 pmap->pm_pvhint = NULL; 1629 RB_INIT(&pmap->pm_pvroot); 1630 spin_init(&pmap->pm_spin, "pmapinit0"); 1631 lwkt_token_init(&pmap->pm_token, "pmap_tok"); 1632 bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 1633 pmap_pinit_defaults(pmap); 1634 } 1635 1636 /* 1637 * Initialize a preallocated and zeroed pmap structure, 1638 * such as one in a vmspace structure. 1639 */ 1640 static void 1641 pmap_pinit_simple(struct pmap *pmap) 1642 { 1643 /* 1644 * Misc initialization 1645 */ 1646 pmap->pm_count = 1; 1647 CPUMASK_ASSZERO(pmap->pm_active); 1648 pmap->pm_pvhint = NULL; 1649 pmap->pm_flags = PMAP_FLAG_SIMPLE; 1650 1651 pmap_pinit_defaults(pmap); 1652 1653 /* 1654 * Don't blow up locks/tokens on re-use (XXX fix/use drop code 1655 * for this). 1656 */ 1657 if (pmap->pm_pmlpv == NULL) { 1658 RB_INIT(&pmap->pm_pvroot); 1659 bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 1660 spin_init(&pmap->pm_spin, "pmapinitsimple"); 1661 lwkt_token_init(&pmap->pm_token, "pmap_tok"); 1662 } 1663 } 1664 1665 void 1666 pmap_pinit(struct pmap *pmap) 1667 { 1668 pv_entry_t pv; 1669 int j; 1670 1671 if (pmap->pm_pmlpv) { 1672 if (pmap->pmap_bits[TYPE_IDX] != REGULAR_PMAP) { 1673 pmap_puninit(pmap); 1674 } 1675 } 1676 1677 pmap_pinit_simple(pmap); 1678 pmap->pm_flags &= ~PMAP_FLAG_SIMPLE; 1679 1680 /* 1681 * No need to allocate page table space yet but we do need a valid 1682 * page directory table. 1683 */ 1684 if (pmap->pm_pml4 == NULL) { 1685 pmap->pm_pml4 = 1686 (pml4_entry_t *)kmem_alloc_pageable(&kernel_map, PAGE_SIZE); 1687 } 1688 1689 /* 1690 * Allocate the page directory page, which wires it even though 1691 * it isn't being entered into some higher level page table (it 1692 * being the highest level). If one is already cached we don't 1693 * have to do anything. 1694 */ 1695 if ((pv = pmap->pm_pmlpv) == NULL) { 1696 pv = pmap_allocpte(pmap, pmap_pml4_pindex(), NULL); 1697 pmap->pm_pmlpv = pv; 1698 pmap_kenter((vm_offset_t)pmap->pm_pml4, 1699 VM_PAGE_TO_PHYS(pv->pv_m)); 1700 pv_put(pv); 1701 1702 /* 1703 * Install DMAP and KMAP. 
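                 *
                 * These entries mirror the ones set up by
                 * create_pagetables(): every pmap shares the same
                 * DMPDPphys direct-map PDP pages and the same KPDPphys
                 * kernel PDP page, so DMAP and kernel addresses resolve
                 * identically in all pmaps.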
1704 */ 1705 for (j = 0; j < NDMPML4E; ++j) { 1706 pmap->pm_pml4[DMPML4I + j] = 1707 (DMPDPphys + ((vm_paddr_t)j << PML4SHIFT)) | 1708 pmap->pmap_bits[PG_RW_IDX] | 1709 pmap->pmap_bits[PG_V_IDX] | 1710 pmap->pmap_bits[PG_U_IDX]; 1711 } 1712 pmap->pm_pml4[KPML4I] = KPDPphys | 1713 pmap->pmap_bits[PG_RW_IDX] | 1714 pmap->pmap_bits[PG_V_IDX] | 1715 pmap->pmap_bits[PG_U_IDX]; 1716 1717 /* 1718 * install self-referential address mapping entry 1719 */ 1720 pmap->pm_pml4[PML4PML4I] = VM_PAGE_TO_PHYS(pv->pv_m) | 1721 pmap->pmap_bits[PG_V_IDX] | 1722 pmap->pmap_bits[PG_RW_IDX] | 1723 pmap->pmap_bits[PG_A_IDX] | 1724 pmap->pmap_bits[PG_M_IDX]; 1725 } else { 1726 KKASSERT(pv->pv_m->flags & PG_MAPPED); 1727 KKASSERT(pv->pv_m->flags & PG_WRITEABLE); 1728 } 1729 KKASSERT(pmap->pm_pml4[255] == 0); 1730 KKASSERT(RB_ROOT(&pmap->pm_pvroot) == pv); 1731 KKASSERT(pv->pv_entry.rbe_left == NULL); 1732 KKASSERT(pv->pv_entry.rbe_right == NULL); 1733 } 1734 1735 /* 1736 * Clean up a pmap structure so it can be physically freed. This routine 1737 * is called by the vmspace dtor function. A great deal of pmap data is 1738 * left passively mapped to improve vmspace management so we have a bit 1739 * of cleanup work to do here. 1740 */ 1741 void 1742 pmap_puninit(pmap_t pmap) 1743 { 1744 pv_entry_t pv; 1745 vm_page_t p; 1746 1747 KKASSERT(CPUMASK_TESTZERO(pmap->pm_active)); 1748 if ((pv = pmap->pm_pmlpv) != NULL) { 1749 if (pv_hold_try(pv) == 0) 1750 pv_lock(pv); 1751 KKASSERT(pv == pmap->pm_pmlpv); 1752 p = pmap_remove_pv_page(pv); 1753 pv_free(pv); 1754 pmap_kremove((vm_offset_t)pmap->pm_pml4); 1755 vm_page_busy_wait(p, FALSE, "pgpun"); 1756 KKASSERT(p->flags & (PG_FICTITIOUS|PG_UNMANAGED)); 1757 vm_page_unwire(p, 0); 1758 vm_page_flag_clear(p, PG_MAPPED | PG_WRITEABLE); 1759 1760 /* 1761 * XXX eventually clean out PML4 static entries and 1762 * use vm_page_free_zero() 1763 */ 1764 vm_page_free(p); 1765 pmap->pm_pmlpv = NULL; 1766 } 1767 if (pmap->pm_pml4) { 1768 KKASSERT(pmap->pm_pml4 != (void *)(PTOV_OFFSET + KPML4phys)); 1769 kmem_free(&kernel_map, (vm_offset_t)pmap->pm_pml4, PAGE_SIZE); 1770 pmap->pm_pml4 = NULL; 1771 } 1772 KKASSERT(pmap->pm_stats.resident_count == 0); 1773 KKASSERT(pmap->pm_stats.wired_count == 0); 1774 } 1775 1776 /* 1777 * Wire in kernel global address entries. To avoid a race condition 1778 * between pmap initialization and pmap_growkernel, this procedure 1779 * adds the pmap to the master list (which growkernel scans to update), 1780 * then copies the template. 1781 */ 1782 void 1783 pmap_pinit2(struct pmap *pmap) 1784 { 1785 spin_lock(&pmap_spin); 1786 TAILQ_INSERT_TAIL(&pmap_list, pmap, pm_pmnode); 1787 spin_unlock(&pmap_spin); 1788 } 1789 1790 /* 1791 * This routine is called when various levels in the page table need to 1792 * be populated. This routine cannot fail. 1793 * 1794 * This function returns two locked pv_entry's, one representing the 1795 * requested pv and one representing the requested pv's parent pv. If 1796 * the pv did not previously exist it will be mapped into its parent 1797 * and wired, otherwise no additional wire count will be added. 1798 */ 1799 static 1800 pv_entry_t 1801 pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, pv_entry_t *pvpp) 1802 { 1803 pt_entry_t *ptep; 1804 pv_entry_t pv; 1805 pv_entry_t pvp; 1806 vm_pindex_t pt_pindex; 1807 vm_page_t m; 1808 int isnew; 1809 int ispt; 1810 1811 /* 1812 * If the pv already exists and we aren't being asked for the 1813 * parent page table page we can just return it. A locked+held pv 1814 * is returned. 
The pv will also have a second hold related to the 1815 * pmap association that we don't have to worry about. 1816 */ 1817 ispt = 0; 1818 pv = pv_alloc(pmap, ptepindex, &isnew); 1819 if (isnew == 0 && pvpp == NULL) 1820 return(pv); 1821 1822 /* 1823 * Special case terminal PVs. These are not page table pages so 1824 * no vm_page is allocated (the caller supplied the vm_page). If 1825 * pvpp is non-NULL we are being asked to also removed the pt_pv 1826 * for this pv. 1827 * 1828 * Note that pt_pv's are only returned for user VAs. We assert that 1829 * a pt_pv is not being requested for kernel VAs. 1830 */ 1831 if (ptepindex < pmap_pt_pindex(0)) { 1832 if (ptepindex >= NUPTE_USER) 1833 KKASSERT(pvpp == NULL); 1834 else 1835 KKASSERT(pvpp != NULL); 1836 if (pvpp) { 1837 pt_pindex = NUPTE_TOTAL + (ptepindex >> NPTEPGSHIFT); 1838 pvp = pmap_allocpte(pmap, pt_pindex, NULL); 1839 if (isnew) 1840 vm_page_wire_quick(pvp->pv_m); 1841 *pvpp = pvp; 1842 } else { 1843 pvp = NULL; 1844 } 1845 return(pv); 1846 } 1847 1848 /* 1849 * Non-terminal PVs allocate a VM page to represent the page table, 1850 * so we have to resolve pvp and calculate ptepindex for the pvp 1851 * and then for the page table entry index in the pvp for 1852 * fall-through. 1853 */ 1854 if (ptepindex < pmap_pd_pindex(0)) { 1855 /* 1856 * pv is PT, pvp is PD 1857 */ 1858 ptepindex = (ptepindex - pmap_pt_pindex(0)) >> NPDEPGSHIFT; 1859 ptepindex += NUPTE_TOTAL + NUPT_TOTAL; 1860 pvp = pmap_allocpte(pmap, ptepindex, NULL); 1861 if (!isnew) 1862 goto notnew; 1863 1864 /* 1865 * PT index in PD 1866 */ 1867 ptepindex = pv->pv_pindex - pmap_pt_pindex(0); 1868 ptepindex &= ((1ul << NPDEPGSHIFT) - 1); 1869 ispt = 1; 1870 } else if (ptepindex < pmap_pdp_pindex(0)) { 1871 /* 1872 * pv is PD, pvp is PDP 1873 * 1874 * SIMPLE PMAP NOTE: Simple pmaps do not allocate above 1875 * the PD. 1876 */ 1877 ptepindex = (ptepindex - pmap_pd_pindex(0)) >> NPDPEPGSHIFT; 1878 ptepindex += NUPTE_TOTAL + NUPT_TOTAL + NUPD_TOTAL; 1879 1880 if (pmap->pm_flags & PMAP_FLAG_SIMPLE) { 1881 KKASSERT(pvpp == NULL); 1882 pvp = NULL; 1883 } else { 1884 pvp = pmap_allocpte(pmap, ptepindex, NULL); 1885 } 1886 if (!isnew) 1887 goto notnew; 1888 1889 /* 1890 * PD index in PDP 1891 */ 1892 ptepindex = pv->pv_pindex - pmap_pd_pindex(0); 1893 ptepindex &= ((1ul << NPDPEPGSHIFT) - 1); 1894 } else if (ptepindex < pmap_pml4_pindex()) { 1895 /* 1896 * pv is PDP, pvp is the root pml4 table 1897 */ 1898 pvp = pmap_allocpte(pmap, pmap_pml4_pindex(), NULL); 1899 if (!isnew) 1900 goto notnew; 1901 1902 /* 1903 * PDP index in PML4 1904 */ 1905 ptepindex = pv->pv_pindex - pmap_pdp_pindex(0); 1906 ptepindex &= ((1ul << NPML4EPGSHIFT) - 1); 1907 } else { 1908 /* 1909 * pv represents the top-level PML4, there is no parent. 1910 */ 1911 pvp = NULL; 1912 if (!isnew) 1913 goto notnew; 1914 } 1915 1916 /* 1917 * This code is only reached if isnew is TRUE and this is not a 1918 * terminal PV. We need to allocate a vm_page for the page table 1919 * at this level and enter it into the parent page table. 1920 * 1921 * page table pages are marked PG_WRITEABLE and PG_MAPPED. 
         */
        for (;;) {
                m = vm_page_alloc(NULL, pv->pv_pindex,
                                  VM_ALLOC_NORMAL | VM_ALLOC_SYSTEM |
                                  VM_ALLOC_INTERRUPT);
                if (m)
                        break;
                vm_wait(0);
        }
        vm_page_spin_lock(m);
        pmap_page_stats_adding(m);
        TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
        pv->pv_m = m;
        vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
        vm_page_spin_unlock(m);
        vm_page_unmanage(m);    /* m must be spinunlocked */

        if ((m->flags & PG_ZERO) == 0) {
                pmap_zero_page(VM_PAGE_TO_PHYS(m));
        }
#ifdef PMAP_DEBUG
        else {
                pmap_page_assertzero(VM_PAGE_TO_PHYS(m));
        }
#endif
        m->valid = VM_PAGE_BITS_ALL;
        vm_page_flag_clear(m, PG_ZERO);
        vm_page_wire(m);        /* wire for mapping in parent */

        /*
         * Wire the page into pvp, bump the wire-count for pvp's page table
         * page.  Bump the resident_count for the pmap.  There is no pvp
         * for the top level, address the pm_pml4[] array directly.
         *
         * If the caller wants the parent we return it, otherwise
         * we just put it away.
         *
         * No interlock is needed for pte 0 -> non-zero.
         *
         * In the situation where *ptep is valid we might have an unmanaged
         * page table page shared from another page table which we need to
         * unshare before installing our private page table page.
         */
        if (pvp) {
                ptep = pv_pte_lookup(pvp, ptepindex);
                if (*ptep & pmap->pmap_bits[PG_V_IDX]) {
                        pt_entry_t pte;
                        pmap_inval_info info;

                        if (ispt == 0) {
                                panic("pmap_allocpte: unexpected pte %p/%d",
                                      pvp, (int)ptepindex);
                        }
                        pmap_inval_init(&info);
                        pmap_inval_interlock(&info, pmap, (vm_offset_t)-1);
                        pte = pte_load_clear(ptep);
                        pmap_inval_deinterlock(&info, pmap);
                        pmap_inval_done(&info);
                        if (vm_page_unwire_quick(
                                        PHYS_TO_VM_PAGE(pte & PG_FRAME))) {
                                panic("pmap_allocpte: shared pgtable "
                                      "pg bad wirecount");
                        }
                        atomic_add_long(&pmap->pm_stats.resident_count, -1);
                } else {
                        vm_page_wire_quick(pvp->pv_m);
                }
                *ptep = VM_PAGE_TO_PHYS(m) |
                        (pmap->pmap_bits[PG_U_IDX] |
                         pmap->pmap_bits[PG_RW_IDX] |
                         pmap->pmap_bits[PG_V_IDX] |
                         pmap->pmap_bits[PG_A_IDX] |
                         pmap->pmap_bits[PG_M_IDX]);
        }
        vm_page_wakeup(m);
notnew:
        if (pvpp)
                *pvpp = pvp;
        else if (pvp)
                pv_put(pvp);
        return (pv);
}

/*
 * This version of pmap_allocpte() checks for possible segment optimizations
 * that would allow page-table sharing.  It can be called for terminal
 * page or page table page ptepindex's.
 *
 * The function is called with page table page ptepindex's for fictitious
 * and unmanaged terminal pages.  That is, we don't want to allocate a
 * terminal pv, we just want the pt_pv.  pvpp is usually passed as NULL
 * for this case.
 *
 * This function can return a pv and *pvpp associated with the passed-in
 * pmap OR a pv and *pvpp associated with the shared pmap.  In the latter
 * case an unmanaged page table page will be entered into the passed-in
 * pmap.
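 *
 * Roughly, the optimization only applies when the backing vm_map_entry
 * is VM_INHERIT_SHARE, maps a normal VM object (not a device object),
 * and both entry->offset and entry->start are SEG_SIZE aligned;
 * anything else falls through to the normal pmap_allocpte() path.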
2018 */ 2019 static 2020 pv_entry_t 2021 pmap_allocpte_seg(pmap_t pmap, vm_pindex_t ptepindex, pv_entry_t *pvpp, 2022 vm_map_entry_t entry, vm_offset_t va) 2023 { 2024 struct pmap_inval_info info; 2025 vm_object_t object; 2026 pmap_t obpmap; 2027 pmap_t *obpmapp; 2028 vm_offset_t b; 2029 pv_entry_t pte_pv; /* in original or shared pmap */ 2030 pv_entry_t pt_pv; /* in original or shared pmap */ 2031 pv_entry_t proc_pd_pv; /* in original pmap */ 2032 pv_entry_t proc_pt_pv; /* in original pmap */ 2033 pv_entry_t xpv; /* PT in shared pmap */ 2034 pd_entry_t *pt; /* PT entry in PD of original pmap */ 2035 pd_entry_t opte; /* contents of *pt */ 2036 pd_entry_t npte; /* contents of *pt */ 2037 vm_page_t m; 2038 2039 retry: 2040 /* 2041 * Basic tests, require a non-NULL vm_map_entry, require proper 2042 * alignment and type for the vm_map_entry, require that the 2043 * underlying object already be allocated. 2044 * 2045 * We allow almost any type of object to use this optimization. 2046 * The object itself does NOT have to be sized to a multiple of the 2047 * segment size, but the memory mapping does. 2048 * 2049 * XXX don't handle devices currently, because VM_PAGE_TO_PHYS() 2050 * won't work as expected. 2051 */ 2052 if (entry == NULL || 2053 pmap_mmu_optimize == 0 || /* not enabled */ 2054 ptepindex >= pmap_pd_pindex(0) || /* not terminal or pt */ 2055 entry->inheritance != VM_INHERIT_SHARE || /* not shared */ 2056 entry->maptype != VM_MAPTYPE_NORMAL || /* weird map type */ 2057 entry->object.vm_object == NULL || /* needs VM object */ 2058 entry->object.vm_object->type == OBJT_DEVICE || /* ick */ 2059 entry->object.vm_object->type == OBJT_MGTDEVICE || /* ick */ 2060 (entry->offset & SEG_MASK) || /* must be aligned */ 2061 (entry->start & SEG_MASK)) { 2062 return(pmap_allocpte(pmap, ptepindex, pvpp)); 2063 } 2064 2065 /* 2066 * Make sure the full segment can be represented. 2067 */ 2068 b = va & ~(vm_offset_t)SEG_MASK; 2069 if (b < entry->start || b + SEG_SIZE > entry->end) 2070 return(pmap_allocpte(pmap, ptepindex, pvpp)); 2071 2072 /* 2073 * If the full segment can be represented dive the VM object's 2074 * shared pmap, allocating as required. 2075 */ 2076 object = entry->object.vm_object; 2077 2078 if (entry->protection & VM_PROT_WRITE) 2079 obpmapp = &object->md.pmap_rw; 2080 else 2081 obpmapp = &object->md.pmap_ro; 2082 2083 #ifdef PMAP_DEBUG2 2084 if (pmap_enter_debug > 0) { 2085 --pmap_enter_debug; 2086 kprintf("pmap_allocpte_seg: va=%jx prot %08x o=%p " 2087 "obpmapp %p %p\n", 2088 va, entry->protection, object, 2089 obpmapp, *obpmapp); 2090 kprintf("pmap_allocpte_seg: entry %p %jx-%jx\n", 2091 entry, entry->start, entry->end); 2092 } 2093 #endif 2094 2095 /* 2096 * We allocate what appears to be a normal pmap but because portions 2097 * of this pmap are shared with other unrelated pmaps we have to 2098 * set pm_active to point to all cpus. 2099 * 2100 * XXX Currently using pmap_spin to interlock the update, can't use 2101 * vm_object_hold/drop because the token might already be held 2102 * shared OR exclusive and we don't know. 
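 *
 * The loop below allocates a candidate pmap outside the spinlock and
 * re-checks *obpmapp under pmap_spin; the loser of the race releases
 * its freshly built pmap and uses the winner's instead.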
2103 */ 2104 while ((obpmap = *obpmapp) == NULL) { 2105 obpmap = kmalloc(sizeof(*obpmap), M_OBJPMAP, M_WAITOK|M_ZERO); 2106 pmap_pinit_simple(obpmap); 2107 pmap_pinit2(obpmap); 2108 spin_lock(&pmap_spin); 2109 if (*obpmapp != NULL) { 2110 /* 2111 * Handle race 2112 */ 2113 spin_unlock(&pmap_spin); 2114 pmap_release(obpmap); 2115 pmap_puninit(obpmap); 2116 kfree(obpmap, M_OBJPMAP); 2117 obpmap = *obpmapp; /* safety */ 2118 } else { 2119 obpmap->pm_active = smp_active_mask; 2120 *obpmapp = obpmap; 2121 spin_unlock(&pmap_spin); 2122 } 2123 } 2124 2125 /* 2126 * Layering is: PTE, PT, PD, PDP, PML4. We have to return the 2127 * pte/pt using the shared pmap from the object but also adjust 2128 * the process pmap's page table page as a side effect. 2129 */ 2130 2131 /* 2132 * Resolve the terminal PTE and PT in the shared pmap. This is what 2133 * we will return. This is true if ptepindex represents a terminal 2134 * page, otherwise pte_pv is actually the PT and pt_pv is actually 2135 * the PD. 2136 */ 2137 pt_pv = NULL; 2138 pte_pv = pmap_allocpte(obpmap, ptepindex, &pt_pv); 2139 if (ptepindex >= pmap_pt_pindex(0)) 2140 xpv = pte_pv; 2141 else 2142 xpv = pt_pv; 2143 2144 /* 2145 * Resolve the PD in the process pmap so we can properly share the 2146 * page table page. Lock order is bottom-up (leaf first)! 2147 * 2148 * NOTE: proc_pt_pv can be NULL. 2149 */ 2150 proc_pt_pv = pv_get(pmap, pmap_pt_pindex(b)); 2151 proc_pd_pv = pmap_allocpte(pmap, pmap_pd_pindex(b), NULL); 2152 #ifdef PMAP_DEBUG2 2153 if (pmap_enter_debug > 0) { 2154 --pmap_enter_debug; 2155 kprintf("proc_pt_pv %p (wc %d) pd_pv %p va=%jx\n", 2156 proc_pt_pv, 2157 (proc_pt_pv ? proc_pt_pv->pv_m->wire_count : -1), 2158 proc_pd_pv, 2159 va); 2160 } 2161 #endif 2162 2163 /* 2164 * xpv is the page table page pv from the shared object 2165 * (for convenience), from above. 2166 * 2167 * Calculate the pte value for the PT to load into the process PD. 2168 * If we have to change it we must properly dispose of the previous 2169 * entry. 2170 */ 2171 pt = pv_pte_lookup(proc_pd_pv, pmap_pt_index(b)); 2172 npte = VM_PAGE_TO_PHYS(xpv->pv_m) | 2173 (pmap->pmap_bits[PG_U_IDX] | 2174 pmap->pmap_bits[PG_RW_IDX] | 2175 pmap->pmap_bits[PG_V_IDX] | 2176 pmap->pmap_bits[PG_A_IDX] | 2177 pmap->pmap_bits[PG_M_IDX]); 2178 2179 /* 2180 * Dispose of previous page table page if it was local to the 2181 * process pmap. If the old pt is not empty we cannot dispose of it 2182 * until we clean it out. This case should not arise very often so 2183 * it is not optimized. 2184 */ 2185 if (proc_pt_pv) { 2186 if (proc_pt_pv->pv_m->wire_count != 1) { 2187 pv_put(proc_pd_pv); 2188 pv_put(proc_pt_pv); 2189 pv_put(pt_pv); 2190 pv_put(pte_pv); 2191 pmap_remove(pmap, 2192 va & ~(vm_offset_t)SEG_MASK, 2193 (va + SEG_SIZE) & ~(vm_offset_t)SEG_MASK); 2194 goto retry; 2195 } 2196 2197 /* 2198 * The release call will indirectly clean out *pt 2199 */ 2200 pmap_inval_init(&info); 2201 pmap_release_pv(&info, proc_pt_pv, proc_pd_pv); 2202 pmap_inval_done(&info); 2203 proc_pt_pv = NULL; 2204 /* relookup */ 2205 pt = pv_pte_lookup(proc_pd_pv, pmap_pt_index(b)); 2206 } 2207 2208 /* 2209 * Handle remaining cases. 
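 *
 * Three cases follow: *pt is empty and npte is simply installed
 * (wiring both the shared PT page and the process PD page), *pt
 * already equals npte and nothing needs to change, or *pt holds a
 * different entry which is replaced under an invalidation interlock
 * and the old page table page is unwired.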
2210 */ 2211 if (*pt == 0) { 2212 *pt = npte; 2213 vm_page_wire_quick(xpv->pv_m); 2214 vm_page_wire_quick(proc_pd_pv->pv_m); 2215 atomic_add_long(&pmap->pm_stats.resident_count, 1); 2216 } else if (*pt != npte) { 2217 pmap_inval_init(&info); 2218 pmap_inval_interlock(&info, pmap, (vm_offset_t)-1); 2219 2220 opte = pte_load_clear(pt); 2221 KKASSERT(opte && opte != npte); 2222 2223 *pt = npte; 2224 vm_page_wire_quick(xpv->pv_m); /* pgtable pg that is npte */ 2225 2226 /* 2227 * Clean up opte, bump the wire_count for the process 2228 * PD page representing the new entry if it was 2229 * previously empty. 2230 * 2231 * If the entry was not previously empty and we have 2232 * a PT in the proc pmap then opte must match that 2233 * pt. The proc pt must be retired (this is done 2234 * later on in this procedure). 2235 * 2236 * NOTE: replacing valid pte, wire_count on proc_pd_pv 2237 * stays the same. 2238 */ 2239 KKASSERT(opte & pmap->pmap_bits[PG_V_IDX]); 2240 m = PHYS_TO_VM_PAGE(opte & PG_FRAME); 2241 if (vm_page_unwire_quick(m)) { 2242 panic("pmap_allocpte_seg: " 2243 "bad wire count %p", 2244 m); 2245 } 2246 2247 pmap_inval_deinterlock(&info, pmap); 2248 pmap_inval_done(&info); 2249 } 2250 2251 /* 2252 * The existing process page table was replaced and must be destroyed 2253 * here. 2254 */ 2255 if (proc_pd_pv) 2256 pv_put(proc_pd_pv); 2257 if (pvpp) 2258 *pvpp = pt_pv; 2259 else 2260 pv_put(pt_pv); 2261 2262 return (pte_pv); 2263 } 2264 2265 /* 2266 * Release any resources held by the given physical map. 2267 * 2268 * Called when a pmap initialized by pmap_pinit is being released. Should 2269 * only be called if the map contains no valid mappings. 2270 * 2271 * Caller must hold pmap->pm_token 2272 */ 2273 struct pmap_release_info { 2274 pmap_t pmap; 2275 int retry; 2276 }; 2277 2278 static int pmap_release_callback(pv_entry_t pv, void *data); 2279 2280 void 2281 pmap_release(struct pmap *pmap) 2282 { 2283 struct pmap_release_info info; 2284 2285 KASSERT(CPUMASK_TESTZERO(pmap->pm_active), 2286 ("pmap still active! %016jx", 2287 (uintmax_t)CPUMASK_LOWMASK(pmap->pm_active))); 2288 2289 spin_lock(&pmap_spin); 2290 TAILQ_REMOVE(&pmap_list, pmap, pm_pmnode); 2291 spin_unlock(&pmap_spin); 2292 2293 /* 2294 * Pull pv's off the RB tree in order from low to high and release 2295 * each page. 2296 */ 2297 info.pmap = pmap; 2298 do { 2299 info.retry = 0; 2300 spin_lock(&pmap->pm_spin); 2301 RB_SCAN(pv_entry_rb_tree, &pmap->pm_pvroot, NULL, 2302 pmap_release_callback, &info); 2303 spin_unlock(&pmap->pm_spin); 2304 } while (info.retry); 2305 2306 2307 /* 2308 * One resident page (the pml4 page) should remain. 2309 * No wired pages should remain. 2310 */ 2311 KKASSERT(pmap->pm_stats.resident_count == 2312 ((pmap->pm_flags & PMAP_FLAG_SIMPLE) ? 0 : 1)); 2313 2314 KKASSERT(pmap->pm_stats.wired_count == 0); 2315 } 2316 2317 static int 2318 pmap_release_callback(pv_entry_t pv, void *data) 2319 { 2320 struct pmap_release_info *info = data; 2321 pmap_t pmap = info->pmap; 2322 int r; 2323 2324 if (pv_hold_try(pv)) { 2325 spin_unlock(&pmap->pm_spin); 2326 } else { 2327 spin_unlock(&pmap->pm_spin); 2328 pv_lock(pv); 2329 } 2330 if (pv->pv_pmap != pmap) { 2331 pv_put(pv); 2332 spin_lock(&pmap->pm_spin); 2333 info->retry = 1; 2334 return(-1); 2335 } 2336 r = pmap_release_pv(NULL, pv, NULL); 2337 spin_lock(&pmap->pm_spin); 2338 return(r); 2339 } 2340 2341 /* 2342 * Called with held (i.e. also locked) pv. This function will dispose of 2343 * the lock along with the pv. 
2344 * 2345 * If the caller already holds the locked parent page table for pv it 2346 * must pass it as pvp, allowing us to avoid a deadlock, else it can 2347 * pass NULL for pvp. 2348 */ 2349 static int 2350 pmap_release_pv(struct pmap_inval_info *info, pv_entry_t pv, pv_entry_t pvp) 2351 { 2352 vm_page_t p; 2353 2354 /* 2355 * The pmap is currently not spinlocked, pv is held+locked. 2356 * Remove the pv's page from its parent's page table. The 2357 * parent's page table page's wire_count will be decremented. 2358 * 2359 * This will clean out the pte at any level of the page table. 2360 * If info is not NULL the appropriate invlpg/invltlb/smp 2361 * invalidation will be made. 2362 */ 2363 pmap_remove_pv_pte(pv, pvp, info); 2364 2365 /* 2366 * Terminal pvs are unhooked from their vm_pages. Because 2367 * terminal pages aren't page table pages they aren't wired 2368 * by us, so we have to be sure not to unwire them either. 2369 */ 2370 if (pv->pv_pindex < pmap_pt_pindex(0)) { 2371 pmap_remove_pv_page(pv); 2372 goto skip; 2373 } 2374 2375 /* 2376 * We leave the top-level page table page cached, wired, and 2377 * mapped in the pmap until the dtor function (pmap_puninit()) 2378 * gets called. 2379 * 2380 * Since we are leaving the top-level pv intact we need 2381 * to break out of what would otherwise be an infinite loop. 2382 */ 2383 if (pv->pv_pindex == pmap_pml4_pindex()) { 2384 pv_put(pv); 2385 return(-1); 2386 } 2387 2388 /* 2389 * For page table pages (other than the top-level page), 2390 * remove and free the vm_page. The representitive mapping 2391 * removed above by pmap_remove_pv_pte() did not undo the 2392 * last wire_count so we have to do that as well. 2393 */ 2394 p = pmap_remove_pv_page(pv); 2395 vm_page_busy_wait(p, FALSE, "pmaprl"); 2396 if (p->wire_count != 1) { 2397 kprintf("p->wire_count was %016lx %d\n", 2398 pv->pv_pindex, p->wire_count); 2399 } 2400 KKASSERT(p->wire_count == 1); 2401 KKASSERT(p->flags & PG_UNMANAGED); 2402 2403 vm_page_unwire(p, 0); 2404 KKASSERT(p->wire_count == 0); 2405 2406 /* 2407 * Theoretically this page, if not the pml4 page, should contain 2408 * all-zeros. But its just too dangerous to mark it PG_ZERO. Free 2409 * normally. 2410 */ 2411 vm_page_free(p); 2412 skip: 2413 pv_free(pv); 2414 return 0; 2415 } 2416 2417 /* 2418 * This function will remove the pte associated with a pv from its parent. 2419 * Terminal pv's are supported. The removal will be interlocked if info 2420 * is non-NULL. The caller must dispose of pv instead of just unlocking 2421 * it. 2422 * 2423 * The wire count will be dropped on the parent page table. The wire 2424 * count on the page being removed (pv->pv_m) from the parent page table 2425 * is NOT touched. Note that terminal pages will not have any additional 2426 * wire counts while page table pages will have at least one representing 2427 * the mapping, plus others representing sub-mappings. 2428 * 2429 * NOTE: Cannot be called on kernel page table pages, only KVM terminal 2430 * pages and user page table and terminal pages. 2431 * 2432 * The pv must be locked. 2433 * 2434 * XXX must lock parent pv's if they exist to remove pte XXX 2435 */ 2436 static 2437 void 2438 pmap_remove_pv_pte(pv_entry_t pv, pv_entry_t pvp, struct pmap_inval_info *info) 2439 { 2440 vm_pindex_t ptepindex = pv->pv_pindex; 2441 pmap_t pmap = pv->pv_pmap; 2442 vm_page_t p; 2443 int gotpvp = 0; 2444 2445 KKASSERT(pmap); 2446 2447 if (ptepindex == pmap_pml4_pindex()) { 2448 /* 2449 * We are the top level pml4 table, there is no parent. 
2450 */ 2451 p = pmap->pm_pmlpv->pv_m; 2452 } else if (ptepindex >= pmap_pdp_pindex(0)) { 2453 /* 2454 * Remove a PDP page from the pml4e. This can only occur 2455 * with user page tables. We do not have to lock the 2456 * pml4 PV so just ignore pvp. 2457 */ 2458 vm_pindex_t pml4_pindex; 2459 vm_pindex_t pdp_index; 2460 pml4_entry_t *pdp; 2461 2462 pdp_index = ptepindex - pmap_pdp_pindex(0); 2463 if (pvp == NULL) { 2464 pml4_pindex = pmap_pml4_pindex(); 2465 pvp = pv_get(pv->pv_pmap, pml4_pindex); 2466 KKASSERT(pvp); 2467 gotpvp = 1; 2468 } 2469 pdp = &pmap->pm_pml4[pdp_index & ((1ul << NPML4EPGSHIFT) - 1)]; 2470 KKASSERT((*pdp & pmap->pmap_bits[PG_V_IDX]) != 0); 2471 p = PHYS_TO_VM_PAGE(*pdp & PG_FRAME); 2472 if (info) { 2473 pmap_inval_interlock(info, pmap, (vm_offset_t)-1); 2474 pte_load_clear(pdp); 2475 pmap_inval_deinterlock(info, pmap); 2476 } else { 2477 *pdp = 0; 2478 } 2479 } else if (ptepindex >= pmap_pd_pindex(0)) { 2480 /* 2481 * Remove a PD page from the pdp 2482 * 2483 * SIMPLE PMAP NOTE: Non-existant pvp's are ok in the case 2484 * of a simple pmap because it stops at 2485 * the PD page. 2486 */ 2487 vm_pindex_t pdp_pindex; 2488 vm_pindex_t pd_index; 2489 pdp_entry_t *pd; 2490 2491 pd_index = ptepindex - pmap_pd_pindex(0); 2492 2493 if (pvp == NULL) { 2494 pdp_pindex = NUPTE_TOTAL + NUPT_TOTAL + NUPD_TOTAL + 2495 (pd_index >> NPML4EPGSHIFT); 2496 pvp = pv_get(pv->pv_pmap, pdp_pindex); 2497 if (pvp) 2498 gotpvp = 1; 2499 } 2500 if (pvp) { 2501 pd = pv_pte_lookup(pvp, pd_index & 2502 ((1ul << NPDPEPGSHIFT) - 1)); 2503 KKASSERT((*pd & pmap->pmap_bits[PG_V_IDX]) != 0); 2504 p = PHYS_TO_VM_PAGE(*pd & PG_FRAME); 2505 if (info) { 2506 pmap_inval_interlock(info, pmap, 2507 (vm_offset_t)-1); 2508 pte_load_clear(pd); 2509 pmap_inval_deinterlock(info, pmap); 2510 } else { 2511 *pd = 0; 2512 } 2513 } else { 2514 KKASSERT(pmap->pm_flags & PMAP_FLAG_SIMPLE); 2515 p = pv->pv_m; /* degenerate test later */ 2516 } 2517 } else if (ptepindex >= pmap_pt_pindex(0)) { 2518 /* 2519 * Remove a PT page from the pd 2520 */ 2521 vm_pindex_t pd_pindex; 2522 vm_pindex_t pt_index; 2523 pd_entry_t *pt; 2524 2525 pt_index = ptepindex - pmap_pt_pindex(0); 2526 2527 if (pvp == NULL) { 2528 pd_pindex = NUPTE_TOTAL + NUPT_TOTAL + 2529 (pt_index >> NPDPEPGSHIFT); 2530 pvp = pv_get(pv->pv_pmap, pd_pindex); 2531 KKASSERT(pvp); 2532 gotpvp = 1; 2533 } 2534 pt = pv_pte_lookup(pvp, pt_index & ((1ul << NPDPEPGSHIFT) - 1)); 2535 KKASSERT((*pt & pmap->pmap_bits[PG_V_IDX]) != 0); 2536 p = PHYS_TO_VM_PAGE(*pt & PG_FRAME); 2537 if (info) { 2538 pmap_inval_interlock(info, pmap, (vm_offset_t)-1); 2539 pte_load_clear(pt); 2540 pmap_inval_deinterlock(info, pmap); 2541 } else { 2542 *pt = 0; 2543 } 2544 } else { 2545 /* 2546 * Remove a PTE from the PT page 2547 * 2548 * NOTE: pv's must be locked bottom-up to avoid deadlocking. 2549 * pv is a pte_pv so we can safely lock pt_pv. 2550 * 2551 * NOTE: FICTITIOUS pages may have multiple physical mappings 2552 * so PHYS_TO_VM_PAGE() will not necessarily work for 2553 * terminal ptes. 
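 *
 * That is why the code below falls back to pv->pv_m when the pte has
 * the PG_DEVICE_IDX bit set instead of trusting PHYS_TO_VM_PAGE().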
2554 */ 2555 vm_pindex_t pt_pindex; 2556 pt_entry_t *ptep; 2557 pt_entry_t pte; 2558 vm_offset_t va; 2559 2560 pt_pindex = ptepindex >> NPTEPGSHIFT; 2561 va = (vm_offset_t)ptepindex << PAGE_SHIFT; 2562 2563 if (ptepindex >= NUPTE_USER) { 2564 ptep = vtopte(ptepindex << PAGE_SHIFT); 2565 KKASSERT(pvp == NULL); 2566 } else { 2567 if (pvp == NULL) { 2568 pt_pindex = NUPTE_TOTAL + 2569 (ptepindex >> NPDPEPGSHIFT); 2570 pvp = pv_get(pv->pv_pmap, pt_pindex); 2571 KKASSERT(pvp); 2572 gotpvp = 1; 2573 } 2574 ptep = pv_pte_lookup(pvp, ptepindex & 2575 ((1ul << NPDPEPGSHIFT) - 1)); 2576 } 2577 2578 if (info) 2579 pmap_inval_interlock(info, pmap, va); 2580 pte = pte_load_clear(ptep); 2581 if (info) 2582 pmap_inval_deinterlock(info, pmap); 2583 else 2584 cpu_invlpg((void *)va); 2585 2586 /* 2587 * Now update the vm_page_t 2588 */ 2589 if ((pte & (pmap->pmap_bits[PG_MANAGED_IDX] | pmap->pmap_bits[PG_V_IDX])) != 2590 (pmap->pmap_bits[PG_MANAGED_IDX]|pmap->pmap_bits[PG_V_IDX])) { 2591 kprintf("remove_pte badpte %016lx %016lx %d\n", 2592 pte, pv->pv_pindex, 2593 pv->pv_pindex < pmap_pt_pindex(0)); 2594 } 2595 /* PHYS_TO_VM_PAGE() will not work for FICTITIOUS pages */ 2596 /*KKASSERT((pte & (PG_MANAGED|PG_V)) == (PG_MANAGED|PG_V));*/ 2597 if (pte & pmap->pmap_bits[PG_DEVICE_IDX]) 2598 p = pv->pv_m; 2599 else 2600 p = PHYS_TO_VM_PAGE(pte & PG_FRAME); 2601 /* p = pv->pv_m; */ 2602 2603 if (pte & pmap->pmap_bits[PG_M_IDX]) { 2604 if (pmap_track_modified(ptepindex)) 2605 vm_page_dirty(p); 2606 } 2607 if (pte & pmap->pmap_bits[PG_A_IDX]) { 2608 vm_page_flag_set(p, PG_REFERENCED); 2609 } 2610 if (pte & pmap->pmap_bits[PG_W_IDX]) 2611 atomic_add_long(&pmap->pm_stats.wired_count, -1); 2612 if (pte & pmap->pmap_bits[PG_G_IDX]) 2613 cpu_invlpg((void *)va); 2614 } 2615 2616 /* 2617 * Unwire the parent page table page. The wire_count cannot go below 2618 * 1 here because the parent page table page is itself still mapped. 2619 * 2620 * XXX remove the assertions later. 2621 */ 2622 KKASSERT(pv->pv_m == p); 2623 if (pvp && vm_page_unwire_quick(pvp->pv_m)) 2624 panic("pmap_remove_pv_pte: Insufficient wire_count"); 2625 2626 if (gotpvp) 2627 pv_put(pvp); 2628 } 2629 2630 /* 2631 * Remove the vm_page association to a pv. The pv must be locked. 2632 */ 2633 static 2634 vm_page_t 2635 pmap_remove_pv_page(pv_entry_t pv) 2636 { 2637 vm_page_t m; 2638 2639 m = pv->pv_m; 2640 KKASSERT(m); 2641 vm_page_spin_lock(m); 2642 pv->pv_m = NULL; 2643 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); 2644 pmap_page_stats_deleting(m); 2645 /* 2646 if (m->object) 2647 atomic_add_int(&m->object->agg_pv_list_count, -1); 2648 */ 2649 if (TAILQ_EMPTY(&m->md.pv_list)) 2650 vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE); 2651 vm_page_spin_unlock(m); 2652 return(m); 2653 } 2654 2655 /* 2656 * Grow the number of kernel page table entries, if needed. 2657 * 2658 * This routine is always called to validate any address space 2659 * beyond KERNBASE (for kldloads). kernel_vm_end only governs the address 2660 * space below KERNBASE. 
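 *
 * The requested range is expanded to page-table granularity before it
 * is filled in: with 4KiB pages and 512 ptes per page table the
 * rounddown2()/roundup2() calls below align kstart and kend to 2MB
 * boundaries (PAGE_SIZE * NPTEPG).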
2661 */ 2662 void 2663 pmap_growkernel(vm_offset_t kstart, vm_offset_t kend) 2664 { 2665 vm_paddr_t paddr; 2666 vm_offset_t ptppaddr; 2667 vm_page_t nkpg; 2668 pd_entry_t *pt, newpt; 2669 pdp_entry_t newpd; 2670 int update_kernel_vm_end; 2671 2672 /* 2673 * bootstrap kernel_vm_end on first real VM use 2674 */ 2675 if (kernel_vm_end == 0) { 2676 kernel_vm_end = VM_MIN_KERNEL_ADDRESS; 2677 nkpt = 0; 2678 while ((*pmap_pt(&kernel_pmap, kernel_vm_end) & kernel_pmap.pmap_bits[PG_V_IDX]) != 0) { 2679 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & 2680 ~(PAGE_SIZE * NPTEPG - 1); 2681 nkpt++; 2682 if (kernel_vm_end - 1 >= kernel_map.max_offset) { 2683 kernel_vm_end = kernel_map.max_offset; 2684 break; 2685 } 2686 } 2687 } 2688 2689 /* 2690 * Fill in the gaps. kernel_vm_end is only adjusted for ranges 2691 * below KERNBASE. Ranges above KERNBASE are kldloaded and we 2692 * do not want to force-fill 128G worth of page tables. 2693 */ 2694 if (kstart < KERNBASE) { 2695 if (kstart > kernel_vm_end) 2696 kstart = kernel_vm_end; 2697 KKASSERT(kend <= KERNBASE); 2698 update_kernel_vm_end = 1; 2699 } else { 2700 update_kernel_vm_end = 0; 2701 } 2702 2703 kstart = rounddown2(kstart, PAGE_SIZE * NPTEPG); 2704 kend = roundup2(kend, PAGE_SIZE * NPTEPG); 2705 2706 if (kend - 1 >= kernel_map.max_offset) 2707 kend = kernel_map.max_offset; 2708 2709 while (kstart < kend) { 2710 pt = pmap_pt(&kernel_pmap, kstart); 2711 if (pt == NULL) { 2712 /* We need a new PDP entry */ 2713 nkpg = vm_page_alloc(NULL, nkpt, 2714 VM_ALLOC_NORMAL | 2715 VM_ALLOC_SYSTEM | 2716 VM_ALLOC_INTERRUPT); 2717 if (nkpg == NULL) { 2718 panic("pmap_growkernel: no memory to grow " 2719 "kernel"); 2720 } 2721 paddr = VM_PAGE_TO_PHYS(nkpg); 2722 if ((nkpg->flags & PG_ZERO) == 0) 2723 pmap_zero_page(paddr); 2724 vm_page_flag_clear(nkpg, PG_ZERO); 2725 newpd = (pdp_entry_t) 2726 (paddr | 2727 kernel_pmap.pmap_bits[PG_V_IDX] | 2728 kernel_pmap.pmap_bits[PG_RW_IDX] | 2729 kernel_pmap.pmap_bits[PG_A_IDX] | 2730 kernel_pmap.pmap_bits[PG_M_IDX]); 2731 *pmap_pd(&kernel_pmap, kstart) = newpd; 2732 nkpt++; 2733 continue; /* try again */ 2734 } 2735 if ((*pt & kernel_pmap.pmap_bits[PG_V_IDX]) != 0) { 2736 kstart = (kstart + PAGE_SIZE * NPTEPG) & 2737 ~(PAGE_SIZE * NPTEPG - 1); 2738 if (kstart - 1 >= kernel_map.max_offset) { 2739 kstart = kernel_map.max_offset; 2740 break; 2741 } 2742 continue; 2743 } 2744 2745 /* 2746 * This index is bogus, but out of the way 2747 */ 2748 nkpg = vm_page_alloc(NULL, nkpt, 2749 VM_ALLOC_NORMAL | 2750 VM_ALLOC_SYSTEM | 2751 VM_ALLOC_INTERRUPT); 2752 if (nkpg == NULL) 2753 panic("pmap_growkernel: no memory to grow kernel"); 2754 2755 vm_page_wire(nkpg); 2756 ptppaddr = VM_PAGE_TO_PHYS(nkpg); 2757 pmap_zero_page(ptppaddr); 2758 vm_page_flag_clear(nkpg, PG_ZERO); 2759 newpt = (pd_entry_t) (ptppaddr | 2760 kernel_pmap.pmap_bits[PG_V_IDX] | 2761 kernel_pmap.pmap_bits[PG_RW_IDX] | 2762 kernel_pmap.pmap_bits[PG_A_IDX] | 2763 kernel_pmap.pmap_bits[PG_M_IDX]); 2764 *pmap_pt(&kernel_pmap, kstart) = newpt; 2765 nkpt++; 2766 2767 kstart = (kstart + PAGE_SIZE * NPTEPG) & 2768 ~(PAGE_SIZE * NPTEPG - 1); 2769 2770 if (kstart - 1 >= kernel_map.max_offset) { 2771 kstart = kernel_map.max_offset; 2772 break; 2773 } 2774 } 2775 2776 /* 2777 * Only update kernel_vm_end for areas below KERNBASE. 2778 */ 2779 if (update_kernel_vm_end && kernel_vm_end < kstart) 2780 kernel_vm_end = kstart; 2781 } 2782 2783 /* 2784 * Add a reference to the specified pmap. 
2785 */ 2786 void 2787 pmap_reference(pmap_t pmap) 2788 { 2789 if (pmap != NULL) { 2790 lwkt_gettoken(&pmap->pm_token); 2791 ++pmap->pm_count; 2792 lwkt_reltoken(&pmap->pm_token); 2793 } 2794 } 2795 2796 /*************************************************** 2797 * page management routines. 2798 ***************************************************/ 2799 2800 /* 2801 * Hold a pv without locking it 2802 */ 2803 static void 2804 pv_hold(pv_entry_t pv) 2805 { 2806 atomic_add_int(&pv->pv_hold, 1); 2807 } 2808 2809 /* 2810 * Hold a pv_entry, preventing its destruction. TRUE is returned if the pv 2811 * was successfully locked, FALSE if it wasn't. The caller must dispose of 2812 * the pv properly. 2813 * 2814 * Either the pmap->pm_spin or the related vm_page_spin (if traversing a 2815 * pv list via its page) must be held by the caller. 2816 */ 2817 static int 2818 _pv_hold_try(pv_entry_t pv PMAP_DEBUG_DECL) 2819 { 2820 u_int count; 2821 2822 /* 2823 * Critical path shortcut expects pv to already have one ref 2824 * (for the pv->pv_pmap). 2825 */ 2826 if (atomic_cmpset_int(&pv->pv_hold, 1, PV_HOLD_LOCKED | 2)) { 2827 #ifdef PMAP_DEBUG 2828 pv->pv_func = func; 2829 pv->pv_line = lineno; 2830 #endif 2831 return TRUE; 2832 } 2833 2834 for (;;) { 2835 count = pv->pv_hold; 2836 cpu_ccfence(); 2837 if ((count & PV_HOLD_LOCKED) == 0) { 2838 if (atomic_cmpset_int(&pv->pv_hold, count, 2839 (count + 1) | PV_HOLD_LOCKED)) { 2840 #ifdef PMAP_DEBUG 2841 pv->pv_func = func; 2842 pv->pv_line = lineno; 2843 #endif 2844 return TRUE; 2845 } 2846 } else { 2847 if (atomic_cmpset_int(&pv->pv_hold, count, count + 1)) 2848 return FALSE; 2849 } 2850 /* retry */ 2851 } 2852 } 2853 2854 /* 2855 * Drop a previously held pv_entry which could not be locked, allowing its 2856 * destruction. 2857 * 2858 * Must not be called with a spinlock held as we might zfree() the pv if it 2859 * is no longer associated with a pmap and this was the last hold count. 2860 */ 2861 static void 2862 pv_drop(pv_entry_t pv) 2863 { 2864 u_int count; 2865 2866 for (;;) { 2867 count = pv->pv_hold; 2868 cpu_ccfence(); 2869 KKASSERT((count & PV_HOLD_MASK) > 0); 2870 KKASSERT((count & (PV_HOLD_LOCKED | PV_HOLD_MASK)) != 2871 (PV_HOLD_LOCKED | 1)); 2872 if (atomic_cmpset_int(&pv->pv_hold, count, count - 1)) { 2873 if ((count & PV_HOLD_MASK) == 1) { 2874 #ifdef PMAP_DEBUG2 2875 if (pmap_enter_debug > 0) { 2876 --pmap_enter_debug; 2877 kprintf("pv_drop: free pv %p\n", pv); 2878 } 2879 #endif 2880 KKASSERT(count == 1); 2881 KKASSERT(pv->pv_pmap == NULL); 2882 zfree(pvzone, pv); 2883 } 2884 return; 2885 } 2886 /* retry */ 2887 } 2888 } 2889 2890 /* 2891 * Find or allocate the requested PV entry, returning a locked, held pv. 2892 * 2893 * If (*isnew) is non-zero, the returned pv will have two hold counts, one 2894 * for the caller and one representing the pmap and vm_page association. 2895 * 2896 * If (*isnew) is zero, the returned pv will have only one hold count. 2897 * 2898 * Since both associations can only be adjusted while the pv is locked, 2899 * together they represent just one additional hold. 
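 *
 * In terms of the implementation below, a newly created pv leaves with
 * pv_hold set to (PV_HOLD_LOCKED | 2), while an existing pv is returned
 * locked with one additional hold taken via _pv_hold_try()/_pv_lock().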
2900 */ 2901 static 2902 pv_entry_t 2903 _pv_alloc(pmap_t pmap, vm_pindex_t pindex, int *isnew PMAP_DEBUG_DECL) 2904 { 2905 pv_entry_t pv; 2906 pv_entry_t pnew = NULL; 2907 2908 spin_lock(&pmap->pm_spin); 2909 for (;;) { 2910 if ((pv = pmap->pm_pvhint) == NULL || pv->pv_pindex != pindex) { 2911 pv = pv_entry_rb_tree_RB_LOOKUP(&pmap->pm_pvroot, 2912 pindex); 2913 } 2914 if (pv == NULL) { 2915 if (pnew == NULL) { 2916 spin_unlock(&pmap->pm_spin); 2917 pnew = zalloc(pvzone); 2918 spin_lock(&pmap->pm_spin); 2919 continue; 2920 } 2921 pnew->pv_pmap = pmap; 2922 pnew->pv_pindex = pindex; 2923 pnew->pv_hold = PV_HOLD_LOCKED | 2; 2924 #ifdef PMAP_DEBUG 2925 pnew->pv_func = func; 2926 pnew->pv_line = lineno; 2927 #endif 2928 pv_entry_rb_tree_RB_INSERT(&pmap->pm_pvroot, pnew); 2929 ++pmap->pm_generation; 2930 atomic_add_long(&pmap->pm_stats.resident_count, 1); 2931 spin_unlock(&pmap->pm_spin); 2932 *isnew = 1; 2933 return(pnew); 2934 } 2935 if (pnew) { 2936 spin_unlock(&pmap->pm_spin); 2937 zfree(pvzone, pnew); 2938 pnew = NULL; 2939 spin_lock(&pmap->pm_spin); 2940 continue; 2941 } 2942 if (_pv_hold_try(pv PMAP_DEBUG_COPY)) { 2943 spin_unlock(&pmap->pm_spin); 2944 } else { 2945 spin_unlock(&pmap->pm_spin); 2946 _pv_lock(pv PMAP_DEBUG_COPY); 2947 } 2948 if (pv->pv_pmap == pmap && pv->pv_pindex == pindex) { 2949 *isnew = 0; 2950 return(pv); 2951 } 2952 pv_put(pv); 2953 spin_lock(&pmap->pm_spin); 2954 } 2955 } 2956 2957 /* 2958 * Find the requested PV entry, returning a locked+held pv or NULL 2959 */ 2960 static 2961 pv_entry_t 2962 _pv_get(pmap_t pmap, vm_pindex_t pindex PMAP_DEBUG_DECL) 2963 { 2964 pv_entry_t pv; 2965 2966 spin_lock(&pmap->pm_spin); 2967 for (;;) { 2968 /* 2969 * Shortcut cache 2970 */ 2971 if ((pv = pmap->pm_pvhint) == NULL || pv->pv_pindex != pindex) { 2972 pv = pv_entry_rb_tree_RB_LOOKUP(&pmap->pm_pvroot, 2973 pindex); 2974 } 2975 if (pv == NULL) { 2976 spin_unlock(&pmap->pm_spin); 2977 return NULL; 2978 } 2979 if (_pv_hold_try(pv PMAP_DEBUG_COPY)) { 2980 spin_unlock(&pmap->pm_spin); 2981 } else { 2982 spin_unlock(&pmap->pm_spin); 2983 _pv_lock(pv PMAP_DEBUG_COPY); 2984 } 2985 if (pv->pv_pmap == pmap && pv->pv_pindex == pindex) { 2986 pv_cache(pv, pindex); 2987 return(pv); 2988 } 2989 pv_put(pv); 2990 spin_lock(&pmap->pm_spin); 2991 } 2992 } 2993 2994 /* 2995 * Lookup, hold, and attempt to lock (pmap,pindex). 2996 * 2997 * If the entry does not exist NULL is returned and *errorp is set to 0 2998 * 2999 * If the entry exists and could be successfully locked it is returned and 3000 * errorp is set to 0. 3001 * 3002 * If the entry exists but could NOT be successfully locked it is returned 3003 * held and *errorp is set to 1. 
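 *
 * Caller-side sketch (modeled on the scan loop further below;
 * simplified and illustrative only):
 *
 *	pte_pv = pv_get_try(pmap, pmap_pte_pindex(sva), &error);
 *	if (error) {
 *		(pv was returned held but not locked: release any
 *		 conflicting pv locks, then pv_lock()/pv_put() and
 *		 retry the lookup)
 *	}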
3004 */ 3005 static 3006 pv_entry_t 3007 pv_get_try(pmap_t pmap, vm_pindex_t pindex, int *errorp) 3008 { 3009 pv_entry_t pv; 3010 3011 spin_lock_shared(&pmap->pm_spin); 3012 if ((pv = pmap->pm_pvhint) == NULL || pv->pv_pindex != pindex) 3013 pv = pv_entry_rb_tree_RB_LOOKUP(&pmap->pm_pvroot, pindex); 3014 if (pv == NULL) { 3015 spin_unlock_shared(&pmap->pm_spin); 3016 *errorp = 0; 3017 return NULL; 3018 } 3019 if (pv_hold_try(pv)) { 3020 pv_cache(pv, pindex); 3021 spin_unlock_shared(&pmap->pm_spin); 3022 *errorp = 0; 3023 KKASSERT(pv->pv_pmap == pmap && pv->pv_pindex == pindex); 3024 return(pv); /* lock succeeded */ 3025 } 3026 spin_unlock_shared(&pmap->pm_spin); 3027 *errorp = 1; 3028 return (pv); /* lock failed */ 3029 } 3030 3031 /* 3032 * Find the requested PV entry, returning a held pv or NULL 3033 */ 3034 static 3035 pv_entry_t 3036 pv_find(pmap_t pmap, vm_pindex_t pindex) 3037 { 3038 pv_entry_t pv; 3039 3040 spin_lock_shared(&pmap->pm_spin); 3041 3042 if ((pv = pmap->pm_pvhint) == NULL || pv->pv_pindex != pindex) 3043 pv = pv_entry_rb_tree_RB_LOOKUP(&pmap->pm_pvroot, pindex); 3044 if (pv == NULL) { 3045 spin_unlock_shared(&pmap->pm_spin); 3046 return NULL; 3047 } 3048 pv_hold(pv); 3049 pv_cache(pv, pindex); 3050 spin_unlock_shared(&pmap->pm_spin); 3051 return(pv); 3052 } 3053 3054 /* 3055 * Lock a held pv, keeping the hold count 3056 */ 3057 static 3058 void 3059 _pv_lock(pv_entry_t pv PMAP_DEBUG_DECL) 3060 { 3061 u_int count; 3062 3063 for (;;) { 3064 count = pv->pv_hold; 3065 cpu_ccfence(); 3066 if ((count & PV_HOLD_LOCKED) == 0) { 3067 if (atomic_cmpset_int(&pv->pv_hold, count, 3068 count | PV_HOLD_LOCKED)) { 3069 #ifdef PMAP_DEBUG 3070 pv->pv_func = func; 3071 pv->pv_line = lineno; 3072 #endif 3073 return; 3074 } 3075 continue; 3076 } 3077 tsleep_interlock(pv, 0); 3078 if (atomic_cmpset_int(&pv->pv_hold, count, 3079 count | PV_HOLD_WAITING)) { 3080 #ifdef PMAP_DEBUG 3081 kprintf("pv waiting on %s:%d\n", 3082 pv->pv_func, pv->pv_line); 3083 #endif 3084 tsleep(pv, PINTERLOCKED, "pvwait", hz); 3085 } 3086 /* retry */ 3087 } 3088 } 3089 3090 /* 3091 * Unlock a held and locked pv, keeping the hold count. 3092 */ 3093 static 3094 void 3095 pv_unlock(pv_entry_t pv) 3096 { 3097 u_int count; 3098 3099 for (;;) { 3100 count = pv->pv_hold; 3101 cpu_ccfence(); 3102 KKASSERT((count & (PV_HOLD_LOCKED | PV_HOLD_MASK)) >= 3103 (PV_HOLD_LOCKED | 1)); 3104 if (atomic_cmpset_int(&pv->pv_hold, count, 3105 count & 3106 ~(PV_HOLD_LOCKED | PV_HOLD_WAITING))) { 3107 if (count & PV_HOLD_WAITING) 3108 wakeup(pv); 3109 break; 3110 } 3111 } 3112 } 3113 3114 /* 3115 * Unlock and drop a pv. If the pv is no longer associated with a pmap 3116 * and the hold count drops to zero we will free it. 3117 * 3118 * Caller should not hold any spin locks. We are protected from hold races 3119 * by virtue of holds only occuring only with a pmap_spin or vm_page_spin 3120 * lock held. A pv cannot be located otherwise. 3121 */ 3122 static 3123 void 3124 pv_put(pv_entry_t pv) 3125 { 3126 #ifdef PMAP_DEBUG2 3127 if (pmap_enter_debug > 0) { 3128 --pmap_enter_debug; 3129 kprintf("pv_put pv=%p hold=%08x\n", pv, pv->pv_hold); 3130 } 3131 #endif 3132 3133 /* 3134 * Fast - shortcut most common condition 3135 */ 3136 if (atomic_cmpset_int(&pv->pv_hold, PV_HOLD_LOCKED | 2, 1)) 3137 return; 3138 3139 /* 3140 * Slow 3141 */ 3142 pv_unlock(pv); 3143 pv_drop(pv); 3144 } 3145 3146 /* 3147 * Remove the pmap association from a pv, require that pv_m already be removed, 3148 * then unlock and drop the pv. 
Any pte operations must have already been
3149 * completed. This call may result in a last-drop which will physically free
3150 * the pv.
3151 *
3152 * Removing the pmap association entails an additional drop.
3153 *
3154 * pv must be exclusively locked on call and will be disposed of on return.
3155 */
3156 static
3157 void
3158 pv_free(pv_entry_t pv)
3159 {
3160 pmap_t pmap;
3161
3162 KKASSERT(pv->pv_m == NULL);
3163 KKASSERT((pv->pv_hold & PV_HOLD_MASK) >= 2);
3164 if ((pmap = pv->pv_pmap) != NULL) {
3165 spin_lock(&pmap->pm_spin);
3166 pv_entry_rb_tree_RB_REMOVE(&pmap->pm_pvroot, pv);
3167 ++pmap->pm_generation;
3168 if (pmap->pm_pvhint == pv)
3169 pmap->pm_pvhint = NULL;
3170 atomic_add_long(&pmap->pm_stats.resident_count, -1);
3171 pv->pv_pmap = NULL;
3172 pv->pv_pindex = 0;
3173 spin_unlock(&pmap->pm_spin);
3174
3175 /*
3176 * Try to shortcut three atomic ops, otherwise fall through
3177 * and do it normally. Drop two refs and the lock all in
3178 * one go.
3179 */
3180 if (atomic_cmpset_int(&pv->pv_hold, PV_HOLD_LOCKED | 2, 0)) {
3181 #ifdef PMAP_DEBUG2
3182 if (pmap_enter_debug > 0) {
3183 --pmap_enter_debug;
3184 kprintf("pv_free: free pv %p\n", pv);
3185 }
3186 #endif
3187 zfree(pvzone, pv);
3188 return;
3189 }
3190 pv_drop(pv); /* ref for pv_pmap */
3191 }
3192 pv_put(pv);
3193 }
3194
3195 /*
3196 * This routine is very drastic, but can save the system
3197 * in a pinch.
3198 */
3199 void
3200 pmap_collect(void)
3201 {
3202 int i;
3203 vm_page_t m;
3204 static int warningdone=0;
3205
3206 if (pmap_pagedaemon_waken == 0)
3207 return;
3208 pmap_pagedaemon_waken = 0;
3209 if (warningdone < 5) {
3210 kprintf("pmap_collect: collecting pv entries -- "
3211 "suggest increasing PMAP_SHPGPERPROC\n");
3212 warningdone++;
3213 }
3214
3215 for (i = 0; i < vm_page_array_size; i++) {
3216 m = &vm_page_array[i];
3217 if (m->wire_count || m->hold_count)
3218 continue;
3219 if (vm_page_busy_try(m, TRUE) == 0) {
3220 if (m->wire_count == 0 && m->hold_count == 0) {
3221 pmap_remove_all(m);
3222 }
3223 vm_page_wakeup(m);
3224 }
3225 }
3226 }
3227
3228 /*
3229 * Scan the pmap for active page table entries and issue a callback.
3230 * The callback must dispose of pte_pv, whose PTE entry is at *ptep in
3231 * its parent page table.
3232 *
3233 * pte_pv will be NULL if the page or page table is unmanaged.
3234 * pt_pv will point to the page table page containing the pte for the page.
3235 *
3236 * NOTE! If we come across an unmanaged page TABLE (versus an unmanaged page),
3237 * we pass a NULL pte_pv and we pass a pt_pv pointing to the passed-in
3238 * process pmap's PD page to the callback function. This can be
3239 * confusing because the pt_pv is really a pd_pv, and the target page
3240 * table page is simply aliased by the pmap and not owned by it.
3241 *
3242 * It is assumed that the start and end are properly rounded to the page size.
3243 *
3244 * It is assumed that PD pages and above are managed and thus in the RB tree,
3245 * allowing us to use RB_SCAN from the PD pages down for ranged scans.
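 *
 * The callback is invoked through pmap_scan_info.func with the
 * signature func(pmap, info, pte_pv, pt_pv, sharept, va, ptep, arg),
 * matching the function pointer declared in the structure below.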
3246 */ 3247 struct pmap_scan_info { 3248 struct pmap *pmap; 3249 vm_offset_t sva; 3250 vm_offset_t eva; 3251 vm_pindex_t sva_pd_pindex; 3252 vm_pindex_t eva_pd_pindex; 3253 void (*func)(pmap_t, struct pmap_scan_info *, 3254 pv_entry_t, pv_entry_t, int, vm_offset_t, 3255 pt_entry_t *, void *); 3256 void *arg; 3257 int doinval; 3258 struct pmap_inval_info inval; 3259 }; 3260 3261 static int pmap_scan_cmp(pv_entry_t pv, void *data); 3262 static int pmap_scan_callback(pv_entry_t pv, void *data); 3263 3264 static void 3265 pmap_scan(struct pmap_scan_info *info) 3266 { 3267 struct pmap *pmap = info->pmap; 3268 pv_entry_t pd_pv; /* A page directory PV */ 3269 pv_entry_t pt_pv; /* A page table PV */ 3270 pv_entry_t pte_pv; /* A page table entry PV */ 3271 pt_entry_t *ptep; 3272 pt_entry_t oldpte; 3273 struct pv_entry dummy_pv; 3274 int generation; 3275 3276 if (pmap == NULL) 3277 return; 3278 3279 /* 3280 * Hold the token for stability; if the pmap is empty we have nothing 3281 * to do. 3282 */ 3283 lwkt_gettoken(&pmap->pm_token); 3284 #if 0 3285 if (pmap->pm_stats.resident_count == 0) { 3286 lwkt_reltoken(&pmap->pm_token); 3287 return; 3288 } 3289 #endif 3290 3291 pmap_inval_init(&info->inval); 3292 3293 again: 3294 /* 3295 * Special handling for scanning one page, which is a very common 3296 * operation (it is?). 3297 * 3298 * NOTE: Locks must be ordered bottom-up. pte,pt,pd,pdp,pml4 3299 */ 3300 if (info->sva + PAGE_SIZE == info->eva) { 3301 generation = pmap->pm_generation; 3302 if (info->sva >= VM_MAX_USER_ADDRESS) { 3303 /* 3304 * Kernel mappings do not track wire counts on 3305 * page table pages and only maintain pd_pv and 3306 * pte_pv levels so pmap_scan() works. 3307 */ 3308 pt_pv = NULL; 3309 pte_pv = pv_get(pmap, pmap_pte_pindex(info->sva)); 3310 ptep = vtopte(info->sva); 3311 } else { 3312 /* 3313 * User pages which are unmanaged will not have a 3314 * pte_pv. User page table pages which are unmanaged 3315 * (shared from elsewhere) will also not have a pt_pv. 3316 * The func() callback will pass both pte_pv and pt_pv 3317 * as NULL in that case. 3318 */ 3319 pte_pv = pv_get(pmap, pmap_pte_pindex(info->sva)); 3320 pt_pv = pv_get(pmap, pmap_pt_pindex(info->sva)); 3321 if (pt_pv == NULL) { 3322 KKASSERT(pte_pv == NULL); 3323 pd_pv = pv_get(pmap, pmap_pd_pindex(info->sva)); 3324 if (pd_pv) { 3325 ptep = pv_pte_lookup(pd_pv, 3326 pmap_pt_index(info->sva)); 3327 if (*ptep) { 3328 info->func(pmap, info, 3329 NULL, pd_pv, 1, 3330 info->sva, ptep, 3331 info->arg); 3332 } 3333 pv_put(pd_pv); 3334 } 3335 goto fast_skip; 3336 } 3337 ptep = pv_pte_lookup(pt_pv, pmap_pte_index(info->sva)); 3338 } 3339 3340 /* 3341 * NOTE: *ptep can't be ripped out from under us if we hold 3342 * pte_pv locked, but bits can change. However, there is 3343 * a race where another thread may be inserting pte_pv 3344 * and setting *ptep just after our pte_pv lookup fails. 3345 * 3346 * In this situation we can end up with a NULL pte_pv 3347 * but find that we have a managed *ptep. We explicitly 3348 * check for this race. 3349 */ 3350 oldpte = *ptep; 3351 cpu_ccfence(); 3352 if (oldpte == 0) { 3353 /* 3354 * Unlike the pv_find() case below we actually 3355 * acquired a locked pv in this case so any 3356 * race should have been resolved. It is expected 3357 * to not exist. 
3358 */ 3359 KKASSERT(pte_pv == NULL); 3360 } else if (pte_pv) { 3361 KASSERT((oldpte & (pmap->pmap_bits[PG_MANAGED_IDX] | 3362 pmap->pmap_bits[PG_V_IDX])) == 3363 (pmap->pmap_bits[PG_MANAGED_IDX] | 3364 pmap->pmap_bits[PG_V_IDX]), 3365 ("badA *ptep %016lx/%016lx sva %016lx pte_pv %p" 3366 "generation %d/%d", 3367 *ptep, oldpte, info->sva, pte_pv, 3368 generation, pmap->pm_generation)); 3369 info->func(pmap, info, pte_pv, pt_pv, 0, 3370 info->sva, ptep, info->arg); 3371 } else { 3372 /* 3373 * Check for insertion race 3374 */ 3375 if ((oldpte & pmap->pmap_bits[PG_MANAGED_IDX]) && 3376 pt_pv) { 3377 pte_pv = pv_find(pmap, 3378 pmap_pte_pindex(info->sva)); 3379 if (pte_pv) { 3380 pv_drop(pte_pv); 3381 pv_put(pt_pv); 3382 kprintf("pmap_scan: RACE1 " 3383 "%016jx, %016lx\n", 3384 info->sva, oldpte); 3385 goto again; 3386 } 3387 } 3388 3389 /* 3390 * Didn't race 3391 */ 3392 KASSERT((oldpte & (pmap->pmap_bits[PG_MANAGED_IDX] | 3393 pmap->pmap_bits[PG_V_IDX])) == 3394 pmap->pmap_bits[PG_V_IDX], 3395 ("badB *ptep %016lx/%016lx sva %016lx pte_pv NULL" 3396 "generation %d/%d", 3397 *ptep, oldpte, info->sva, 3398 generation, pmap->pm_generation)); 3399 info->func(pmap, info, NULL, pt_pv, 0, 3400 info->sva, ptep, info->arg); 3401 } 3402 if (pt_pv) 3403 pv_put(pt_pv); 3404 fast_skip: 3405 pmap_inval_done(&info->inval); 3406 lwkt_reltoken(&pmap->pm_token); 3407 return; 3408 } 3409 3410 /* 3411 * Nominal scan case, RB_SCAN() for PD pages and iterate from 3412 * there. 3413 */ 3414 info->sva_pd_pindex = pmap_pd_pindex(info->sva); 3415 info->eva_pd_pindex = pmap_pd_pindex(info->eva + NBPDP - 1); 3416 3417 if (info->sva >= VM_MAX_USER_ADDRESS) { 3418 /* 3419 * The kernel does not currently maintain any pv_entry's for 3420 * higher-level page tables. 3421 */ 3422 bzero(&dummy_pv, sizeof(dummy_pv)); 3423 dummy_pv.pv_pindex = info->sva_pd_pindex; 3424 spin_lock(&pmap->pm_spin); 3425 while (dummy_pv.pv_pindex < info->eva_pd_pindex) { 3426 pmap_scan_callback(&dummy_pv, info); 3427 ++dummy_pv.pv_pindex; 3428 } 3429 spin_unlock(&pmap->pm_spin); 3430 } else { 3431 /* 3432 * User page tables maintain local PML4, PDP, and PD 3433 * pv_entry's at the very least. PT pv's might be 3434 * unmanaged and thus not exist. PTE pv's might be 3435 * unmanaged and thus not exist. 3436 */ 3437 spin_lock(&pmap->pm_spin); 3438 pv_entry_rb_tree_RB_SCAN(&pmap->pm_pvroot, 3439 pmap_scan_cmp, pmap_scan_callback, info); 3440 spin_unlock(&pmap->pm_spin); 3441 } 3442 pmap_inval_done(&info->inval); 3443 lwkt_reltoken(&pmap->pm_token); 3444 } 3445 3446 /* 3447 * WARNING! pmap->pm_spin held 3448 */ 3449 static int 3450 pmap_scan_cmp(pv_entry_t pv, void *data) 3451 { 3452 struct pmap_scan_info *info = data; 3453 if (pv->pv_pindex < info->sva_pd_pindex) 3454 return(-1); 3455 if (pv->pv_pindex >= info->eva_pd_pindex) 3456 return(1); 3457 return(0); 3458 } 3459 3460 /* 3461 * WARNING! pmap->pm_spin held 3462 */ 3463 static int 3464 pmap_scan_callback(pv_entry_t pv, void *data) 3465 { 3466 struct pmap_scan_info *info = data; 3467 struct pmap *pmap = info->pmap; 3468 pv_entry_t pd_pv; /* A page directory PV */ 3469 pv_entry_t pt_pv; /* A page table PV */ 3470 pv_entry_t pte_pv; /* A page table entry PV */ 3471 pt_entry_t *ptep; 3472 pt_entry_t oldpte; 3473 vm_offset_t sva; 3474 vm_offset_t eva; 3475 vm_offset_t va_next; 3476 vm_pindex_t pd_pindex; 3477 int error; 3478 int generation; 3479 3480 /* 3481 * Pull the PD pindex from the pv before releasing the spinlock. 3482 * 3483 * WARNING: pv is faked for kernel pmap scans. 
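 *
 * (For the kernel case pmap_scan() constructs a zeroed dummy_pv with
 *  only pv_pindex filled in, which is why just the pindex is copied
 *  out here and the pv pointer is immediately forgotten.)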
3484 */
3485 pd_pindex = pv->pv_pindex;
3486 spin_unlock(&pmap->pm_spin);
3487 pv = NULL; /* invalid after spinlock unlocked */
3488
3489 /*
3490 * Calculate the page range within the PD. SIMPLE pmaps are
3491 * direct-mapped for the entire 2^64 address space. Normal pmaps
3492 * reflect the user and kernel address space which requires
3493 * canonicalization w/regards to converting pd_pindex's back
3494 * into addresses.
3495 */
3496 sva = (pd_pindex - NUPTE_TOTAL - NUPT_TOTAL) << PDPSHIFT;
3497 if ((pmap->pm_flags & PMAP_FLAG_SIMPLE) == 0 &&
3498 (sva & PML4_SIGNMASK)) {
3499 sva |= PML4_SIGNMASK;
3500 }
3501 eva = sva + NBPDP; /* can overflow */
3502 if (sva < info->sva)
3503 sva = info->sva;
3504 if (eva < info->sva || eva > info->eva)
3505 eva = info->eva;
3506
3507 /*
3508 * NOTE: kernel mappings do not track page table pages, only
3509 * terminal pages.
3510 *
3511 * NOTE: Locks must be ordered bottom-up. pte,pt,pd,pdp,pml4.
3512 * However, for the scan to be efficient we try to
3513 * cache items top-down.
3514 */
3515 pd_pv = NULL;
3516 pt_pv = NULL;
3517
3518 for (; sva < eva; sva = va_next) {
3519 if (sva >= VM_MAX_USER_ADDRESS) {
3520 if (pt_pv) {
3521 pv_put(pt_pv);
3522 pt_pv = NULL;
3523 }
3524 goto kernel_skip;
3525 }
3526
3527 /*
3528 * PD cache (degenerate case if we skip). It is possible
3529 * for the PD to not exist due to races. This is ok.
3530 */
3531 if (pd_pv == NULL) {
3532 pd_pv = pv_get(pmap, pmap_pd_pindex(sva));
3533 } else if (pd_pv->pv_pindex != pmap_pd_pindex(sva)) {
3534 pv_put(pd_pv);
3535 pd_pv = pv_get(pmap, pmap_pd_pindex(sva));
3536 }
3537 if (pd_pv == NULL) {
3538 va_next = (sva + NBPDP) & ~PDPMASK;
3539 if (va_next < sva)
3540 va_next = eva;
3541 continue;
3542 }
3543
3544 /*
3545 * PT cache
3546 */
3547 if (pt_pv == NULL) {
3548 if (pd_pv) {
3549 pv_put(pd_pv);
3550 pd_pv = NULL;
3551 }
3552 pt_pv = pv_get(pmap, pmap_pt_pindex(sva));
3553 } else if (pt_pv->pv_pindex != pmap_pt_pindex(sva)) {
3554 if (pd_pv) {
3555 pv_put(pd_pv);
3556 pd_pv = NULL;
3557 }
3558 pv_put(pt_pv);
3559 pt_pv = pv_get(pmap, pmap_pt_pindex(sva));
3560 }
3561
3562 /*
3563 * If pt_pv is NULL we either have a shared page table
3564 * page and must issue a callback specific to that case,
3565 * or there is no page table page.
3566 *
3567 * Either way we can skip the page table page.
3568 */
3569 if (pt_pv == NULL) {
3570 /*
3571 * Possible unmanaged (shared from another pmap)
3572 * page table page.
3573 */
3574 if (pd_pv == NULL)
3575 pd_pv = pv_get(pmap, pmap_pd_pindex(sva));
3576 KKASSERT(pd_pv != NULL);
3577 ptep = pv_pte_lookup(pd_pv, pmap_pt_index(sva));
3578 if (*ptep & pmap->pmap_bits[PG_V_IDX]) {
3579 info->func(pmap, info, NULL, pd_pv, 1,
3580 sva, ptep, info->arg);
3581 }
3582
3583 /*
3584 * Done, move to next page table page.
3585 */
3586 va_next = (sva + NBPDR) & ~PDRMASK;
3587 if (va_next < sva)
3588 va_next = eva;
3589 continue;
3590 }
3591
3592 /*
3593 * From this point in the loop testing pt_pv for non-NULL
3594 * means we are in UVM, else if it is NULL we are in KVM.
3595 *
3596 * Limit our scan to either the end of the va represented
3597 * by the current page table page, or to the end of the
3598 * range being removed.
3599 */
3600 kernel_skip:
3601 va_next = (sva + NBPDR) & ~PDRMASK;
3602 if (va_next < sva)
3603 va_next = eva;
3604 if (va_next > eva)
3605 va_next = eva;
3606
3607 /*
3608 * Scan the page table for pages. Some pages may not be
3609 * managed (might not have a pv_entry).
3610 * 3611 * There is no page table management for kernel pages so 3612 * pt_pv will be NULL in that case, but otherwise pt_pv 3613 * is non-NULL, locked, and referenced. 3614 */ 3615 3616 /* 3617 * At this point a non-NULL pt_pv means a UVA, and a NULL 3618 * pt_pv means a KVA. 3619 */ 3620 if (pt_pv) 3621 ptep = pv_pte_lookup(pt_pv, pmap_pte_index(sva)); 3622 else 3623 ptep = vtopte(sva); 3624 3625 while (sva < va_next) { 3626 /* 3627 * Acquire the related pte_pv, if any. If *ptep == 0 3628 * the related pte_pv should not exist, but if *ptep 3629 * is not zero the pte_pv may or may not exist (e.g. 3630 * will not exist for an unmanaged page). 3631 * 3632 * However a multitude of races are possible here. 3633 * 3634 * In addition, the (pt_pv, pte_pv) lock order is 3635 * backwards, so we have to be careful in aquiring 3636 * a properly locked pte_pv. 3637 */ 3638 generation = pmap->pm_generation; 3639 if (pt_pv) { 3640 pte_pv = pv_get_try(pmap, pmap_pte_pindex(sva), 3641 &error); 3642 if (error) { 3643 if (pd_pv) { 3644 pv_put(pd_pv); 3645 pd_pv = NULL; 3646 } 3647 pv_put(pt_pv); /* must be non-NULL */ 3648 pt_pv = NULL; 3649 pv_lock(pte_pv); /* safe to block now */ 3650 pv_put(pte_pv); 3651 pte_pv = NULL; 3652 pt_pv = pv_get(pmap, 3653 pmap_pt_pindex(sva)); 3654 /* 3655 * pt_pv reloaded, need new ptep 3656 */ 3657 KKASSERT(pt_pv != NULL); 3658 ptep = pv_pte_lookup(pt_pv, 3659 pmap_pte_index(sva)); 3660 continue; 3661 } 3662 } else { 3663 pte_pv = pv_get(pmap, pmap_pte_pindex(sva)); 3664 } 3665 3666 /* 3667 * Ok, if *ptep == 0 we had better NOT have a pte_pv. 3668 */ 3669 oldpte = *ptep; 3670 if (oldpte == 0) { 3671 if (pte_pv) { 3672 kprintf("Unexpected non-NULL pte_pv " 3673 "%p pt_pv %p " 3674 "*ptep = %016lx/%016lx\n", 3675 pte_pv, pt_pv, *ptep, oldpte); 3676 panic("Unexpected non-NULL pte_pv"); 3677 } 3678 sva += PAGE_SIZE; 3679 ++ptep; 3680 continue; 3681 } 3682 3683 /* 3684 * Ready for the callback. The locked pte_pv (if any) 3685 * is consumed by the callback. pte_pv will exist if 3686 * the page is managed, and will not exist if it 3687 * isn't. 3688 */ 3689 if (pte_pv) { 3690 KASSERT((oldpte & (pmap->pmap_bits[PG_MANAGED_IDX] | pmap->pmap_bits[PG_V_IDX])) == 3691 (pmap->pmap_bits[PG_MANAGED_IDX] | pmap->pmap_bits[PG_V_IDX]), 3692 ("badC *ptep %016lx/%016lx sva %016lx " 3693 "pte_pv %p pm_generation %d/%d", 3694 *ptep, oldpte, sva, pte_pv, 3695 generation, pmap->pm_generation)); 3696 info->func(pmap, info, pte_pv, pt_pv, 0, 3697 sva, ptep, info->arg); 3698 } else { 3699 /* 3700 * Check for insertion race. Since there is no 3701 * pte_pv to guard us it is possible for us 3702 * to race another thread doing an insertion. 3703 * Our lookup misses the pte_pv but our *ptep 3704 * check sees the inserted pte. 3705 * 3706 * XXX panic case seems to occur within a 3707 * vm_fork() of /bin/sh, which frankly 3708 * shouldn't happen since no other threads 3709 * should be inserting to our pmap in that 3710 * situation. Removing, possibly. Inserting, 3711 * shouldn't happen. 
3712 */
3713 if ((oldpte & pmap->pmap_bits[PG_MANAGED_IDX]) &&
3714 pt_pv) {
3715 pte_pv = pv_find(pmap,
3716 pmap_pte_pindex(sva));
3717 if (pte_pv) {
3718 pv_drop(pte_pv);
3719 kprintf("pmap_scan: RACE2 "
3720 "%016jx, %016lx\n",
3721 sva, oldpte);
3722 continue;
3723 }
3724 }
3725
3726 /*
3727 * Didn't race
3728 */
3729 KASSERT((oldpte & (pmap->pmap_bits[PG_MANAGED_IDX] | pmap->pmap_bits[PG_V_IDX])) ==
3730 pmap->pmap_bits[PG_V_IDX],
3731 ("badD *ptep %016lx/%016lx sva %016lx "
3732 "pte_pv NULL pm_generation %d/%d",
3733 *ptep, oldpte, sva,
3734 generation, pmap->pm_generation));
3735 info->func(pmap, info, NULL, pt_pv, 0,
3736 sva, ptep, info->arg);
3737 }
3738 pte_pv = NULL;
3739 sva += PAGE_SIZE;
3740 ++ptep;
3741 }
3742 lwkt_yield();
3743 }
3744 if (pd_pv) {
3745 pv_put(pd_pv);
3746 pd_pv = NULL;
3747 }
3748 if (pt_pv) {
3749 pv_put(pt_pv);
3750 pt_pv = NULL;
3751 }
3752 lwkt_yield();
3753
3754 /*
3755 * Relock before returning.
3756 */
3757 spin_lock(&pmap->pm_spin);
3758 return (0);
3759 }
3760
3761 void
3762 pmap_remove(struct pmap *pmap, vm_offset_t sva, vm_offset_t eva)
3763 {
3764 struct pmap_scan_info info;
3765
3766 info.pmap = pmap;
3767 info.sva = sva;
3768 info.eva = eva;
3769 info.func = pmap_remove_callback;
3770 info.arg = NULL;
3771 info.doinval = 1; /* normal remove requires pmap inval */
3772 pmap_scan(&info);
3773 }
3774
3775 static void
3776 pmap_remove_noinval(struct pmap *pmap, vm_offset_t sva, vm_offset_t eva)
3777 {
3778 struct pmap_scan_info info;
3779
3780 info.pmap = pmap;
3781 info.sva = sva;
3782 info.eva = eva;
3783 info.func = pmap_remove_callback;
3784 info.arg = NULL;
3785 info.doinval = 0; /* no pmap inval; caller handles any invalidation */
3786 pmap_scan(&info);
3787 }
3788
3789 static void
3790 pmap_remove_callback(pmap_t pmap, struct pmap_scan_info *info,
3791 pv_entry_t pte_pv, pv_entry_t pt_pv, int sharept,
3792 vm_offset_t va, pt_entry_t *ptep, void *arg __unused)
3793 {
3794 pt_entry_t pte;
3795
3796 if (pte_pv) {
3797 /*
3798 * This will also drop pt_pv's wire_count. Note that
3799 * terminal pages are not wired based on mmu presence.
3800 */
3801 if (info->doinval)
3802 pmap_remove_pv_pte(pte_pv, pt_pv, &info->inval);
3803 else
3804 pmap_remove_pv_pte(pte_pv, pt_pv, NULL);
3805 pmap_remove_pv_page(pte_pv);
3806 pv_free(pte_pv);
3807 } else if (sharept == 0) {
3808 /*
3809 * Unmanaged page table (pt, pd, or pdp. Not pte).
3810 *
3811 * pt_pv's wire_count is still bumped by unmanaged pages
3812 * so we must decrement it manually.
3813 *
3814 * We have to unwire the target page table page.
3815 *
3816 * It is unclear how we can invalidate a segment so we
3817 * invalidate -1 which invalidates the TLB.
3818 */
3819 if (info->doinval)
3820 pmap_inval_interlock(&info->inval, pmap, -1);
3821 pte = pte_load_clear(ptep);
3822 if (info->doinval)
3823 pmap_inval_deinterlock(&info->inval, pmap);
3824 if (pte & pmap->pmap_bits[PG_W_IDX])
3825 atomic_add_long(&pmap->pm_stats.wired_count, -1);
3826 atomic_add_long(&pmap->pm_stats.resident_count, -1);
3827 if (vm_page_unwire_quick(pt_pv->pv_m))
3828 panic("pmap_remove: insufficient wirecount");
3829 } else {
3830 /*
3831 * Unmanaged page table (pt, pd, or pdp. Not pte) for
3832 * a shared page table.
3833 *
3834 * pt_pv is actually the pd_pv for our pmap (not the shared
3835 * object pmap).
3836 *
3837 * We have to unwire the target page table page and we
3838 * have to unwire our page directory page.
3839 *
3840 * It is unclear how we can invalidate a segment so we
3841 * invalidate -1 which invalidates the TLB.
3842 */ 3843 if (info->doinval) 3844 pmap_inval_interlock(&info->inval, pmap, -1); 3845 pte = pte_load_clear(ptep); 3846 if (info->doinval) 3847 pmap_inval_deinterlock(&info->inval, pmap); 3848 atomic_add_long(&pmap->pm_stats.resident_count, -1); 3849 KKASSERT((pte & pmap->pmap_bits[PG_DEVICE_IDX]) == 0); 3850 if (vm_page_unwire_quick(PHYS_TO_VM_PAGE(pte & PG_FRAME))) 3851 panic("pmap_remove: shared pgtable1 bad wirecount"); 3852 if (vm_page_unwire_quick(pt_pv->pv_m)) 3853 panic("pmap_remove: shared pgtable2 bad wirecount"); 3854 } 3855 } 3856 3857 /* 3858 * Removes this physical page from all physical maps in which it resides. 3859 * Reflects back modify bits to the pager. 3860 * 3861 * This routine may not be called from an interrupt. 3862 */ 3863 static 3864 void 3865 pmap_remove_all(vm_page_t m) 3866 { 3867 struct pmap_inval_info info; 3868 pv_entry_t pv; 3869 3870 if (!pmap_initialized /* || (m->flags & PG_FICTITIOUS)*/) 3871 return; 3872 3873 pmap_inval_init(&info); 3874 vm_page_spin_lock(m); 3875 while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { 3876 KKASSERT(pv->pv_m == m); 3877 if (pv_hold_try(pv)) { 3878 vm_page_spin_unlock(m); 3879 } else { 3880 vm_page_spin_unlock(m); 3881 pv_lock(pv); 3882 } 3883 if (pv->pv_m != m) { 3884 pv_put(pv); 3885 vm_page_spin_lock(m); 3886 continue; 3887 } 3888 3889 /* 3890 * Holding no spinlocks, pv is locked. 3891 */ 3892 pmap_remove_pv_pte(pv, NULL, &info); 3893 pmap_remove_pv_page(pv); 3894 pv_free(pv); 3895 vm_page_spin_lock(m); 3896 } 3897 KKASSERT((m->flags & (PG_MAPPED|PG_WRITEABLE)) == 0); 3898 vm_page_spin_unlock(m); 3899 pmap_inval_done(&info); 3900 } 3901 3902 /* 3903 * Set the physical protection on the specified range of this map 3904 * as requested. This function is typically only used for debug watchpoints 3905 * and COW pages. 3906 * 3907 * This function may not be called from an interrupt if the map is 3908 * not the kernel_pmap. 3909 * 3910 * NOTE! For shared page table pages we just unmap the page. 3911 */ 3912 void 3913 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) 3914 { 3915 struct pmap_scan_info info; 3916 /* JG review for NX */ 3917 3918 if (pmap == NULL) 3919 return; 3920 if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 3921 pmap_remove(pmap, sva, eva); 3922 return; 3923 } 3924 if (prot & VM_PROT_WRITE) 3925 return; 3926 info.pmap = pmap; 3927 info.sva = sva; 3928 info.eva = eva; 3929 info.func = pmap_protect_callback; 3930 info.arg = &prot; 3931 info.doinval = 1; 3932 pmap_scan(&info); 3933 } 3934 3935 static 3936 void 3937 pmap_protect_callback(pmap_t pmap, struct pmap_scan_info *info, 3938 pv_entry_t pte_pv, pv_entry_t pt_pv, int sharept, 3939 vm_offset_t va, pt_entry_t *ptep, void *arg __unused) 3940 { 3941 pt_entry_t pbits; 3942 pt_entry_t cbits; 3943 pt_entry_t pte; 3944 vm_page_t m; 3945 3946 /* 3947 * XXX non-optimal. 
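 *
 * The body below re-reads *ptep, computes the new bits in cbits, and
 * retries from the 'again' label if the atomic_cmpset_long() fails
 * because the pte changed underneath us.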
3948 */ 3949 pmap_inval_interlock(&info->inval, pmap, va); 3950 again: 3951 pbits = *ptep; 3952 cbits = pbits; 3953 if (pte_pv) { 3954 m = NULL; 3955 if (pbits & pmap->pmap_bits[PG_A_IDX]) { 3956 if ((pbits & pmap->pmap_bits[PG_DEVICE_IDX]) == 0) { 3957 m = PHYS_TO_VM_PAGE(pbits & PG_FRAME); 3958 KKASSERT(m == pte_pv->pv_m); 3959 vm_page_flag_set(m, PG_REFERENCED); 3960 } 3961 cbits &= ~pmap->pmap_bits[PG_A_IDX]; 3962 } 3963 if (pbits & pmap->pmap_bits[PG_M_IDX]) { 3964 if (pmap_track_modified(pte_pv->pv_pindex)) { 3965 if ((pbits & pmap->pmap_bits[PG_DEVICE_IDX]) == 0) { 3966 if (m == NULL) { 3967 m = PHYS_TO_VM_PAGE(pbits & 3968 PG_FRAME); 3969 } 3970 vm_page_dirty(m); 3971 } 3972 cbits &= ~pmap->pmap_bits[PG_M_IDX]; 3973 } 3974 } 3975 } else if (sharept) { 3976 /* 3977 * Unmanaged page table, pt_pv is actually the pd_pv 3978 * for our pmap (not the object's shared pmap). 3979 * 3980 * When asked to protect something in a shared page table 3981 * page we just unmap the page table page. We have to 3982 * invalidate the tlb in this situation. 3983 * 3984 * XXX Warning, shared page tables will not be used for 3985 * OBJT_DEVICE or OBJT_MGTDEVICE (PG_FICTITIOUS) mappings 3986 * so PHYS_TO_VM_PAGE() should be safe here. 3987 */ 3988 pte = pte_load_clear(ptep); 3989 pmap_inval_invltlb(&info->inval); 3990 if (vm_page_unwire_quick(PHYS_TO_VM_PAGE(pte & PG_FRAME))) 3991 panic("pmap_protect: pgtable1 pg bad wirecount"); 3992 if (vm_page_unwire_quick(pt_pv->pv_m)) 3993 panic("pmap_protect: pgtable2 pg bad wirecount"); 3994 ptep = NULL; 3995 } 3996 /* else unmanaged page, adjust bits, no wire changes */ 3997 3998 if (ptep) { 3999 cbits &= ~pmap->pmap_bits[PG_RW_IDX]; 4000 #ifdef PMAP_DEBUG2 4001 if (pmap_enter_debug > 0) { 4002 --pmap_enter_debug; 4003 kprintf("pmap_protect va=%lx ptep=%p pte_pv=%p " 4004 "pt_pv=%p cbits=%08lx\n", 4005 va, ptep, pte_pv, 4006 pt_pv, cbits 4007 ); 4008 } 4009 #endif 4010 if (pbits != cbits && !atomic_cmpset_long(ptep, pbits, cbits)) { 4011 goto again; 4012 } 4013 } 4014 pmap_inval_deinterlock(&info->inval, pmap); 4015 if (pte_pv) 4016 pv_put(pte_pv); 4017 } 4018 4019 /* 4020 * Insert the vm_page (m) at the virtual address (va), replacing any prior 4021 * mapping at that address. Set protection and wiring as requested. 4022 * 4023 * If entry is non-NULL we check to see if the SEG_SIZE optimization is 4024 * possible. If it is we enter the page into the appropriate shared pmap 4025 * hanging off the related VM object instead of the passed pmap, then we 4026 * share the page table page from the VM object's pmap into the current pmap. 4027 * 4028 * NOTE: This routine MUST insert the page into the pmap now, it cannot 4029 * lazy-evaluate. 
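 *
 * Broad outline of the code below: resolve pte_pv/pt_pv and the ptep
 * for va, build newpte from the physical address plus protection,
 * wiring, cache and managed bits, and if a previous mapping (opa)
 * exists dispose of it, normally under an invalidation interlock,
 * before installing the new entry.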
4030 */ 4031 void 4032 pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, 4033 boolean_t wired, vm_map_entry_t entry) 4034 { 4035 pmap_inval_info info; 4036 pv_entry_t pt_pv; /* page table */ 4037 pv_entry_t pte_pv; /* page table entry */ 4038 pt_entry_t *ptep; 4039 vm_paddr_t opa; 4040 pt_entry_t origpte, newpte; 4041 vm_paddr_t pa; 4042 4043 if (pmap == NULL) 4044 return; 4045 va = trunc_page(va); 4046 #ifdef PMAP_DIAGNOSTIC 4047 if (va >= KvaEnd) 4048 panic("pmap_enter: toobig"); 4049 if ((va >= UPT_MIN_ADDRESS) && (va < UPT_MAX_ADDRESS)) 4050 panic("pmap_enter: invalid to pmap_enter page table " 4051 "pages (va: 0x%lx)", va); 4052 #endif 4053 if (va < UPT_MAX_ADDRESS && pmap == &kernel_pmap) { 4054 kprintf("Warning: pmap_enter called on UVA with " 4055 "kernel_pmap\n"); 4056 #ifdef DDB 4057 db_print_backtrace(); 4058 #endif 4059 } 4060 if (va >= UPT_MAX_ADDRESS && pmap != &kernel_pmap) { 4061 kprintf("Warning: pmap_enter called on KVA without " 4062 "kernel_pmap\n"); 4063 #ifdef DDB 4064 db_print_backtrace(); 4065 #endif 4066 } 4067 4068 /* 4069 * Get locked PV entries for our new page table entry (pte_pv) 4070 * and for its parent page table (pt_pv). We need the parent 4071 * so we can resolve the location of the ptep. 4072 * 4073 * Only hardware MMU actions can modify the ptep out from 4074 * under us. 4075 * 4076 * If (m) is fictitious or unmanaged we do not create a managing 4077 * pte_pv for it. Any pre-existing page's management state must 4078 * match (avoiding code complexity). 4079 * 4080 * If the pmap is still being initialized we assume existing 4081 * page tables. 4082 * 4083 * Kernel mappings do not track page table pages (i.e. pt_pv). 4084 */ 4085 if (pmap_initialized == FALSE) { 4086 pte_pv = NULL; 4087 pt_pv = NULL; 4088 ptep = vtopte(va); 4089 origpte = *ptep; 4090 } else if (m->flags & (/*PG_FICTITIOUS |*/ PG_UNMANAGED)) { /* XXX */ 4091 pte_pv = NULL; 4092 if (va >= VM_MAX_USER_ADDRESS) { 4093 pt_pv = NULL; 4094 ptep = vtopte(va); 4095 } else { 4096 pt_pv = pmap_allocpte_seg(pmap, pmap_pt_pindex(va), 4097 NULL, entry, va); 4098 ptep = pv_pte_lookup(pt_pv, pmap_pte_index(va)); 4099 } 4100 origpte = *ptep; 4101 cpu_ccfence(); 4102 KASSERT(origpte == 0 || 4103 (origpte & pmap->pmap_bits[PG_MANAGED_IDX]) == 0, 4104 ("Invalid PTE 0x%016jx @ 0x%016jx\n", origpte, va)); 4105 } else { 4106 if (va >= VM_MAX_USER_ADDRESS) { 4107 /* 4108 * Kernel map, pv_entry-tracked.
4109 */ 4110 pt_pv = NULL; 4111 pte_pv = pmap_allocpte(pmap, pmap_pte_pindex(va), NULL); 4112 ptep = vtopte(va); 4113 } else { 4114 /* 4115 * User map 4116 */ 4117 pte_pv = pmap_allocpte_seg(pmap, pmap_pte_pindex(va), 4118 &pt_pv, entry, va); 4119 ptep = pv_pte_lookup(pt_pv, pmap_pte_index(va)); 4120 } 4121 origpte = *ptep; 4122 cpu_ccfence(); 4123 KASSERT(origpte == 0 || 4124 (origpte & pmap->pmap_bits[PG_MANAGED_IDX]), 4125 ("Invalid PTE 0x%016jx @ 0x%016jx\n", origpte, va)); 4126 } 4127 4128 pa = VM_PAGE_TO_PHYS(m); 4129 opa = origpte & PG_FRAME; 4130 4131 newpte = (pt_entry_t)(pa | pte_prot(pmap, prot) | 4132 pmap->pmap_bits[PG_V_IDX] | pmap->pmap_bits[PG_A_IDX]); 4133 if (wired) 4134 newpte |= pmap->pmap_bits[PG_W_IDX]; 4135 if (va < VM_MAX_USER_ADDRESS) 4136 newpte |= pmap->pmap_bits[PG_U_IDX]; 4137 if (pte_pv) 4138 newpte |= pmap->pmap_bits[PG_MANAGED_IDX]; 4139 // if (pmap == &kernel_pmap) 4140 // newpte |= pgeflag; 4141 newpte |= pmap->pmap_cache_bits[m->pat_mode]; 4142 if (m->flags & PG_FICTITIOUS) 4143 newpte |= pmap->pmap_bits[PG_DEVICE_IDX]; 4144 4145 /* 4146 * It is possible for multiple faults to occur in threaded 4147 * environments, the existing pte might be correct. 4148 */ 4149 if (((origpte ^ newpte) & ~(pt_entry_t)(pmap->pmap_bits[PG_M_IDX] | 4150 pmap->pmap_bits[PG_A_IDX])) == 0) 4151 goto done; 4152 4153 if ((prot & VM_PROT_NOSYNC) == 0) 4154 pmap_inval_init(&info); 4155 4156 /* 4157 * Ok, either the address changed or the protection or wiring 4158 * changed. 4159 * 4160 * Clear the current entry, interlocking the removal. For managed 4161 * pte's this will also flush the modified state to the vm_page. 4162 * Atomic ops are mandatory in order to ensure that PG_M events are 4163 * not lost during any transition. 4164 * 4165 * WARNING: The caller has busied the new page but not the original 4166 * vm_page which we are trying to replace. Because we hold 4167 * the pte_pv lock, but have not busied the page, PG bits 4168 * can be cleared out from under us. 4169 */ 4170 if (opa) { 4171 if (pte_pv) { 4172 /* 4173 * pmap_remove_pv_pte() unwires pt_pv and assumes 4174 * we will free pte_pv, but since we are reusing 4175 * pte_pv we want to retain the wire count. 4176 * 4177 * pt_pv won't exist for a kernel page (managed or 4178 * otherwise). 4179 */ 4180 if (pt_pv) 4181 vm_page_wire_quick(pt_pv->pv_m); 4182 if (prot & VM_PROT_NOSYNC) 4183 pmap_remove_pv_pte(pte_pv, pt_pv, NULL); 4184 else 4185 pmap_remove_pv_pte(pte_pv, pt_pv, &info); 4186 if (pte_pv->pv_m) 4187 pmap_remove_pv_page(pte_pv); 4188 } else if (prot & VM_PROT_NOSYNC) { 4189 /* 4190 * Unmanaged page, NOSYNC (no mmu sync) requested. 4191 * 4192 * Leave wire count on PT page intact. 4193 */ 4194 (void)pte_load_clear(ptep); 4195 cpu_invlpg((void *)va); 4196 atomic_add_long(&pmap->pm_stats.resident_count, -1); 4197 } else { 4198 /* 4199 * Unmanaged page, normal enter. 4200 * 4201 * Leave wire count on PT page intact. 4202 */ 4203 pmap_inval_interlock(&info, pmap, va); 4204 (void)pte_load_clear(ptep); 4205 pmap_inval_deinterlock(&info, pmap); 4206 atomic_add_long(&pmap->pm_stats.resident_count, -1); 4207 } 4208 KKASSERT(*ptep == 0); 4209 } 4210 4211 #ifdef PMAP_DEBUG2 4212 if (pmap_enter_debug > 0) { 4213 --pmap_enter_debug; 4214 kprintf("pmap_enter: va=%lx m=%p origpte=%lx newpte=%lx ptep=%p" 4215 " pte_pv=%p pt_pv=%p opa=%lx prot=%02x\n", 4216 va, m, 4217 origpte, newpte, ptep, 4218 pte_pv, pt_pv, opa, prot); 4219 } 4220 #endif 4221 4222 if (pte_pv) { 4223 /* 4224 * Enter on the PV list if part of our managed memory. 
4225 * Wiring of the PT page is already handled. 4226 */ 4227 KKASSERT(pte_pv->pv_m == NULL); 4228 vm_page_spin_lock(m); 4229 pte_pv->pv_m = m; 4230 pmap_page_stats_adding(m); 4231 TAILQ_INSERT_TAIL(&m->md.pv_list, pte_pv, pv_list); 4232 vm_page_flag_set(m, PG_MAPPED); 4233 vm_page_spin_unlock(m); 4234 } else if (pt_pv && opa == 0) { 4235 /* 4236 * We have to adjust the wire count on the PT page ourselves 4237 * for unmanaged entries. If opa was non-zero we retained 4238 * the existing wire count from the removal. 4239 */ 4240 vm_page_wire_quick(pt_pv->pv_m); 4241 } 4242 4243 /* 4244 * Kernel VMAs (pt_pv == NULL) require pmap invalidation interlocks. 4245 * 4246 * User VMAs do not because those will be zero->non-zero, so no 4247 * stale entries to worry about at this point. 4248 * 4249 * For KVM there appear to still be issues. Theoretically we 4250 * should be able to scrap the interlocks entirely but we 4251 * get crashes. 4252 */ 4253 if ((prot & VM_PROT_NOSYNC) == 0 && pt_pv == NULL) 4254 pmap_inval_interlock(&info, pmap, va); 4255 4256 /* 4257 * Set the pte 4258 */ 4259 *(volatile pt_entry_t *)ptep = newpte; 4260 4261 if ((prot & VM_PROT_NOSYNC) == 0 && pt_pv == NULL) 4262 pmap_inval_deinterlock(&info, pmap); 4263 else if (pt_pv == NULL) 4264 cpu_invlpg((void *)va); 4265 4266 if (wired) { 4267 if (pte_pv) { 4268 atomic_add_long(&pte_pv->pv_pmap->pm_stats.wired_count, 4269 1); 4270 } else { 4271 atomic_add_long(&pmap->pm_stats.wired_count, 1); 4272 } 4273 } 4274 if (newpte & pmap->pmap_bits[PG_RW_IDX]) 4275 vm_page_flag_set(m, PG_WRITEABLE); 4276 4277 /* 4278 * Unmanaged pages need manual resident_count tracking. 4279 */ 4280 if (pte_pv == NULL && pt_pv) 4281 atomic_add_long(&pt_pv->pv_pmap->pm_stats.resident_count, 1); 4282 4283 /* 4284 * Cleanup 4285 */ 4286 if ((prot & VM_PROT_NOSYNC) == 0 || pte_pv == NULL) 4287 pmap_inval_done(&info); 4288 done: 4289 KKASSERT((newpte & pmap->pmap_bits[PG_MANAGED_IDX]) == 0 || 4290 (m->flags & PG_MAPPED)); 4291 4292 /* 4293 * Cleanup the pv entry, allowing other accessors. 4294 */ 4295 if (pte_pv) 4296 pv_put(pte_pv); 4297 if (pt_pv) 4298 pv_put(pt_pv); 4299 } 4300 4301 /* 4302 * This code works like pmap_enter() but assumes VM_PROT_READ and not-wired. 4303 * This code also assumes that the pmap has no pre-existing entry for this 4304 * VA. 4305 * 4306 * This code currently may only be used on user pmaps, not kernel_pmap. 4307 */ 4308 void 4309 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m) 4310 { 4311 pmap_enter(pmap, va, m, VM_PROT_READ, FALSE, NULL); 4312 } 4313 4314 /* 4315 * Make a temporary mapping for a physical address. This is only intended 4316 * to be used for panic dumps. 4317 * 4318 * The caller is responsible for calling smp_invltlb(). 4319 */ 4320 void * 4321 pmap_kenter_temporary(vm_paddr_t pa, long i) 4322 { 4323 pmap_kenter_quick((vm_offset_t)crashdumpmap + (i * PAGE_SIZE), pa); 4324 return ((void *)crashdumpmap); 4325 } 4326 4327 #define MAX_INIT_PT (96) 4328 4329 /* 4330 * This routine preloads the ptes for a given object into the specified pmap. 4331 * This eliminates the blast of soft faults on process startup and 4332 * immediately after an mmap. 
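 *
 * A hypothetical caller (sketch only; 'pindex' is assumed to be the page
 * index of the mapping's starting offset within the vnode object) might
 * preload a freshly mmap()ed region with:
 *
 *	pmap_object_init_pt(pmap, addr, prot, object, pindex,
 *			    size, MAP_PREFAULT_PARTIAL);
 *
 * The checks below quietly turn the call into a no-op when the object is
 * not a vnode, when the range is too large for a partial prefault, or when
 * the range is segment-aligned and can use a shared page table page instead.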
4333 */ 4334 static int pmap_object_init_pt_callback(vm_page_t p, void *data); 4335 4336 void 4337 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_prot_t prot, 4338 vm_object_t object, vm_pindex_t pindex, 4339 vm_size_t size, int limit) 4340 { 4341 struct rb_vm_page_scan_info info; 4342 struct lwp *lp; 4343 vm_size_t psize; 4344 4345 /* 4346 * We can't preinit if read access isn't set or there is no pmap 4347 * or object. 4348 */ 4349 if ((prot & VM_PROT_READ) == 0 || pmap == NULL || object == NULL) 4350 return; 4351 4352 /* 4353 * We can't preinit if the pmap is not the current pmap 4354 */ 4355 lp = curthread->td_lwp; 4356 if (lp == NULL || pmap != vmspace_pmap(lp->lwp_vmspace)) 4357 return; 4358 4359 /* 4360 * Misc additional checks 4361 */ 4362 psize = x86_64_btop(size); 4363 4364 if ((object->type != OBJT_VNODE) || 4365 ((limit & MAP_PREFAULT_PARTIAL) && (psize > MAX_INIT_PT) && 4366 (object->resident_page_count > MAX_INIT_PT))) { 4367 return; 4368 } 4369 4370 if (pindex + psize > object->size) { 4371 if (object->size < pindex) 4372 return; 4373 psize = object->size - pindex; 4374 } 4375 4376 if (psize == 0) 4377 return; 4378 4379 /* 4380 * If everything is segment-aligned do not pre-init here. Instead 4381 * allow the normal vm_fault path to pass a segment hint to 4382 * pmap_enter() which will then use an object-referenced shared 4383 * page table page. 4384 */ 4385 if ((addr & SEG_MASK) == 0 && 4386 (ctob(psize) & SEG_MASK) == 0 && 4387 (ctob(pindex) & SEG_MASK) == 0) { 4388 return; 4389 } 4390 4391 /* 4392 * Use a red-black scan to traverse the requested range and load 4393 * any valid pages found into the pmap. 4394 * 4395 * We cannot safely scan the object's memq without holding the 4396 * object token. 4397 */ 4398 info.start_pindex = pindex; 4399 info.end_pindex = pindex + psize - 1; 4400 info.limit = limit; 4401 info.mpte = NULL; 4402 info.addr = addr; 4403 info.pmap = pmap; 4404 4405 vm_object_hold_shared(object); 4406 vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp, 4407 pmap_object_init_pt_callback, &info); 4408 vm_object_drop(object); 4409 } 4410 4411 static 4412 int 4413 pmap_object_init_pt_callback(vm_page_t p, void *data) 4414 { 4415 struct rb_vm_page_scan_info *info = data; 4416 vm_pindex_t rel_index; 4417 4418 /* 4419 * don't allow an madvise to blow away our really 4420 * free pages allocating pv entries. 4421 */ 4422 if ((info->limit & MAP_PREFAULT_MADVISE) && 4423 vmstats.v_free_count < vmstats.v_free_reserved) { 4424 return(-1); 4425 } 4426 4427 /* 4428 * Ignore list markers and ignore pages we cannot instantly 4429 * busy (while holding the object token). 4430 */ 4431 if (p->flags & PG_MARKER) 4432 return 0; 4433 if (vm_page_busy_try(p, TRUE)) 4434 return 0; 4435 if (((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) && 4436 (p->flags & PG_FICTITIOUS) == 0) { 4437 if ((p->queue - p->pc) == PQ_CACHE) 4438 vm_page_deactivate(p); 4439 rel_index = p->pindex - info->start_pindex; 4440 pmap_enter_quick(info->pmap, 4441 info->addr + x86_64_ptob(rel_index), p); 4442 } 4443 vm_page_wakeup(p); 4444 lwkt_yield(); 4445 return(0); 4446 } 4447 4448 /* 4449 * Return TRUE if the pmap is in shape to trivially pre-fault the specified 4450 * address. 4451 * 4452 * Returns FALSE if it would be non-trivial or if a pte is already loaded 4453 * into the slot. 4454 * 4455 * XXX This is safe only because page table pages are not freed. 
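 *
 * A sketch of the intended usage (hypothetical caller, not code from this
 * file):
 *
 *	if (pmap_prefault_ok(pmap, addr))
 *		pmap_enter_quick(pmap, addr, m);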
4456 */ 4457 int 4458 pmap_prefault_ok(pmap_t pmap, vm_offset_t addr) 4459 { 4460 pt_entry_t *pte; 4461 4462 /*spin_lock(&pmap->pm_spin);*/ 4463 if ((pte = pmap_pte(pmap, addr)) != NULL) { 4464 if (*pte & pmap->pmap_bits[PG_V_IDX]) { 4465 /*spin_unlock(&pmap->pm_spin);*/ 4466 return FALSE; 4467 } 4468 } 4469 /*spin_unlock(&pmap->pm_spin);*/ 4470 return TRUE; 4471 } 4472 4473 /* 4474 * Change the wiring attribute for a pmap/va pair. The mapping must already 4475 * exist in the pmap. The mapping may or may not be managed. 4476 */ 4477 void 4478 pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired, 4479 vm_map_entry_t entry) 4480 { 4481 pt_entry_t *ptep; 4482 pv_entry_t pv; 4483 4484 if (pmap == NULL) 4485 return; 4486 lwkt_gettoken(&pmap->pm_token); 4487 pv = pmap_allocpte_seg(pmap, pmap_pt_pindex(va), NULL, entry, va); 4488 ptep = pv_pte_lookup(pv, pmap_pte_index(va)); 4489 4490 if (wired && !pmap_pte_w(pmap, ptep)) 4491 atomic_add_long(&pv->pv_pmap->pm_stats.wired_count, 1); 4492 else if (!wired && pmap_pte_w(pmap, ptep)) 4493 atomic_add_long(&pv->pv_pmap->pm_stats.wired_count, -1); 4494 4495 /* 4496 * Wiring is not a hardware characteristic so there is no need to 4497 * invalidate TLB. However, in an SMP environment we must use 4498 * a locked bus cycle to update the pte (if we are not using 4499 * the pmap_inval_*() API that is)... it's ok to do this for simple 4500 * wiring changes. 4501 */ 4502 if (wired) 4503 atomic_set_long(ptep, pmap->pmap_bits[PG_W_IDX]); 4504 else 4505 atomic_clear_long(ptep, pmap->pmap_bits[PG_W_IDX]); 4506 pv_put(pv); 4507 lwkt_reltoken(&pmap->pm_token); 4508 } 4509 4510 4511 4512 /* 4513 * Copy the range specified by src_addr/len from the source map to 4514 * the range dst_addr/len in the destination map. 4515 * 4516 * This routine is only advisory and need not do anything. 4517 */ 4518 void 4519 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, 4520 vm_size_t len, vm_offset_t src_addr) 4521 { 4522 } 4523 4524 /* 4525 * pmap_zero_page: 4526 * 4527 * Zero the specified physical page. 4528 * 4529 * This function may be called from an interrupt and no locking is 4530 * required. 4531 */ 4532 void 4533 pmap_zero_page(vm_paddr_t phys) 4534 { 4535 vm_offset_t va = PHYS_TO_DMAP(phys); 4536 4537 pagezero((void *)va); 4538 } 4539 4540 /* 4541 * pmap_page_assertzero: 4542 * 4543 * Assert that a page is empty; panic if it isn't. 4544 */ 4545 void 4546 pmap_page_assertzero(vm_paddr_t phys) 4547 { 4548 vm_offset_t va = PHYS_TO_DMAP(phys); 4549 size_t i; 4550 4551 for (i = 0; i < PAGE_SIZE; i += sizeof(long)) { 4552 if (*(long *)((char *)va + i) != 0) { 4553 panic("pmap_page_assertzero() @ %p not zero!", 4554 (void *)(intptr_t)va); 4555 } 4556 } 4557 } 4558 4559 /* 4560 * pmap_zero_page_area: 4561 * 4562 * Zero part of a physical page by mapping it into memory and clearing 4563 * its contents with bzero. 4564 * 4565 * off and size may not cover an area beyond a single hardware page. 4566 */ 4567 void 4568 pmap_zero_page_area(vm_paddr_t phys, int off, int size) 4569 { 4570 vm_offset_t virt = PHYS_TO_DMAP(phys); 4571 4572 bzero((char *)virt + off, size); 4573 } 4574 4575 /* 4576 * pmap_copy_page: 4577 * 4578 * Copy the physical page from the source PA to the target PA. 4579 * This function may be called from an interrupt. No locking 4580 * is required.
4581 */ 4582 void 4583 pmap_copy_page(vm_paddr_t src, vm_paddr_t dst) 4584 { 4585 vm_offset_t src_virt, dst_virt; 4586 4587 src_virt = PHYS_TO_DMAP(src); 4588 dst_virt = PHYS_TO_DMAP(dst); 4589 bcopy((void *)src_virt, (void *)dst_virt, PAGE_SIZE); 4590 } 4591 4592 /* 4593 * pmap_copy_page_frag: 4594 * 4595 * Copy a portion of the physical page from the source PA to the target PA. 4596 * This function may be called from an interrupt. No locking 4597 * is required. 4598 */ 4599 void 4600 pmap_copy_page_frag(vm_paddr_t src, vm_paddr_t dst, size_t bytes) 4601 { 4602 vm_offset_t src_virt, dst_virt; 4603 4604 src_virt = PHYS_TO_DMAP(src); 4605 dst_virt = PHYS_TO_DMAP(dst); 4606 4607 bcopy((char *)src_virt + (src & PAGE_MASK), 4608 (char *)dst_virt + (dst & PAGE_MASK), 4609 bytes); 4610 } 4611 4612 /* 4613 * Returns true if the pmap's pv is one of the first 16 pvs linked to from 4614 * this page. This count may be changed upwards or downwards in the future; 4615 * it is only necessary that true be returned for a small subset of pmaps 4616 * for proper page aging. 4617 */ 4618 boolean_t 4619 pmap_page_exists_quick(pmap_t pmap, vm_page_t m) 4620 { 4621 pv_entry_t pv; 4622 int loops = 0; 4623 4624 if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) 4625 return FALSE; 4626 4627 vm_page_spin_lock(m); 4628 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 4629 if (pv->pv_pmap == pmap) { 4630 vm_page_spin_unlock(m); 4631 return TRUE; 4632 } 4633 loops++; 4634 if (loops >= 16) 4635 break; 4636 } 4637 vm_page_spin_unlock(m); 4638 return (FALSE); 4639 } 4640 4641 /* 4642 * Remove all pages from the specified address space. This aids process exit 4643 * speeds. Also, this code may be special cased for the current process 4644 * only. 4645 */ 4646 void 4647 pmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 4648 { 4649 pmap_remove_noinval(pmap, sva, eva); 4650 cpu_invltlb(); 4651 } 4652 4653 /* 4654 * pmap_testbit tests bits in ptes. Note that the testbit/clearbit 4655 * routines are inline, and a lot of things compile-time evaluate. 4656 */ 4657 static 4658 boolean_t 4659 pmap_testbit(vm_page_t m, int bit) 4660 { 4661 pv_entry_t pv; 4662 pt_entry_t *pte; 4663 pmap_t pmap; 4664 4665 if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) 4666 return FALSE; 4667 4668 if (TAILQ_FIRST(&m->md.pv_list) == NULL) 4669 return FALSE; 4670 vm_page_spin_lock(m); 4671 if (TAILQ_FIRST(&m->md.pv_list) == NULL) { 4672 vm_page_spin_unlock(m); 4673 return FALSE; 4674 } 4675 4676 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 4677 4678 #if defined(PMAP_DIAGNOSTIC) 4679 if (pv->pv_pmap == NULL) { 4680 kprintf("Null pmap (tb) at pindex: %"PRIu64"\n", 4681 pv->pv_pindex); 4682 continue; 4683 } 4684 #endif 4685 pmap = pv->pv_pmap; 4686 4687 /* 4688 * If the bit being tested is the modified bit, then 4689 * mark clean_map and ptes as never 4690 * modified. 4691 * 4692 * WARNING! Because we do not lock the pv, *pte can be in a 4693 * state of flux. Despite this the value of *pte 4694 * will still be related to the vm_page in some way 4695 * because the pv cannot be destroyed as long as we 4696 * hold the vm_page spin lock.
4697 */ 4698 if (bit == PG_A_IDX || bit == PG_M_IDX) { 4699 //& (pmap->pmap_bits[PG_A_IDX] | pmap->pmap_bits[PG_M_IDX])) { 4700 if (!pmap_track_modified(pv->pv_pindex)) 4701 continue; 4702 } 4703 4704 pte = pmap_pte_quick(pv->pv_pmap, pv->pv_pindex << PAGE_SHIFT); 4705 if (*pte & pmap->pmap_bits[bit]) { 4706 vm_page_spin_unlock(m); 4707 return TRUE; 4708 } 4709 } 4710 vm_page_spin_unlock(m); 4711 return (FALSE); 4712 } 4713 4714 /* 4715 * This routine is used to modify bits in ptes. Only one bit should be 4716 * specified. PG_RW requires special handling. 4717 * 4718 * Caller must NOT hold any spin locks. 4719 */ 4720 static __inline 4721 void 4722 pmap_clearbit(vm_page_t m, int bit_index) 4723 { 4724 struct pmap_inval_info info; 4725 pv_entry_t pv; 4726 pt_entry_t *pte; 4727 pt_entry_t pbits; 4728 pmap_t pmap; 4729 4730 if (bit_index == PG_RW_IDX) 4731 vm_page_flag_clear(m, PG_WRITEABLE); 4732 if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) { 4733 return; 4734 } 4735 4736 /* 4737 * PG_M or PG_A case 4738 * 4739 * Loop over all current mappings, setting/clearing as appropriate. If 4740 * setting RO do we need to clear the VAC? 4741 * 4742 * NOTE: When clearing PG_M we could also (not implemented) drop 4743 * through to the PG_RW code and clear PG_RW too, forcing 4744 * a fault on write to redetect PG_M for virtual kernels, but 4745 * it isn't necessary since virtual kernels invalidate the 4746 * pte when they clear the VPTE_M bit in their virtual page 4747 * tables. 4748 * 4749 * NOTE: Does not re-dirty the page when clearing only PG_M. 4750 * 4751 * NOTE: Because we do not lock the pv, *pte can be in a state of 4752 * flux. Despite this the value of *pte is still somewhat 4753 * related while we hold the vm_page spin lock. 4754 * 4755 * *pte can be zero due to this race. Since we are clearing 4756 * bits we basically do no harm when this race occurs. 4757 */ 4758 if (bit_index != PG_RW_IDX) { 4759 vm_page_spin_lock(m); 4760 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 4761 #if defined(PMAP_DIAGNOSTIC) 4762 if (pv->pv_pmap == NULL) { 4763 kprintf("Null pmap (cb) at pindex: %"PRIu64"\n", 4764 pv->pv_pindex); 4765 continue; 4766 } 4767 #endif 4768 pmap = pv->pv_pmap; 4769 pte = pmap_pte_quick(pv->pv_pmap, 4770 pv->pv_pindex << PAGE_SHIFT); 4771 pbits = *pte; 4772 if (pbits & pmap->pmap_bits[bit_index]) 4773 atomic_clear_long(pte, pmap->pmap_bits[bit_index]); 4774 } 4775 vm_page_spin_unlock(m); 4776 return; 4777 } 4778 4779 /* 4780 * Clear PG_RW. Also clears PG_M and marks the page dirty if PG_M 4781 * was set. 4782 */ 4783 pmap_inval_init(&info); 4784 4785 restart: 4786 vm_page_spin_lock(m); 4787 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 4788 /* 4789 * don't write protect pager mappings 4790 */ 4791 if (!pmap_track_modified(pv->pv_pindex)) 4792 continue; 4793 4794 #if defined(PMAP_DIAGNOSTIC) 4795 if (pv->pv_pmap == NULL) { 4796 kprintf("Null pmap (cb) at pindex: %"PRIu64"\n", 4797 pv->pv_pindex); 4798 continue; 4799 } 4800 #endif 4801 pmap = pv->pv_pmap; 4802 /* 4803 * Skip pages which do not have PG_RW set.
4804 */ 4805 pte = pmap_pte_quick(pv->pv_pmap, pv->pv_pindex << PAGE_SHIFT); 4806 if ((*pte & pmap->pmap_bits[PG_RW_IDX]) == 0) 4807 continue; 4808 4809 /* 4810 * Lock the PV 4811 */ 4812 if (pv_hold_try(pv)) { 4813 vm_page_spin_unlock(m); 4814 } else { 4815 vm_page_spin_unlock(m); 4816 pv_lock(pv); /* held, now do a blocking lock */ 4817 } 4818 if (pv->pv_pmap != pmap || pv->pv_m != m) { 4819 pv_put(pv); /* and release */ 4820 goto restart; /* anything could have happened */ 4821 } 4822 pmap_inval_interlock(&info, pmap, 4823 (vm_offset_t)pv->pv_pindex << PAGE_SHIFT); 4824 KKASSERT(pv->pv_pmap == pmap); 4825 for (;;) { 4826 pbits = *pte; 4827 cpu_ccfence(); 4828 if (atomic_cmpset_long(pte, pbits, pbits & 4829 ~(pmap->pmap_bits[PG_RW_IDX] | 4830 pmap->pmap_bits[PG_M_IDX]))) { 4831 break; 4832 } 4833 } 4834 pmap_inval_deinterlock(&info, pmap); 4835 vm_page_spin_lock(m); 4836 4837 /* 4838 * If PG_M was found to be set while we were clearing PG_RW 4839 * we also clear PG_M (done above) and mark the page dirty. 4840 * Callers expect this behavior. 4841 */ 4842 if (pbits & pmap->pmap_bits[PG_M_IDX]) 4843 vm_page_dirty(m); 4844 pv_put(pv); 4845 } 4846 vm_page_spin_unlock(m); 4847 pmap_inval_done(&info); 4848 } 4849 4850 /* 4851 * Lower the permission for all mappings to a given page. 4852 * 4853 * Page must be busied by caller. Because page is busied by caller this 4854 * should not be able to race a pmap_enter(). 4855 */ 4856 void 4857 pmap_page_protect(vm_page_t m, vm_prot_t prot) 4858 { 4859 /* JG NX support? */ 4860 if ((prot & VM_PROT_WRITE) == 0) { 4861 if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) { 4862 /* 4863 * NOTE: pmap_clearbit(.. PG_RW) also clears 4864 * the PG_WRITEABLE flag in (m). 4865 */ 4866 pmap_clearbit(m, PG_RW_IDX); 4867 } else { 4868 pmap_remove_all(m); 4869 } 4870 } 4871 } 4872 4873 vm_paddr_t 4874 pmap_phys_address(vm_pindex_t ppn) 4875 { 4876 return (x86_64_ptob(ppn)); 4877 } 4878 4879 /* 4880 * Return a count of reference bits for a page, clearing those bits. 4881 * It is not necessary for every reference bit to be cleared, but it 4882 * is necessary that 0 only be returned when there are truly no 4883 * reference bits set. 4884 * 4885 * XXX: The exact number of bits to check and clear is a matter that 4886 * should be tested and standardized at some point in the future for 4887 * optimal aging of shared pages. 4888 * 4889 * This routine may not block. 4890 */ 4891 int 4892 pmap_ts_referenced(vm_page_t m) 4893 { 4894 pv_entry_t pv; 4895 pt_entry_t *pte; 4896 pmap_t pmap; 4897 int rtval = 0; 4898 4899 if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) 4900 return (rtval); 4901 4902 vm_page_spin_lock(m); 4903 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 4904 if (!pmap_track_modified(pv->pv_pindex)) 4905 continue; 4906 pmap = pv->pv_pmap; 4907 pte = pmap_pte_quick(pv->pv_pmap, pv->pv_pindex << PAGE_SHIFT); 4908 if (pte && (*pte & pmap->pmap_bits[PG_A_IDX])) { 4909 atomic_clear_long(pte, pmap->pmap_bits[PG_A_IDX]); 4910 rtval++; 4911 if (rtval > 4) 4912 break; 4913 } 4914 } 4915 vm_page_spin_unlock(m); 4916 return (rtval); 4917 } 4918 4919 /* 4920 * pmap_is_modified: 4921 * 4922 * Return whether or not the specified physical page was modified 4923 * in any physical maps. 4924 */ 4925 boolean_t 4926 pmap_is_modified(vm_page_t m) 4927 { 4928 boolean_t res; 4929 4930 res = pmap_testbit(m, PG_M_IDX); 4931 return (res); 4932 } 4933 4934 /* 4935 * Clear the modify bits on the specified physical page. 
4936 */ 4937 void 4938 pmap_clear_modify(vm_page_t m) 4939 { 4940 pmap_clearbit(m, PG_M_IDX); 4941 } 4942 4943 /* 4944 * pmap_clear_reference: 4945 * 4946 * Clear the reference bit on the specified physical page. 4947 */ 4948 void 4949 pmap_clear_reference(vm_page_t m) 4950 { 4951 pmap_clearbit(m, PG_A_IDX); 4952 } 4953 4954 /* 4955 * Miscellaneous support routines follow 4956 */ 4957 4958 static 4959 void 4960 i386_protection_init(void) 4961 { 4962 int *kp, prot; 4963 4964 /* JG NX support may go here; No VM_PROT_EXECUTE ==> set NX bit */ 4965 kp = protection_codes; 4966 for (prot = 0; prot < PROTECTION_CODES_SIZE; prot++) { 4967 switch (prot) { 4968 case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE: 4969 /* 4970 * Read access is also 0. There isn't any execute bit, 4971 * so just make it readable. 4972 */ 4973 case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE: 4974 case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE: 4975 case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE: 4976 *kp++ = 0; 4977 break; 4978 case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE: 4979 case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE: 4980 case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE: 4981 case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE: 4982 *kp++ = pmap_bits_default[PG_RW_IDX]; 4983 break; 4984 } 4985 } 4986 } 4987 4988 /* 4989 * Map a set of physical memory pages into the kernel virtual 4990 * address space. Return a pointer to where it is mapped. This 4991 * routine is intended to be used for mapping device memory, 4992 * NOT real memory. 4993 * 4994 * NOTE: We can't use pgeflag unless we invalidate the pages one at 4995 * a time. 4996 * 4997 * NOTE: The PAT attributes {WRITE_BACK, WRITE_THROUGH, UNCACHED, UNCACHEABLE} 4998 * work whether the cpu supports PAT or not. The remaining PAT 4999 * attributes {WRITE_PROTECTED, WRITE_COMBINING} only work if the cpu 5000 * supports PAT. 5001 */ 5002 void * 5003 pmap_mapdev(vm_paddr_t pa, vm_size_t size) 5004 { 5005 return(pmap_mapdev_attr(pa, size, PAT_WRITE_BACK)); 5006 } 5007 5008 void * 5009 pmap_mapdev_uncacheable(vm_paddr_t pa, vm_size_t size) 5010 { 5011 return(pmap_mapdev_attr(pa, size, PAT_UNCACHEABLE)); 5012 } 5013 5014 void * 5015 pmap_mapbios(vm_paddr_t pa, vm_size_t size) 5016 { 5017 return (pmap_mapdev_attr(pa, size, PAT_WRITE_BACK)); 5018 } 5019 5020 /* 5021 * Map a set of physical memory pages into the kernel virtual 5022 * address space. Return a pointer to where it is mapped. This 5023 * routine is intended to be used for mapping device memory, 5024 * NOT real memory. 
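 *
 * A hedged driver-style example ('pa' and 'size' are assumed to describe a
 * device register window and are not defined here):
 *
 *	void *regs = pmap_mapdev_attr(pa, size, PAT_UNCACHEABLE);
 *	...
 *	pmap_unmapdev((vm_offset_t)regs, size);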
5025 */ 5026 void * 5027 pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode) 5028 { 5029 vm_offset_t va, tmpva, offset; 5030 pt_entry_t *pte; 5031 vm_size_t tmpsize; 5032 5033 offset = pa & PAGE_MASK; 5034 size = roundup(offset + size, PAGE_SIZE); 5035 5036 va = kmem_alloc_nofault(&kernel_map, size, PAGE_SIZE); 5037 if (va == 0) 5038 panic("pmap_mapdev: Couldn't alloc kernel virtual memory"); 5039 5040 pa = pa & ~PAGE_MASK; 5041 for (tmpva = va, tmpsize = size; tmpsize > 0;) { 5042 pte = vtopte(tmpva); 5043 *pte = pa | 5044 kernel_pmap.pmap_bits[PG_RW_IDX] | 5045 kernel_pmap.pmap_bits[PG_V_IDX] | /* pgeflag | */ 5046 kernel_pmap.pmap_cache_bits[mode]; 5047 tmpsize -= PAGE_SIZE; 5048 tmpva += PAGE_SIZE; 5049 pa += PAGE_SIZE; 5050 } 5051 pmap_invalidate_range(&kernel_pmap, va, va + size); 5052 pmap_invalidate_cache_range(va, va + size); 5053 5054 return ((void *)(va + offset)); 5055 } 5056 5057 void 5058 pmap_unmapdev(vm_offset_t va, vm_size_t size) 5059 { 5060 vm_offset_t base, offset; 5061 5062 base = va & ~PAGE_MASK; 5063 offset = va & PAGE_MASK; 5064 size = roundup(offset + size, PAGE_SIZE); 5065 pmap_qremove(va, size >> PAGE_SHIFT); 5066 kmem_free(&kernel_map, base, size); 5067 } 5068 5069 /* 5070 * Sets the memory attribute for the specified page. 5071 */ 5072 void 5073 pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma) 5074 { 5075 5076 m->pat_mode = ma; 5077 5078 /* 5079 * If "m" is a normal page, update its direct mapping. This update 5080 * can be relied upon to perform any cache operations that are 5081 * required for data coherence. 5082 */ 5083 if ((m->flags & PG_FICTITIOUS) == 0) 5084 pmap_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)), 1, m->pat_mode); 5085 } 5086 5087 /* 5088 * Change the PAT attribute on an existing kernel memory map. Caller 5089 * must ensure that the virtual memory in question is not accessed 5090 * during the adjustment. 5091 */ 5092 void 5093 pmap_change_attr(vm_offset_t va, vm_size_t count, int mode) 5094 { 5095 pt_entry_t *pte; 5096 vm_offset_t base; 5097 int changed = 0; 5098 5099 if (va == 0) 5100 panic("pmap_change_attr: va is NULL"); 5101 base = trunc_page(va); 5102 5103 while (count) { 5104 pte = vtopte(va); 5105 *pte = (*pte & ~(pt_entry_t)(kernel_pmap.pmap_cache_mask)) | 5106 kernel_pmap.pmap_cache_bits[mode]; 5107 --count; 5108 va += PAGE_SIZE; 5109 } 5110 5111 changed = 1; /* XXX: not optimal */ 5112 5113 /* 5114 * Flush CPU caches if required to make sure any data isn't cached that 5115 * shouldn't be, etc. 
5116 */ 5117 if (changed) { 5118 pmap_invalidate_range(&kernel_pmap, base, va); 5119 pmap_invalidate_cache_range(base, va); 5120 } 5121 } 5122 5123 /* 5124 * perform the pmap work for mincore 5125 */ 5126 int 5127 pmap_mincore(pmap_t pmap, vm_offset_t addr) 5128 { 5129 pt_entry_t *ptep, pte; 5130 vm_page_t m; 5131 int val = 0; 5132 5133 lwkt_gettoken(&pmap->pm_token); 5134 ptep = pmap_pte(pmap, addr); 5135 5136 if (ptep && (pte = *ptep) != 0) { 5137 vm_offset_t pa; 5138 5139 val = MINCORE_INCORE; 5140 if ((pte & pmap->pmap_bits[PG_MANAGED_IDX]) == 0) 5141 goto done; 5142 5143 pa = pte & PG_FRAME; 5144 5145 if (pte & pmap->pmap_bits[PG_DEVICE_IDX]) 5146 m = NULL; 5147 else 5148 m = PHYS_TO_VM_PAGE(pa); 5149 5150 /* 5151 * Modified by us 5152 */ 5153 if (pte & pmap->pmap_bits[PG_M_IDX]) 5154 val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER; 5155 /* 5156 * Modified by someone 5157 */ 5158 else if (m && (m->dirty || pmap_is_modified(m))) 5159 val |= MINCORE_MODIFIED_OTHER; 5160 /* 5161 * Referenced by us 5162 */ 5163 if (pte & pmap->pmap_bits[PG_A_IDX]) 5164 val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER; 5165 5166 /* 5167 * Referenced by someone 5168 */ 5169 else if (m && ((m->flags & PG_REFERENCED) || 5170 pmap_ts_referenced(m))) { 5171 val |= MINCORE_REFERENCED_OTHER; 5172 vm_page_flag_set(m, PG_REFERENCED); 5173 } 5174 } 5175 done: 5176 lwkt_reltoken(&pmap->pm_token); 5177 5178 return val; 5179 } 5180 5181 /* 5182 * Replace p->p_vmspace with a new one. If adjrefs is non-zero the new 5183 * vmspace will be ref'd and the old one will be deref'd. 5184 * 5185 * The vmspace for all lwps associated with the process will be adjusted 5186 * and cr3 will be reloaded if any lwp is the current lwp. 5187 * 5188 * The process must hold the vmspace->vm_map.token for oldvm and newvm 5189 */ 5190 void 5191 pmap_replacevm(struct proc *p, struct vmspace *newvm, int adjrefs) 5192 { 5193 struct vmspace *oldvm; 5194 struct lwp *lp; 5195 5196 oldvm = p->p_vmspace; 5197 if (oldvm != newvm) { 5198 if (adjrefs) 5199 vmspace_ref(newvm); 5200 p->p_vmspace = newvm; 5201 KKASSERT(p->p_nthreads == 1); 5202 lp = RB_ROOT(&p->p_lwp_tree); 5203 pmap_setlwpvm(lp, newvm); 5204 if (adjrefs) 5205 vmspace_rel(oldvm); 5206 } 5207 } 5208 5209 /* 5210 * Set the vmspace for a LWP. The vmspace is almost universally set the 5211 * same as the process vmspace, but virtual kernels need to swap out contexts 5212 * on a per-lwp basis. 5213 * 5214 * Caller does not necessarily hold any vmspace tokens. Caller must control 5215 * the lwp (typically be in the context of the lwp). We use a critical 5216 * section to protect against statclock and hardclock (statistics collection). 
5217 */ 5218 void 5219 pmap_setlwpvm(struct lwp *lp, struct vmspace *newvm) 5220 { 5221 struct vmspace *oldvm; 5222 struct pmap *pmap; 5223 5224 oldvm = lp->lwp_vmspace; 5225 5226 if (oldvm != newvm) { 5227 crit_enter(); 5228 lp->lwp_vmspace = newvm; 5229 if (curthread->td_lwp == lp) { 5230 pmap = vmspace_pmap(newvm); 5231 ATOMIC_CPUMASK_ORBIT(pmap->pm_active, mycpu->gd_cpuid); 5232 if (pmap->pm_active_lock & CPULOCK_EXCL) 5233 pmap_interlock_wait(newvm); 5234 #if defined(SWTCH_OPTIM_STATS) 5235 tlb_flush_count++; 5236 #endif 5237 if (pmap->pmap_bits[TYPE_IDX] == REGULAR_PMAP) { 5238 curthread->td_pcb->pcb_cr3 = vtophys(pmap->pm_pml4); 5239 } else if (pmap->pmap_bits[TYPE_IDX] == EPT_PMAP) { 5240 curthread->td_pcb->pcb_cr3 = KPML4phys; 5241 } else { 5242 panic("pmap_setlwpvm: unknown pmap type\n"); 5243 } 5244 load_cr3(curthread->td_pcb->pcb_cr3); 5245 pmap = vmspace_pmap(oldvm); 5246 ATOMIC_CPUMASK_NANDBIT(pmap->pm_active, 5247 mycpu->gd_cpuid); 5248 } 5249 crit_exit(); 5250 } 5251 } 5252 5253 /* 5254 * Called when switching to a locked pmap, used to interlock against pmaps 5255 * undergoing modifications to prevent us from activating the MMU for the 5256 * target pmap until all such modifications have completed. We have to do 5257 * this because the thread making the modifications has already set up its 5258 * SMP synchronization mask. 5259 * 5260 * This function cannot sleep! 5261 * 5262 * No requirements. 5263 */ 5264 void 5265 pmap_interlock_wait(struct vmspace *vm) 5266 { 5267 struct pmap *pmap = &vm->vm_pmap; 5268 5269 if (pmap->pm_active_lock & CPULOCK_EXCL) { 5270 crit_enter(); 5271 KKASSERT(curthread->td_critcount >= 2); 5272 DEBUG_PUSH_INFO("pmap_interlock_wait"); 5273 while (pmap->pm_active_lock & CPULOCK_EXCL) { 5274 cpu_ccfence(); 5275 lwkt_process_ipiq(); 5276 } 5277 DEBUG_POP_INFO(); 5278 crit_exit(); 5279 } 5280 } 5281 5282 vm_offset_t 5283 pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size) 5284 { 5285 5286 if ((obj == NULL) || (size < NBPDR) || 5287 ((obj->type != OBJT_DEVICE) && (obj->type != OBJT_MGTDEVICE))) { 5288 return addr; 5289 } 5290 5291 addr = roundup2(addr, NBPDR); 5292 return addr; 5293 } 5294 5295 /* 5296 * Used by kmalloc/kfree, page already exists at va 5297 */ 5298 vm_page_t 5299 pmap_kvtom(vm_offset_t va) 5300 { 5301 pt_entry_t *ptep = vtopte(va); 5302 5303 KKASSERT((*ptep & kernel_pmap.pmap_bits[PG_DEVICE_IDX]) == 0); 5304 return(PHYS_TO_VM_PAGE(*ptep & PG_FRAME)); 5305 } 5306 5307 /* 5308 * Initialize machine-specific shared page directory support. This 5309 * is executed when a VM object is created. 5310 */ 5311 void 5312 pmap_object_init(vm_object_t object) 5313 { 5314 object->md.pmap_rw = NULL; 5315 object->md.pmap_ro = NULL; 5316 } 5317 5318 /* 5319 * Clean up machine-specific shared page directory support. This 5320 * is executed when a VM object is destroyed. 5321 */ 5322 void 5323 pmap_object_free(vm_object_t object) 5324 { 5325 pmap_t pmap; 5326 5327 if ((pmap = object->md.pmap_rw) != NULL) { 5328 object->md.pmap_rw = NULL; 5329 pmap_remove_noinval(pmap, 5330 VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS); 5331 CPUMASK_ASSZERO(pmap->pm_active); 5332 pmap_release(pmap); 5333 pmap_puninit(pmap); 5334 kfree(pmap, M_OBJPMAP); 5335 } 5336 if ((pmap = object->md.pmap_ro) != NULL) { 5337 object->md.pmap_ro = NULL; 5338 pmap_remove_noinval(pmap, 5339 VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS); 5340 CPUMASK_ASSZERO(pmap->pm_active); 5341 pmap_release(pmap); 5342 pmap_puninit(pmap); 5343 kfree(pmap, M_OBJPMAP); 5344 } 5345 } 5346