1 /*- 2 * SPDX-License-Identifier: BSD-4-Clause 3 * 4 * Copyright (c) 1991 Regents of the University of California. 5 * All rights reserved. 6 * Copyright (c) 1994 John S. Dyson 7 * All rights reserved. 8 * Copyright (c) 1994 David Greenman 9 * All rights reserved. 10 * Copyright (c) 2005-2010 Alan L. Cox <alc@cs.rice.edu> 11 * All rights reserved. 12 * 13 * This code is derived from software contributed to Berkeley by 14 * the Systems Programming Group of the University of Utah Computer 15 * Science Department and William Jolitz of UUNET Technologies Inc. 16 * 17 * Redistribution and use in source and binary forms, with or without 18 * modification, are permitted provided that the following conditions 19 * are met: 20 * 1. Redistributions of source code must retain the above copyright 21 * notice, this list of conditions and the following disclaimer. 22 * 2. Redistributions in binary form must reproduce the above copyright 23 * notice, this list of conditions and the following disclaimer in the 24 * documentation and/or other materials provided with the distribution. 25 * 3. All advertising materials mentioning features or use of this software 26 * must display the following acknowledgement: 27 * This product includes software developed by the University of 28 * California, Berkeley and its contributors. 29 * 4. Neither the name of the University nor the names of its contributors 30 * may be used to endorse or promote products derived from this software 31 * without specific prior written permission. 32 * 33 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 34 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 35 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 36 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 37 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 38 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 39 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 40 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 41 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 42 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 43 * SUCH DAMAGE. 44 */ 45 /*- 46 * Copyright (c) 2003 Networks Associates Technology, Inc. 47 * All rights reserved. 48 * Copyright (c) 2018 The FreeBSD Foundation 49 * All rights reserved. 50 * 51 * This software was developed for the FreeBSD Project by Jake Burkholder, 52 * Safeport Network Services, and Network Associates Laboratories, the 53 * Security Research Division of Network Associates, Inc. under 54 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA 55 * CHATS research program. 56 * 57 * Portions of this software were developed by 58 * Konstantin Belousov <kib@FreeBSD.org> under sponsorship from 59 * the FreeBSD Foundation. 60 * 61 * Redistribution and use in source and binary forms, with or without 62 * modification, are permitted provided that the following conditions 63 * are met: 64 * 1. Redistributions of source code must retain the above copyright 65 * notice, this list of conditions and the following disclaimer. 66 * 2. Redistributions in binary form must reproduce the above copyright 67 * notice, this list of conditions and the following disclaimer in the 68 * documentation and/or other materials provided with the distribution. 
69 * 70 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 71 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 72 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 73 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 74 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 75 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 76 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 77 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 78 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 79 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 80 * SUCH DAMAGE. 81 */ 82 83 #include <sys/cdefs.h> 84 /* 85 * Manages physical address maps. 86 * 87 * Since the information managed by this module is 88 * also stored by the logical address mapping module, 89 * this module may throw away valid virtual-to-physical 90 * mappings at almost any time. However, invalidations 91 * of virtual-to-physical mappings must be done as 92 * requested. 93 * 94 * In order to cope with hardware architectures which 95 * make virtual-to-physical map invalidates expensive, 96 * this module may delay invalidate or reduced protection 97 * operations until such time as they are actually 98 * necessary. This module is given full information as 99 * to which processors are currently using which maps, 100 * and to when physical maps must be made correct. 101 */ 102 103 #include "opt_apic.h" 104 #include "opt_cpu.h" 105 #include "opt_pmap.h" 106 #include "opt_smp.h" 107 #include "opt_vm.h" 108 109 #include <sys/param.h> 110 #include <sys/systm.h> 111 #include <sys/kernel.h> 112 #include <sys/ktr.h> 113 #include <sys/lock.h> 114 #include <sys/malloc.h> 115 #include <sys/mman.h> 116 #include <sys/msgbuf.h> 117 #include <sys/mutex.h> 118 #include <sys/proc.h> 119 #include <sys/rwlock.h> 120 #include <sys/sbuf.h> 121 #include <sys/sf_buf.h> 122 #include <sys/sx.h> 123 #include <sys/vmmeter.h> 124 #include <sys/sched.h> 125 #include <sys/sysctl.h> 126 #include <sys/smp.h> 127 #include <sys/vmem.h> 128 129 #include <vm/vm.h> 130 #include <vm/vm_param.h> 131 #include <vm/vm_kern.h> 132 #include <vm/vm_page.h> 133 #include <vm/vm_map.h> 134 #include <vm/vm_object.h> 135 #include <vm/vm_extern.h> 136 #include <vm/vm_pageout.h> 137 #include <vm/vm_pager.h> 138 #include <vm/vm_phys.h> 139 #include <vm/vm_radix.h> 140 #include <vm/vm_reserv.h> 141 #include <vm/uma.h> 142 143 #ifdef DEV_APIC 144 #include <sys/bus.h> 145 #include <machine/intr_machdep.h> 146 #include <x86/apicvar.h> 147 #endif 148 #include <x86/ifunc.h> 149 #include <machine/bootinfo.h> 150 #include <machine/cpu.h> 151 #include <machine/cputypes.h> 152 #include <machine/md_var.h> 153 #include <machine/pcb.h> 154 #include <machine/specialreg.h> 155 #ifdef SMP 156 #include <machine/smp.h> 157 #endif 158 #include <machine/pmap_base.h> 159 160 #ifdef PV_STATS 161 #define PV_STAT(x) do { x ; } while (0) 162 #else 163 #define PV_STAT(x) do { } while (0) 164 #endif 165 166 #define pa_index(pa) ((pa) >> PDRSHIFT) 167 #define pa_to_pvh(pa) (&pv_table[pa_index(pa)]) 168 169 /* 170 * PTmap is recursive pagemap at top of virtual address space. 171 * Within PTmap, the page directory can be found (third indirection). 
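 *
 * Because the page directory's own page(s) are installed at index PTDPTDI
 * (see the recursive-mapping loop in pmap_cold()), the window starting at
 * VADDR(PTDPTDI, 0) exposes every page table page as ordinary memory.  A
 * minimal sketch of the resulting lookups, mirroring the vtopte() and
 * PTDpde definitions below:
 *
 *	pt_entry_t *pte = PTmap + i386_btop(va);	- PTE mapping "va"
 *	pd_entry_t *pde = PTmap + i386_btop((vm_offset_t)pte);
 *							- PDE mapping that PTE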
172 */ 173 #define PTmap ((pt_entry_t *)(PTDPTDI << PDRSHIFT)) 174 #define PTD ((pd_entry_t *)((PTDPTDI << PDRSHIFT) + (PTDPTDI * PAGE_SIZE))) 175 #define PTDpde ((pd_entry_t *)((PTDPTDI << PDRSHIFT) + (PTDPTDI * PAGE_SIZE) + \ 176 (PTDPTDI * PDESIZE))) 177 178 /* 179 * Translate a virtual address to the kernel virtual address of its page table 180 * entry (PTE). This can be used recursively. If the address of a PTE as 181 * previously returned by this macro is itself given as the argument, then the 182 * address of the page directory entry (PDE) that maps the PTE will be 183 * returned. 184 * 185 * This macro may be used before pmap_bootstrap() is called. 186 */ 187 #define vtopte(va) (PTmap + i386_btop(va)) 188 189 /* 190 * Get PDEs and PTEs for user/kernel address space 191 */ 192 #define pmap_pde(m, v) (&((m)->pm_pdir[(vm_offset_t)(v) >> PDRSHIFT])) 193 #define pdir_pde(m, v) (m[(vm_offset_t)(v) >> PDRSHIFT]) 194 195 #define pmap_pde_v(pte) ((*(int *)pte & PG_V) != 0) 196 #define pmap_pte_w(pte) ((*(int *)pte & PG_W) != 0) 197 #define pmap_pte_m(pte) ((*(int *)pte & PG_M) != 0) 198 #define pmap_pte_u(pte) ((*(int *)pte & PG_A) != 0) 199 #define pmap_pte_v(pte) ((*(int *)pte & PG_V) != 0) 200 201 #define pmap_pte_set_w(pte, v) ((v) ? atomic_set_int((u_int *)(pte), PG_W) : \ 202 atomic_clear_int((u_int *)(pte), PG_W)) 203 #define pmap_pte_set_prot(pte, v) ((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v))) 204 205 static int pgeflag = 0; /* PG_G or-in */ 206 static int pseflag = 0; /* PG_PS or-in */ 207 208 static int nkpt = NKPT; 209 210 #ifdef PMAP_PAE_COMP 211 pt_entry_t pg_nx; 212 static uma_zone_t pdptzone; 213 #else 214 #define pg_nx 0 215 #endif 216 217 _Static_assert(VM_MAXUSER_ADDRESS == VADDR(TRPTDI, 0), "VM_MAXUSER_ADDRESS"); 218 _Static_assert(VM_MAX_KERNEL_ADDRESS <= VADDR(PTDPTDI, 0), 219 "VM_MAX_KERNEL_ADDRESS"); 220 _Static_assert(PMAP_MAP_LOW == VADDR(LOWPTDI, 0), "PMAP_MAP_LOW"); 221 _Static_assert(KERNLOAD == (KERNPTDI << PDRSHIFT), "KERNLOAD"); 222 223 extern int pat_works; 224 extern int pg_ps_enabled; 225 226 extern int elf32_nxstack; 227 228 #define PAT_INDEX_SIZE 8 229 static int pat_index[PAT_INDEX_SIZE]; /* cache mode to PAT index conversion */ 230 231 /* 232 * pmap_mapdev support pre initialization (i.e. console) 233 */ 234 #define PMAP_PREINIT_MAPPING_COUNT 8 235 static struct pmap_preinit_mapping { 236 vm_paddr_t pa; 237 vm_offset_t va; 238 vm_size_t sz; 239 int mode; 240 } pmap_preinit_mapping[PMAP_PREINIT_MAPPING_COUNT]; 241 static int pmap_initialized; 242 243 static struct rwlock_padalign pvh_global_lock; 244 245 /* 246 * Data for the pv entry allocation mechanism 247 */ 248 static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks); 249 extern int pv_entry_max, pv_entry_count; 250 static int pv_entry_high_water = 0; 251 static struct md_page *pv_table; 252 extern int shpgperproc; 253 254 static struct pv_chunk *pv_chunkbase; /* KVA block for pv_chunks */ 255 static int pv_maxchunks; /* How many chunks we have KVA for */ 256 static vm_offset_t pv_vafree; /* freelist stored in the PTE */ 257 258 /* 259 * All those kernel PT submaps that BSD is so fond of 260 */ 261 static pt_entry_t *CMAP3; 262 static pd_entry_t *KPTD; 263 static caddr_t CADDR3; 264 265 /* 266 * Crashdump maps. 
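 * crashdumpmap is a MAXDUMPPGS-page KVA window reserved in
 * pmap_bootstrap(); the crash dump code is expected to map physical
 * pages here temporarily while a dump is being written.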
267 */ 268 static caddr_t crashdumpmap; 269 270 static pt_entry_t *PMAP1 = NULL, *PMAP2, *PMAP3; 271 static pt_entry_t *PADDR1 = NULL, *PADDR2, *PADDR3; 272 #ifdef SMP 273 static int PMAP1cpu, PMAP3cpu; 274 extern int PMAP1changedcpu; 275 #endif 276 extern int PMAP1changed; 277 extern int PMAP1unchanged; 278 static struct mtx PMAP2mutex; 279 280 /* 281 * Internal flags for pmap_enter()'s helper functions. 282 */ 283 #define PMAP_ENTER_NORECLAIM 0x1000000 /* Don't reclaim PV entries. */ 284 #define PMAP_ENTER_NOREPLACE 0x2000000 /* Don't replace mappings. */ 285 286 static void free_pv_chunk(struct pv_chunk *pc); 287 static void free_pv_entry(pmap_t pmap, pv_entry_t pv); 288 static pv_entry_t get_pv_entry(pmap_t pmap, boolean_t try); 289 static void pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa); 290 static bool pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, pd_entry_t pde, 291 u_int flags); 292 #if VM_NRESERVLEVEL > 0 293 static void pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa); 294 #endif 295 static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va); 296 static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, 297 vm_offset_t va); 298 static int pmap_pvh_wired_mappings(struct md_page *pvh, int count); 299 300 static void pmap_abort_ptp(pmap_t pmap, vm_offset_t va, vm_page_t mpte); 301 static boolean_t pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va); 302 static int pmap_enter_4mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, 303 vm_prot_t prot); 304 static int pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde, 305 u_int flags, vm_page_t m); 306 static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, 307 vm_page_t m, vm_prot_t prot, vm_page_t mpte); 308 static int pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte, bool promoted, 309 bool allpte_PG_A_set); 310 static void pmap_invalidate_pde_page(pmap_t pmap, vm_offset_t va, 311 pd_entry_t pde); 312 static void pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte); 313 static boolean_t pmap_is_modified_pvh(struct md_page *pvh); 314 static boolean_t pmap_is_referenced_pvh(struct md_page *pvh); 315 static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode); 316 static void pmap_kenter_pde(vm_offset_t va, pd_entry_t newpde); 317 static void pmap_pde_attr(pd_entry_t *pde, int cache_bits); 318 #if VM_NRESERVLEVEL > 0 319 static bool pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va, 320 vm_page_t mpte); 321 #endif 322 static boolean_t pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, 323 vm_prot_t prot); 324 static void pmap_pte_attr(pt_entry_t *pte, int cache_bits); 325 static void pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva, 326 struct spglist *free); 327 static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva, 328 struct spglist *free); 329 static vm_page_t pmap_remove_pt_page(pmap_t pmap, vm_offset_t va); 330 static void pmap_remove_page(pmap_t pmap, vm_offset_t va, struct spglist *free); 331 static bool pmap_remove_ptes(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, 332 struct spglist *free); 333 static void pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va); 334 static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m); 335 static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, 336 vm_page_t m); 337 static void pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, 338 pd_entry_t newpde); 339 static void 
pmap_update_pde_invalidate(vm_offset_t va, pd_entry_t newpde); 340 341 static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, u_int flags); 342 343 static vm_page_t _pmap_allocpte(pmap_t pmap, u_int ptepindex, u_int flags); 344 static void _pmap_unwire_ptp(pmap_t pmap, vm_page_t m, struct spglist *free); 345 static pt_entry_t *pmap_pte_quick(pmap_t pmap, vm_offset_t va); 346 static void pmap_pte_release(pt_entry_t *pte); 347 static int pmap_unuse_pt(pmap_t, vm_offset_t, struct spglist *); 348 #ifdef PMAP_PAE_COMP 349 static void *pmap_pdpt_allocf(uma_zone_t zone, vm_size_t bytes, int domain, 350 uint8_t *flags, int wait); 351 #endif 352 static void pmap_init_trm(void); 353 static void pmap_invalidate_all_int(pmap_t pmap); 354 355 static __inline void pagezero(void *page); 356 357 CTASSERT(1 << PDESHIFT == sizeof(pd_entry_t)); 358 CTASSERT(1 << PTESHIFT == sizeof(pt_entry_t)); 359 360 extern char _end[]; 361 extern u_long physfree; /* phys addr of next free page */ 362 extern u_long vm86phystk;/* PA of vm86/bios stack */ 363 extern u_long vm86paddr;/* address of vm86 region */ 364 extern int vm86pa; /* phys addr of vm86 region */ 365 extern u_long KERNend; /* phys addr end of kernel (just after bss) */ 366 #ifdef PMAP_PAE_COMP 367 pd_entry_t *IdlePTD_pae; /* phys addr of kernel PTD */ 368 pdpt_entry_t *IdlePDPT; /* phys addr of kernel PDPT */ 369 pt_entry_t *KPTmap_pae; /* address of kernel page tables */ 370 #define IdlePTD IdlePTD_pae 371 #define KPTmap KPTmap_pae 372 #else 373 pd_entry_t *IdlePTD_nopae; 374 pt_entry_t *KPTmap_nopae; 375 #define IdlePTD IdlePTD_nopae 376 #define KPTmap KPTmap_nopae 377 #endif 378 extern u_long KPTphys; /* phys addr of kernel page tables */ 379 extern u_long tramp_idleptd; 380 381 static u_long 382 allocpages(u_int cnt, u_long *physfree) 383 { 384 u_long res; 385 386 res = *physfree; 387 *physfree += PAGE_SIZE * cnt; 388 bzero((void *)res, PAGE_SIZE * cnt); 389 return (res); 390 } 391 392 static void 393 pmap_cold_map(u_long pa, u_long va, u_long cnt) 394 { 395 pt_entry_t *pt; 396 397 for (pt = (pt_entry_t *)KPTphys + atop(va); cnt > 0; 398 cnt--, pt++, va += PAGE_SIZE, pa += PAGE_SIZE) 399 *pt = pa | PG_V | PG_RW | PG_A | PG_M; 400 } 401 402 static void 403 pmap_cold_mapident(u_long pa, u_long cnt) 404 { 405 406 pmap_cold_map(pa, pa, cnt); 407 } 408 409 _Static_assert(LOWPTDI * 2 * NBPDR == KERNBASE, 410 "Broken double-map of zero PTD"); 411 412 static void 413 __CONCAT(PMTYPE, remap_lower)(bool enable) 414 { 415 int i; 416 417 for (i = 0; i < LOWPTDI; i++) 418 IdlePTD[i] = enable ? IdlePTD[LOWPTDI + i] : 0; 419 load_cr3(rcr3()); /* invalidate TLB */ 420 } 421 422 /* 423 * Called from locore.s before paging is enabled. Sets up the first 424 * kernel page table. Since kernel is mapped with PA == VA, this code 425 * does not require relocations. 
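 *
 * While PA == VA, allocpages() above can hand out pages straight from
 * "physfree", and the physical address it returns is also a usable
 * pointer.  For example, the kernel page table pages are obtained with
 * roughly
 *
 *	KPTphys = allocpages(NKPT, &physfree);
 *	KPTmap = (pt_entry_t *)KPTphys;
 *
 * which only works because the identity mapping is still in effect.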
426 */ 427 void 428 __CONCAT(PMTYPE, cold)(void) 429 { 430 pt_entry_t *pt; 431 u_long a; 432 u_int cr3, ncr4; 433 434 physfree = (u_long)&_end; 435 if (bootinfo.bi_esymtab != 0) 436 physfree = bootinfo.bi_esymtab; 437 if (bootinfo.bi_kernend != 0) 438 physfree = bootinfo.bi_kernend; 439 physfree = roundup2(physfree, NBPDR); 440 KERNend = physfree; 441 442 /* Allocate Kernel Page Tables */ 443 KPTphys = allocpages(NKPT, &physfree); 444 KPTmap = (pt_entry_t *)KPTphys; 445 446 /* Allocate Page Table Directory */ 447 #ifdef PMAP_PAE_COMP 448 /* XXX only need 32 bytes (easier for now) */ 449 IdlePDPT = (pdpt_entry_t *)allocpages(1, &physfree); 450 #endif 451 IdlePTD = (pd_entry_t *)allocpages(NPGPTD, &physfree); 452 453 /* 454 * Allocate KSTACK. Leave a guard page between IdlePTD and 455 * proc0kstack, to control stack overflow for thread0 and 456 * prevent corruption of the page table. We leak the guard 457 * physical memory due to 1:1 mappings. 458 */ 459 allocpages(1, &physfree); 460 proc0kstack = allocpages(TD0_KSTACK_PAGES, &physfree); 461 462 /* vm86/bios stack */ 463 vm86phystk = allocpages(1, &physfree); 464 465 /* pgtable + ext + IOPAGES */ 466 vm86paddr = vm86pa = allocpages(3, &physfree); 467 468 /* Install page tables into PTD. Page table page 1 is wasted. */ 469 for (a = 0; a < NKPT; a++) 470 IdlePTD[a] = (KPTphys + ptoa(a)) | PG_V | PG_RW | PG_A | PG_M; 471 472 #ifdef PMAP_PAE_COMP 473 /* PAE install PTD pointers into PDPT */ 474 for (a = 0; a < NPGPTD; a++) 475 IdlePDPT[a] = ((u_int)IdlePTD + ptoa(a)) | PG_V; 476 #endif 477 478 /* 479 * Install recursive mapping for kernel page tables into 480 * itself. 481 */ 482 for (a = 0; a < NPGPTD; a++) 483 IdlePTD[PTDPTDI + a] = ((u_int)IdlePTD + ptoa(a)) | PG_V | 484 PG_RW; 485 486 /* 487 * Initialize page table pages mapping physical address zero 488 * through the (physical) end of the kernel. Many of these 489 * pages must be reserved, and we reserve them all and map 490 * them linearly for convenience. We do this even if we've 491 * enabled PSE above; we'll just switch the corresponding 492 * kernel PDEs before we turn on paging. 493 * 494 * This and all other page table entries allow read and write 495 * access for various reasons. Kernel mappings never have any 496 * access restrictions. 497 */ 498 pmap_cold_mapident(0, atop(NBPDR) * LOWPTDI); 499 pmap_cold_map(0, NBPDR * LOWPTDI, atop(NBPDR) * LOWPTDI); 500 pmap_cold_mapident(KERNBASE, atop(KERNend - KERNBASE)); 501 502 /* Map page table directory */ 503 #ifdef PMAP_PAE_COMP 504 pmap_cold_mapident((u_long)IdlePDPT, 1); 505 #endif 506 pmap_cold_mapident((u_long)IdlePTD, NPGPTD); 507 508 /* Map early KPTmap. It is really pmap_cold_mapident. */ 509 pmap_cold_map(KPTphys, (u_long)KPTmap, NKPT); 510 511 /* Map proc0kstack */ 512 pmap_cold_mapident(proc0kstack, TD0_KSTACK_PAGES); 513 /* ISA hole already mapped */ 514 515 pmap_cold_mapident(vm86phystk, 1); 516 pmap_cold_mapident(vm86pa, 3); 517 518 /* Map page 0 into the vm86 page table */ 519 *(pt_entry_t *)vm86pa = 0 | PG_RW | PG_U | PG_A | PG_M | PG_V; 520 521 /* ...likewise for the ISA hole for vm86 */ 522 for (pt = (pt_entry_t *)vm86pa + atop(ISA_HOLE_START), a = 0; 523 a < atop(ISA_HOLE_LENGTH); a++, pt++) 524 *pt = (ISA_HOLE_START + ptoa(a)) | PG_RW | PG_U | PG_A | 525 PG_M | PG_V; 526 527 /* Enable PSE, PGE, VME, and PAE if configured. */ 528 ncr4 = 0; 529 if ((cpu_feature & CPUID_PSE) != 0) { 530 ncr4 |= CR4_PSE; 531 pseflag = PG_PS; 532 /* 533 * Superpage mapping of the kernel text. Existing 4k 534 * page table pages are wasted. 
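 * Each iteration of the loop below rewrites one kernel PDE as a PG_PS
 * (4MB) identity mapping; "a" stays NBPDR-aligned because both KERNBASE
 * and KERNend (rounded up above) are PDR-aligned, so it can be used
 * directly as the superpage frame.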
535 */ 536 for (a = KERNBASE; a < KERNend; a += NBPDR) 537 IdlePTD[a >> PDRSHIFT] = a | PG_PS | PG_A | PG_M | 538 PG_RW | PG_V; 539 } 540 if ((cpu_feature & CPUID_PGE) != 0) { 541 ncr4 |= CR4_PGE; 542 pgeflag = PG_G; 543 } 544 ncr4 |= (cpu_feature & CPUID_VME) != 0 ? CR4_VME : 0; 545 #ifdef PMAP_PAE_COMP 546 ncr4 |= CR4_PAE; 547 #endif 548 if (ncr4 != 0) 549 load_cr4(rcr4() | ncr4); 550 551 /* Now enable paging */ 552 #ifdef PMAP_PAE_COMP 553 cr3 = (u_int)IdlePDPT; 554 if ((cpu_feature & CPUID_PAT) == 0) 555 wbinvd(); 556 #else 557 cr3 = (u_int)IdlePTD; 558 #endif 559 tramp_idleptd = cr3; 560 load_cr3(cr3); 561 load_cr0(rcr0() | CR0_PG); 562 563 /* 564 * Now running relocated at KERNBASE where the system is 565 * linked to run. 566 */ 567 568 /* 569 * Remove the lowest part of the double mapping of low memory 570 * to get some null pointer checks. 571 */ 572 __CONCAT(PMTYPE, remap_lower)(false); 573 574 kernel_vm_end = /* 0 + */ NKPT * NBPDR; 575 #ifdef PMAP_PAE_COMP 576 i386_pmap_VM_NFREEORDER = VM_NFREEORDER_PAE; 577 i386_pmap_VM_LEVEL_0_ORDER = VM_LEVEL_0_ORDER_PAE; 578 i386_pmap_PDRSHIFT = PDRSHIFT_PAE; 579 #else 580 i386_pmap_VM_NFREEORDER = VM_NFREEORDER_NOPAE; 581 i386_pmap_VM_LEVEL_0_ORDER = VM_LEVEL_0_ORDER_NOPAE; 582 i386_pmap_PDRSHIFT = PDRSHIFT_NOPAE; 583 #endif 584 } 585 586 static void 587 __CONCAT(PMTYPE, set_nx)(void) 588 { 589 590 #ifdef PMAP_PAE_COMP 591 if ((amd_feature & AMDID_NX) == 0) 592 return; 593 pg_nx = PG_NX; 594 elf32_nxstack = 1; 595 /* EFER.EFER_NXE is set in initializecpu(). */ 596 #endif 597 } 598 599 /* 600 * Bootstrap the system enough to run with virtual memory. 601 * 602 * On the i386 this is called after pmap_cold() created initial 603 * kernel page table and enabled paging, and just syncs the pmap 604 * module with what has already been done. 605 */ 606 static void 607 __CONCAT(PMTYPE, bootstrap)(vm_paddr_t firstaddr) 608 { 609 vm_offset_t va; 610 pt_entry_t *pte, *unused __unused; 611 struct pcpu *pc; 612 u_long res; 613 int i; 614 615 res = atop(firstaddr - (vm_paddr_t)KERNLOAD); 616 617 /* 618 * Add a physical memory segment (vm_phys_seg) corresponding to the 619 * preallocated kernel page table pages so that vm_page structures 620 * representing these pages will be created. The vm_page structures 621 * are required for promotion of the corresponding kernel virtual 622 * addresses to superpage mappings. 623 */ 624 vm_phys_early_add_seg(KPTphys, KPTphys + ptoa(nkpt)); 625 626 /* 627 * Initialize the first available kernel virtual address. 628 * However, using "firstaddr" may waste a few pages of the 629 * kernel virtual address space, because pmap_cold() may not 630 * have mapped every physical page that it allocated. 631 * Preferably, pmap_cold() would provide a first unused 632 * virtual address in addition to "firstaddr". 633 */ 634 virtual_avail = (vm_offset_t)firstaddr; 635 virtual_end = VM_MAX_KERNEL_ADDRESS; 636 637 /* 638 * Initialize the kernel pmap (which is statically allocated). 639 * Count bootstrap data as being resident in case any of this data is 640 * later unmapped (using pmap_remove()) and freed. 641 */ 642 PMAP_LOCK_INIT(kernel_pmap); 643 kernel_pmap->pm_pdir = IdlePTD; 644 #ifdef PMAP_PAE_COMP 645 kernel_pmap->pm_pdpt = IdlePDPT; 646 #endif 647 CPU_FILL(&kernel_pmap->pm_active); /* don't allow deactivation */ 648 kernel_pmap->pm_stats.resident_count = res; 649 TAILQ_INIT(&kernel_pmap->pm_pvchunk); 650 vm_radix_init(&kernel_pmap->pm_root); 651 652 /* 653 * Initialize the global pv list lock. 
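 * pvh_global_lock is a single, cache-line padded rwlock that protects
 * the pv entry lists for all pmaps.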
654 */ 655 rw_init(&pvh_global_lock, "pmap pv global"); 656 657 /* 658 * Reserve some special page table entries/VA space for temporary 659 * mapping of pages. 660 */ 661 #define SYSMAP(c, p, v, n) \ 662 v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n); 663 664 va = virtual_avail; 665 pte = vtopte(va); 666 667 /* 668 * Initialize temporary map objects on the current CPU for use 669 * during early boot. 670 * CMAP1/CMAP2 are used for zeroing and copying pages. 671 * CMAP3 is used for the boot-time memory test. 672 */ 673 pc = get_pcpu(); 674 mtx_init(&pc->pc_cmap_lock, "SYSMAPS", NULL, MTX_DEF); 675 SYSMAP(caddr_t, pc->pc_cmap_pte1, pc->pc_cmap_addr1, 1) 676 SYSMAP(caddr_t, pc->pc_cmap_pte2, pc->pc_cmap_addr2, 1) 677 SYSMAP(vm_offset_t, pte, pc->pc_qmap_addr, 1) 678 679 SYSMAP(caddr_t, CMAP3, CADDR3, 1); 680 681 /* 682 * Crashdump maps. 683 */ 684 SYSMAP(caddr_t, unused, crashdumpmap, MAXDUMPPGS) 685 686 /* 687 * ptvmmap is used for reading arbitrary physical pages via /dev/mem. 688 */ 689 SYSMAP(caddr_t, unused, ptvmmap, 1) 690 691 /* 692 * msgbufp is used to map the system message buffer. 693 */ 694 SYSMAP(struct msgbuf *, unused, msgbufp, atop(round_page(msgbufsize))) 695 696 /* 697 * KPTmap is used by pmap_kextract(). 698 * 699 * KPTmap is first initialized by pmap_cold(). However, that initial 700 * KPTmap can only support NKPT page table pages. Here, a larger 701 * KPTmap is created that can support KVA_PAGES page table pages. 702 */ 703 SYSMAP(pt_entry_t *, KPTD, KPTmap, KVA_PAGES) 704 705 for (i = 0; i < NKPT; i++) 706 KPTD[i] = (KPTphys + ptoa(i)) | PG_RW | PG_V; 707 708 /* 709 * PADDR1 and PADDR2 are used by pmap_pte_quick() and pmap_pte(), 710 * respectively. 711 */ 712 SYSMAP(pt_entry_t *, PMAP1, PADDR1, 1) 713 SYSMAP(pt_entry_t *, PMAP2, PADDR2, 1) 714 SYSMAP(pt_entry_t *, PMAP3, PADDR3, 1) 715 716 mtx_init(&PMAP2mutex, "PMAP2", NULL, MTX_DEF); 717 718 virtual_avail = va; 719 720 /* 721 * Initialize the PAT MSR if present. 722 * pmap_init_pat() clears and sets CR4_PGE, which, as a 723 * side-effect, invalidates stale PG_G TLB entries that might 724 * have been created in our pre-boot environment. We assume 725 * that PAT support implies PGE and in reverse, PGE presence 726 * comes with PAT. Both features were added for Pentium Pro. 727 */ 728 pmap_init_pat(); 729 } 730 731 static void 732 pmap_init_reserved_pages(void) 733 { 734 struct pcpu *pc; 735 vm_offset_t pages; 736 int i; 737 738 #ifdef PMAP_PAE_COMP 739 if (!pae_mode) 740 return; 741 #else 742 if (pae_mode) 743 return; 744 #endif 745 CPU_FOREACH(i) { 746 pc = pcpu_find(i); 747 mtx_init(&pc->pc_copyout_mlock, "cpmlk", NULL, MTX_DEF | 748 MTX_NEW); 749 pc->pc_copyout_maddr = kva_alloc(ptoa(2)); 750 if (pc->pc_copyout_maddr == 0) 751 panic("unable to allocate non-sleepable copyout KVA"); 752 sx_init(&pc->pc_copyout_slock, "cpslk"); 753 pc->pc_copyout_saddr = kva_alloc(ptoa(2)); 754 if (pc->pc_copyout_saddr == 0) 755 panic("unable to allocate sleepable copyout KVA"); 756 pc->pc_pmap_eh_va = kva_alloc(ptoa(1)); 757 if (pc->pc_pmap_eh_va == 0) 758 panic("unable to allocate pmap_extract_and_hold KVA"); 759 pc->pc_pmap_eh_ptep = (char *)vtopte(pc->pc_pmap_eh_va); 760 761 /* 762 * Skip if the mappings have already been initialized, 763 * i.e. this is the BSP. 
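 * pmap_bootstrap() already carved pc_cmap_addr1/2 and pc_qmap_addr out
 * of the early SYSMAP area for the boot processor, so only application
 * processors fall through to the kva_alloc() calls below.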
764 */ 765 if (pc->pc_cmap_addr1 != 0) 766 continue; 767 768 mtx_init(&pc->pc_cmap_lock, "SYSMAPS", NULL, MTX_DEF); 769 pages = kva_alloc(PAGE_SIZE * 3); 770 if (pages == 0) 771 panic("unable to allocate CMAP KVA"); 772 pc->pc_cmap_pte1 = vtopte(pages); 773 pc->pc_cmap_pte2 = vtopte(pages + PAGE_SIZE); 774 pc->pc_cmap_addr1 = (caddr_t)pages; 775 pc->pc_cmap_addr2 = (caddr_t)(pages + PAGE_SIZE); 776 pc->pc_qmap_addr = pages + ptoa(2); 777 } 778 } 779 780 SYSINIT(rpages_init, SI_SUB_CPU, SI_ORDER_ANY, pmap_init_reserved_pages, NULL); 781 782 /* 783 * Setup the PAT MSR. 784 */ 785 static void 786 __CONCAT(PMTYPE, init_pat)(void) 787 { 788 int pat_table[PAT_INDEX_SIZE]; 789 uint64_t pat_msr; 790 u_long cr0, cr4; 791 int i; 792 793 /* Set default PAT index table. */ 794 for (i = 0; i < PAT_INDEX_SIZE; i++) 795 pat_table[i] = -1; 796 pat_table[PAT_WRITE_BACK] = 0; 797 pat_table[PAT_WRITE_THROUGH] = 1; 798 pat_table[PAT_UNCACHEABLE] = 3; 799 pat_table[PAT_WRITE_COMBINING] = 3; 800 pat_table[PAT_WRITE_PROTECTED] = 3; 801 pat_table[PAT_UNCACHED] = 3; 802 803 /* 804 * Bail if this CPU doesn't implement PAT. 805 * We assume that PAT support implies PGE. 806 */ 807 if ((cpu_feature & CPUID_PAT) == 0) { 808 for (i = 0; i < PAT_INDEX_SIZE; i++) 809 pat_index[i] = pat_table[i]; 810 pat_works = 0; 811 return; 812 } 813 814 /* 815 * Due to some Intel errata, we can only safely use the lower 4 816 * PAT entries. 817 * 818 * Intel Pentium III Processor Specification Update 819 * Errata E.27 (Upper Four PAT Entries Not Usable With Mode B 820 * or Mode C Paging) 821 * 822 * Intel Pentium IV Processor Specification Update 823 * Errata N46 (PAT Index MSB May Be Calculated Incorrectly) 824 */ 825 if (cpu_vendor_id == CPU_VENDOR_INTEL && 826 !(CPUID_TO_FAMILY(cpu_id) == 6 && CPUID_TO_MODEL(cpu_id) >= 0xe)) 827 pat_works = 0; 828 829 /* Initialize default PAT entries. */ 830 pat_msr = PAT_VALUE(0, PAT_WRITE_BACK) | 831 PAT_VALUE(1, PAT_WRITE_THROUGH) | 832 PAT_VALUE(2, PAT_UNCACHED) | 833 PAT_VALUE(3, PAT_UNCACHEABLE) | 834 PAT_VALUE(4, PAT_WRITE_BACK) | 835 PAT_VALUE(5, PAT_WRITE_THROUGH) | 836 PAT_VALUE(6, PAT_UNCACHED) | 837 PAT_VALUE(7, PAT_UNCACHEABLE); 838 839 if (pat_works) { 840 /* 841 * Leave the indices 0-3 at the default of WB, WT, UC-, and UC. 842 * Program 5 and 6 as WP and WC. 843 * Leave 4 and 7 as WB and UC. 844 */ 845 pat_msr &= ~(PAT_MASK(5) | PAT_MASK(6)); 846 pat_msr |= PAT_VALUE(5, PAT_WRITE_PROTECTED) | 847 PAT_VALUE(6, PAT_WRITE_COMBINING); 848 pat_table[PAT_UNCACHED] = 2; 849 pat_table[PAT_WRITE_PROTECTED] = 5; 850 pat_table[PAT_WRITE_COMBINING] = 6; 851 } else { 852 /* 853 * Just replace PAT Index 2 with WC instead of UC-. 854 */ 855 pat_msr &= ~PAT_MASK(2); 856 pat_msr |= PAT_VALUE(2, PAT_WRITE_COMBINING); 857 pat_table[PAT_WRITE_COMBINING] = 2; 858 } 859 860 /* Disable PGE. */ 861 cr4 = rcr4(); 862 load_cr4(cr4 & ~CR4_PGE); 863 864 /* Disable caches (CD = 1, NW = 0). */ 865 cr0 = rcr0(); 866 load_cr0((cr0 & ~CR0_NW) | CR0_CD); 867 868 /* Flushes caches and TLBs. */ 869 wbinvd(); 870 invltlb(); 871 872 /* Update PAT and index table. */ 873 wrmsr(MSR_PAT, pat_msr); 874 for (i = 0; i < PAT_INDEX_SIZE; i++) 875 pat_index[i] = pat_table[i]; 876 877 /* Flush caches and TLBs again. */ 878 wbinvd(); 879 invltlb(); 880 881 /* Restore caches and PGE. 
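 * At this point the PAT MSR holds WB, WT, UC-, UC, WB, WP, WC, UC for
 * indices 0-7 when pat_works, or the default layout with only index 2
 * rewritten to WC otherwise, and pat_index[] maps each caching mode to
 * the matching entry.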
*/ 882 load_cr0(cr0); 883 load_cr4(cr4); 884 } 885 886 #ifdef PMAP_PAE_COMP 887 static void * 888 pmap_pdpt_allocf(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *flags, 889 int wait) 890 { 891 892 /* Inform UMA that this allocator uses kernel_map/object. */ 893 *flags = UMA_SLAB_KERNEL; 894 return ((void *)kmem_alloc_contig_domainset(DOMAINSET_FIXED(domain), 895 bytes, wait, 0x0ULL, 0xffffffffULL, 1, 0, VM_MEMATTR_DEFAULT)); 896 } 897 #endif 898 899 /* 900 * Abuse the pte nodes for unmapped kva to thread a kva freelist through. 901 * Requirements: 902 * - Must deal with pages in order to ensure that none of the PG_* bits 903 * are ever set, PG_V in particular. 904 * - Assumes we can write to ptes without pte_store() atomic ops, even 905 * on PAE systems. This should be ok. 906 * - Assumes nothing will ever test these addresses for 0 to indicate 907 * no mapping instead of correctly checking PG_V. 908 * - Assumes a vm_offset_t will fit in a pte (true for i386). 909 * Because PG_V is never set, there can be no mappings to invalidate. 910 */ 911 static vm_offset_t 912 pmap_ptelist_alloc(vm_offset_t *head) 913 { 914 pt_entry_t *pte; 915 vm_offset_t va; 916 917 va = *head; 918 if (va == 0) 919 panic("pmap_ptelist_alloc: exhausted ptelist KVA"); 920 pte = vtopte(va); 921 *head = *pte; 922 if (*head & PG_V) 923 panic("pmap_ptelist_alloc: va with PG_V set!"); 924 *pte = 0; 925 return (va); 926 } 927 928 static void 929 pmap_ptelist_free(vm_offset_t *head, vm_offset_t va) 930 { 931 pt_entry_t *pte; 932 933 if (va & PG_V) 934 panic("pmap_ptelist_free: freeing va with PG_V set!"); 935 pte = vtopte(va); 936 *pte = *head; /* virtual! PG_V is 0 though */ 937 *head = va; 938 } 939 940 static void 941 pmap_ptelist_init(vm_offset_t *head, void *base, int npages) 942 { 943 int i; 944 vm_offset_t va; 945 946 *head = 0; 947 for (i = npages - 1; i >= 0; i--) { 948 va = (vm_offset_t)base + i * PAGE_SIZE; 949 pmap_ptelist_free(head, va); 950 } 951 } 952 953 /* 954 * Initialize the pmap module. 955 * Called by vm_init, to initialize any structures that the pmap 956 * system needs to map virtual memory. 957 */ 958 static void 959 __CONCAT(PMTYPE, init)(void) 960 { 961 struct pmap_preinit_mapping *ppim; 962 vm_page_t mpte; 963 vm_size_t s; 964 int i, pv_npg; 965 966 /* 967 * Initialize the vm page array entries for the kernel pmap's 968 * page table pages. 969 */ 970 PMAP_LOCK(kernel_pmap); 971 for (i = 0; i < NKPT; i++) { 972 mpte = PHYS_TO_VM_PAGE(KPTphys + ptoa(i)); 973 KASSERT(mpte >= vm_page_array && 974 mpte < &vm_page_array[vm_page_array_size], 975 ("pmap_init: page table page is out of range")); 976 mpte->pindex = i + KPTDI; 977 mpte->phys_addr = KPTphys + ptoa(i); 978 mpte->ref_count = 1; 979 980 /* 981 * Collect the page table pages that were replaced by a 2/4MB 982 * page. They are filled with equivalent 4KB page mappings. 983 */ 984 if (pseflag != 0 && 985 KERNBASE <= i << PDRSHIFT && i << PDRSHIFT < KERNend && 986 pmap_insert_pt_page(kernel_pmap, mpte, true, true)) 987 panic("pmap_init: pmap_insert_pt_page failed"); 988 } 989 PMAP_UNLOCK(kernel_pmap); 990 vm_wire_add(NKPT); 991 992 /* 993 * Initialize the address space (zone) for the pv entries. Set a 994 * high water mark so that the system can recover from excessive 995 * numbers of pv entries. 
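 *
 * The sizing below is pv_entry_max = shpgperproc * maxproc + v_page_count,
 * rounded up to a multiple of _NPCPV, with the high water mark set at 90%
 * of that.  As a purely illustrative example (hypothetical values),
 * shpgperproc = 200 and maxproc = 10000 on a machine with about one
 * million pages would yield roughly three million pv entries before
 * rounding.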
996 */ 997 TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc); 998 pv_entry_max = shpgperproc * maxproc + vm_cnt.v_page_count; 999 TUNABLE_INT_FETCH("vm.pmap.pv_entry_max", &pv_entry_max); 1000 pv_entry_max = roundup(pv_entry_max, _NPCPV); 1001 pv_entry_high_water = 9 * (pv_entry_max / 10); 1002 1003 /* 1004 * If the kernel is running on a virtual machine, then it must assume 1005 * that MCA is enabled by the hypervisor. Moreover, the kernel must 1006 * be prepared for the hypervisor changing the vendor and family that 1007 * are reported by CPUID. Consequently, the workaround for AMD Family 1008 * 10h Erratum 383 is enabled if the processor's feature set does not 1009 * include at least one feature that is only supported by older Intel 1010 * or newer AMD processors. 1011 */ 1012 if (vm_guest != VM_GUEST_NO && (cpu_feature & CPUID_SS) == 0 && 1013 (cpu_feature2 & (CPUID2_SSSE3 | CPUID2_SSE41 | CPUID2_AESNI | 1014 CPUID2_AVX | CPUID2_XSAVE)) == 0 && (amd_feature2 & (AMDID2_XOP | 1015 AMDID2_FMA4)) == 0) 1016 workaround_erratum383 = 1; 1017 1018 /* 1019 * Are large page mappings supported and enabled? 1020 */ 1021 TUNABLE_INT_FETCH("vm.pmap.pg_ps_enabled", &pg_ps_enabled); 1022 if (pseflag == 0) 1023 pg_ps_enabled = 0; 1024 else if (pg_ps_enabled) { 1025 KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0, 1026 ("pmap_init: can't assign to pagesizes[1]")); 1027 pagesizes[1] = NBPDR; 1028 } 1029 1030 /* 1031 * Calculate the size of the pv head table for superpages. 1032 * Handle the possibility that "vm_phys_segs[...].end" is zero. 1033 */ 1034 pv_npg = trunc_4mpage(vm_phys_segs[vm_phys_nsegs - 1].end - 1035 PAGE_SIZE) / NBPDR + 1; 1036 1037 /* 1038 * Allocate memory for the pv head table for superpages. 1039 */ 1040 s = (vm_size_t)(pv_npg * sizeof(struct md_page)); 1041 s = round_page(s); 1042 pv_table = kmem_malloc(s, M_WAITOK | M_ZERO); 1043 for (i = 0; i < pv_npg; i++) 1044 TAILQ_INIT(&pv_table[i].pv_list); 1045 1046 pv_maxchunks = MAX(pv_entry_max / _NPCPV, maxproc); 1047 pv_chunkbase = (struct pv_chunk *)kva_alloc(PAGE_SIZE * pv_maxchunks); 1048 if (pv_chunkbase == NULL) 1049 panic("pmap_init: not enough kvm for pv chunks"); 1050 pmap_ptelist_init(&pv_vafree, pv_chunkbase, pv_maxchunks); 1051 #ifdef PMAP_PAE_COMP 1052 pdptzone = uma_zcreate("PDPT", NPGPTD * sizeof(pdpt_entry_t), NULL, 1053 NULL, NULL, NULL, (NPGPTD * sizeof(pdpt_entry_t)) - 1, 1054 UMA_ZONE_CONTIG | UMA_ZONE_VM | UMA_ZONE_NOFREE); 1055 uma_zone_set_allocf(pdptzone, pmap_pdpt_allocf); 1056 #endif 1057 1058 pmap_initialized = 1; 1059 pmap_init_trm(); 1060 1061 if (!bootverbose) 1062 return; 1063 for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) { 1064 ppim = pmap_preinit_mapping + i; 1065 if (ppim->va == 0) 1066 continue; 1067 printf("PPIM %u: PA=%#jx, VA=%#x, size=%#x, mode=%#x\n", i, 1068 (uintmax_t)ppim->pa, ppim->va, ppim->sz, ppim->mode); 1069 } 1070 1071 } 1072 1073 extern u_long pmap_pde_demotions; 1074 extern u_long pmap_pde_mappings; 1075 extern u_long pmap_pde_p_failures; 1076 extern u_long pmap_pde_promotions; 1077 1078 /*************************************************** 1079 * Low level helper routines..... 1080 ***************************************************/ 1081 1082 static boolean_t 1083 __CONCAT(PMTYPE, is_valid_memattr)(pmap_t pmap __unused, vm_memattr_t mode) 1084 { 1085 1086 return (mode >= 0 && mode < PAT_INDEX_SIZE && 1087 pat_index[(int)mode] >= 0); 1088 } 1089 1090 /* 1091 * Determine the appropriate bits to set in a PTE or PDE for a specified 1092 * caching mode. 
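 *
 * The chosen PAT index is encoded in three hardware bits: bit 2 in
 * PG_PTE_PAT (PG_PDE_PAT for a superpage), bit 1 in PG_NC_PCD, and bit 0
 * in PG_NC_PWT.  For example, with a working PAT, PAT_WRITE_COMBINING
 * maps to index 6 (binary 110), so the PAT and PCD bits are set and PWT
 * is clear.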
1093 */ 1094 static int 1095 __CONCAT(PMTYPE, cache_bits)(pmap_t pmap, int mode, boolean_t is_pde) 1096 { 1097 int cache_bits, pat_flag, pat_idx; 1098 1099 if (!pmap_is_valid_memattr(pmap, mode)) 1100 panic("Unknown caching mode %d\n", mode); 1101 1102 /* The PAT bit is different for PTE's and PDE's. */ 1103 pat_flag = is_pde ? PG_PDE_PAT : PG_PTE_PAT; 1104 1105 /* Map the caching mode to a PAT index. */ 1106 pat_idx = pat_index[mode]; 1107 1108 /* Map the 3-bit index value into the PAT, PCD, and PWT bits. */ 1109 cache_bits = 0; 1110 if (pat_idx & 0x4) 1111 cache_bits |= pat_flag; 1112 if (pat_idx & 0x2) 1113 cache_bits |= PG_NC_PCD; 1114 if (pat_idx & 0x1) 1115 cache_bits |= PG_NC_PWT; 1116 return (cache_bits); 1117 } 1118 1119 static int 1120 pmap_pat_index(pmap_t pmap, pt_entry_t pte, bool is_pde) 1121 { 1122 int pat_flag, pat_idx; 1123 1124 if ((cpu_feature & CPUID_PAT) == 0) 1125 return (0); 1126 1127 pat_idx = 0; 1128 /* The PAT bit is different for PTE's and PDE's. */ 1129 pat_flag = is_pde ? PG_PDE_PAT : PG_PTE_PAT; 1130 1131 if ((pte & pat_flag) != 0) 1132 pat_idx |= 0x4; 1133 if ((pte & PG_NC_PCD) != 0) 1134 pat_idx |= 0x2; 1135 if ((pte & PG_NC_PWT) != 0) 1136 pat_idx |= 0x1; 1137 1138 /* See pmap_init_pat(). */ 1139 if (pat_works) { 1140 if (pat_idx == 4) 1141 pat_idx = 0; 1142 if (pat_idx == 7) 1143 pat_idx = 3; 1144 } else { 1145 /* XXXKIB */ 1146 } 1147 1148 return (pat_idx); 1149 } 1150 1151 static bool 1152 __CONCAT(PMTYPE, ps_enabled)(pmap_t pmap __unused) 1153 { 1154 1155 return (pg_ps_enabled); 1156 } 1157 1158 /* 1159 * The caller is responsible for maintaining TLB consistency. 1160 */ 1161 static void 1162 pmap_kenter_pde(vm_offset_t va, pd_entry_t newpde) 1163 { 1164 pd_entry_t *pde; 1165 1166 pde = pmap_pde(kernel_pmap, va); 1167 pde_store(pde, newpde); 1168 } 1169 1170 /* 1171 * After changing the page size for the specified virtual address in the page 1172 * table, flush the corresponding entries from the processor's TLB. Only the 1173 * calling processor's TLB is affected. 1174 * 1175 * The calling thread must be pinned to a processor. 1176 */ 1177 static void 1178 pmap_update_pde_invalidate(vm_offset_t va, pd_entry_t newpde) 1179 { 1180 1181 if ((newpde & PG_PS) == 0) 1182 /* Demotion: flush a specific 2MB page mapping. */ 1183 invlpg(va); 1184 else /* if ((newpde & PG_G) == 0) */ 1185 /* 1186 * Promotion: flush every 4KB page mapping from the TLB 1187 * because there are too many to flush individually. 1188 */ 1189 invltlb(); 1190 } 1191 1192 #ifdef SMP 1193 1194 static void 1195 pmap_curcpu_cb_dummy(pmap_t pmap __unused, vm_offset_t addr1 __unused, 1196 vm_offset_t addr2 __unused) 1197 { 1198 } 1199 1200 /* 1201 * For SMP, these functions have to use the IPI mechanism for coherence. 1202 * 1203 * N.B.: Before calling any of the following TLB invalidation functions, 1204 * the calling processor must ensure that all stores updating a non- 1205 * kernel page table are globally performed. Otherwise, another 1206 * processor could cache an old, pre-update entry without being 1207 * invalidated. This can happen one of two ways: (1) The pmap becomes 1208 * active on another processor after its pm_active field is checked by 1209 * one of the following functions but before a store updating the page 1210 * table is globally performed. (2) The pmap becomes active on another 1211 * processor before its pm_active field is checked but due to 1212 * speculative loads one of the following functions stills reads the 1213 * pmap as inactive on the other processor. 
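 * In either case the other processor can install a stale translation
 * in its TLB that no subsequent shootdown IPI will target.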
1214 * 1215 * The kernel page table is exempt because its pm_active field is 1216 * immutable. The kernel page table is always active on every 1217 * processor. 1218 */ 1219 static void 1220 pmap_invalidate_page_int(pmap_t pmap, vm_offset_t va) 1221 { 1222 cpuset_t *mask, other_cpus; 1223 u_int cpuid; 1224 1225 sched_pin(); 1226 if (pmap == kernel_pmap) { 1227 invlpg(va); 1228 mask = &all_cpus; 1229 } else if (!CPU_CMP(&pmap->pm_active, &all_cpus)) { 1230 mask = &all_cpus; 1231 } else { 1232 cpuid = PCPU_GET(cpuid); 1233 other_cpus = all_cpus; 1234 CPU_CLR(cpuid, &other_cpus); 1235 CPU_AND(&other_cpus, &other_cpus, &pmap->pm_active); 1236 mask = &other_cpus; 1237 } 1238 smp_masked_invlpg(*mask, va, pmap, pmap_curcpu_cb_dummy); 1239 sched_unpin(); 1240 } 1241 1242 /* 4k PTEs -- Chosen to exceed the total size of Broadwell L2 TLB */ 1243 #define PMAP_INVLPG_THRESHOLD (4 * 1024 * PAGE_SIZE) 1244 1245 static void 1246 pmap_invalidate_range_int(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 1247 { 1248 cpuset_t *mask, other_cpus; 1249 vm_offset_t addr; 1250 u_int cpuid; 1251 1252 if (eva - sva >= PMAP_INVLPG_THRESHOLD) { 1253 pmap_invalidate_all_int(pmap); 1254 return; 1255 } 1256 1257 sched_pin(); 1258 if (pmap == kernel_pmap) { 1259 for (addr = sva; addr < eva; addr += PAGE_SIZE) 1260 invlpg(addr); 1261 mask = &all_cpus; 1262 } else if (!CPU_CMP(&pmap->pm_active, &all_cpus)) { 1263 mask = &all_cpus; 1264 } else { 1265 cpuid = PCPU_GET(cpuid); 1266 other_cpus = all_cpus; 1267 CPU_CLR(cpuid, &other_cpus); 1268 CPU_AND(&other_cpus, &other_cpus, &pmap->pm_active); 1269 mask = &other_cpus; 1270 } 1271 smp_masked_invlpg_range(*mask, sva, eva, pmap, pmap_curcpu_cb_dummy); 1272 sched_unpin(); 1273 } 1274 1275 static void 1276 pmap_invalidate_all_int(pmap_t pmap) 1277 { 1278 cpuset_t *mask, other_cpus; 1279 u_int cpuid; 1280 1281 sched_pin(); 1282 if (pmap == kernel_pmap) { 1283 invltlb(); 1284 mask = &all_cpus; 1285 } else if (!CPU_CMP(&pmap->pm_active, &all_cpus)) { 1286 mask = &all_cpus; 1287 } else { 1288 cpuid = PCPU_GET(cpuid); 1289 other_cpus = all_cpus; 1290 CPU_CLR(cpuid, &other_cpus); 1291 CPU_AND(&other_cpus, &other_cpus, &pmap->pm_active); 1292 mask = &other_cpus; 1293 } 1294 smp_masked_invltlb(*mask, pmap, pmap_curcpu_cb_dummy); 1295 sched_unpin(); 1296 } 1297 1298 static void 1299 pmap_invalidate_cache_curcpu_cb(pmap_t pmap __unused, 1300 vm_offset_t addr1 __unused, vm_offset_t addr2 __unused) 1301 { 1302 wbinvd(); 1303 } 1304 1305 static void 1306 __CONCAT(PMTYPE, invalidate_cache)(void) 1307 { 1308 smp_cache_flush(pmap_invalidate_cache_curcpu_cb); 1309 } 1310 1311 struct pde_action { 1312 cpuset_t invalidate; /* processors that invalidate their TLB */ 1313 vm_offset_t va; 1314 pd_entry_t *pde; 1315 pd_entry_t newpde; 1316 u_int store; /* processor that updates the PDE */ 1317 }; 1318 1319 static void 1320 pmap_update_pde_kernel(void *arg) 1321 { 1322 struct pde_action *act = arg; 1323 pd_entry_t *pde; 1324 1325 if (act->store == PCPU_GET(cpuid)) { 1326 pde = pmap_pde(kernel_pmap, act->va); 1327 pde_store(pde, act->newpde); 1328 } 1329 } 1330 1331 static void 1332 pmap_update_pde_user(void *arg) 1333 { 1334 struct pde_action *act = arg; 1335 1336 if (act->store == PCPU_GET(cpuid)) 1337 pde_store(act->pde, act->newpde); 1338 } 1339 1340 static void 1341 pmap_update_pde_teardown(void *arg) 1342 { 1343 struct pde_action *act = arg; 1344 1345 if (CPU_ISSET(PCPU_GET(cpuid), &act->invalidate)) 1346 pmap_update_pde_invalidate(act->va, act->newpde); 1347 } 1348 1349 /* 1350 * Change the page 
size for the specified virtual address in a way that 1351 * prevents any possibility of the TLB ever having two entries that map the 1352 * same virtual address using different page sizes. This is the recommended 1353 * workaround for Erratum 383 on AMD Family 10h processors. It prevents a 1354 * machine check exception for a TLB state that is improperly diagnosed as a 1355 * hardware error. 1356 */ 1357 static void 1358 pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde) 1359 { 1360 struct pde_action act; 1361 cpuset_t active, other_cpus; 1362 u_int cpuid; 1363 1364 sched_pin(); 1365 cpuid = PCPU_GET(cpuid); 1366 other_cpus = all_cpus; 1367 CPU_CLR(cpuid, &other_cpus); 1368 if (pmap == kernel_pmap) 1369 active = all_cpus; 1370 else 1371 active = pmap->pm_active; 1372 if (CPU_OVERLAP(&active, &other_cpus)) { 1373 act.store = cpuid; 1374 act.invalidate = active; 1375 act.va = va; 1376 act.pde = pde; 1377 act.newpde = newpde; 1378 CPU_SET(cpuid, &active); 1379 smp_rendezvous_cpus(active, 1380 smp_no_rendezvous_barrier, pmap == kernel_pmap ? 1381 pmap_update_pde_kernel : pmap_update_pde_user, 1382 pmap_update_pde_teardown, &act); 1383 } else { 1384 if (pmap == kernel_pmap) 1385 pmap_kenter_pde(va, newpde); 1386 else 1387 pde_store(pde, newpde); 1388 if (CPU_ISSET(cpuid, &active)) 1389 pmap_update_pde_invalidate(va, newpde); 1390 } 1391 sched_unpin(); 1392 } 1393 #else /* !SMP */ 1394 /* 1395 * Normal, non-SMP, 486+ invalidation functions. 1396 * We inline these within pmap.c for speed. 1397 */ 1398 static void 1399 pmap_invalidate_page_int(pmap_t pmap, vm_offset_t va) 1400 { 1401 1402 if (pmap == kernel_pmap) 1403 invlpg(va); 1404 } 1405 1406 static void 1407 pmap_invalidate_range_int(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 1408 { 1409 vm_offset_t addr; 1410 1411 if (pmap == kernel_pmap) 1412 for (addr = sva; addr < eva; addr += PAGE_SIZE) 1413 invlpg(addr); 1414 } 1415 1416 static void 1417 pmap_invalidate_all_int(pmap_t pmap) 1418 { 1419 1420 if (pmap == kernel_pmap) 1421 invltlb(); 1422 } 1423 1424 static void 1425 __CONCAT(PMTYPE, invalidate_cache)(void) 1426 { 1427 1428 wbinvd(); 1429 } 1430 1431 static void 1432 pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde) 1433 { 1434 1435 if (pmap == kernel_pmap) 1436 pmap_kenter_pde(va, newpde); 1437 else 1438 pde_store(pde, newpde); 1439 if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active)) 1440 pmap_update_pde_invalidate(va, newpde); 1441 } 1442 #endif /* !SMP */ 1443 1444 static void 1445 __CONCAT(PMTYPE, invalidate_page)(pmap_t pmap, vm_offset_t va) 1446 { 1447 1448 pmap_invalidate_page_int(pmap, va); 1449 } 1450 1451 static void 1452 __CONCAT(PMTYPE, invalidate_range)(pmap_t pmap, vm_offset_t sva, 1453 vm_offset_t eva) 1454 { 1455 1456 pmap_invalidate_range_int(pmap, sva, eva); 1457 } 1458 1459 static void 1460 __CONCAT(PMTYPE, invalidate_all)(pmap_t pmap) 1461 { 1462 1463 pmap_invalidate_all_int(pmap); 1464 } 1465 1466 static void 1467 pmap_invalidate_pde_page(pmap_t pmap, vm_offset_t va, pd_entry_t pde) 1468 { 1469 1470 /* 1471 * When the PDE has PG_PROMOTED set, the 2- or 4MB page mapping was 1472 * created by a promotion that did not invalidate the 512 or 1024 4KB 1473 * page mappings that might exist in the TLB. Consequently, at this 1474 * point, the TLB may hold both 4KB and 2- or 4MB page mappings for 1475 * the address range [va, va + NBPDR). Therefore, the entire range 1476 * must be invalidated here. 
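 * (On i386, NBPDR is 4MB without PAE and 2MB with PAE, hence the 1024 or
 * 512 constituent 4KB mappings mentioned above.)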
In contrast, when PG_PROMOTED is clear, 1477 * the TLB will not hold any 4KB page mappings for the address range 1478 * [va, va + NBPDR), and so a single INVLPG suffices to invalidate the 1479 * 2- or 4MB page mapping from the TLB. 1480 */ 1481 if ((pde & PG_PROMOTED) != 0) 1482 pmap_invalidate_range_int(pmap, va, va + NBPDR - 1); 1483 else 1484 pmap_invalidate_page_int(pmap, va); 1485 } 1486 1487 /* 1488 * Are we current address space or kernel? 1489 */ 1490 static __inline int 1491 pmap_is_current(pmap_t pmap) 1492 { 1493 1494 return (pmap == kernel_pmap); 1495 } 1496 1497 /* 1498 * If the given pmap is not the current or kernel pmap, the returned pte must 1499 * be released by passing it to pmap_pte_release(). 1500 */ 1501 static pt_entry_t * 1502 __CONCAT(PMTYPE, pte)(pmap_t pmap, vm_offset_t va) 1503 { 1504 pd_entry_t newpf; 1505 pd_entry_t *pde; 1506 1507 pde = pmap_pde(pmap, va); 1508 if (*pde & PG_PS) 1509 return (pde); 1510 if (*pde != 0) { 1511 /* are we current address space or kernel? */ 1512 if (pmap_is_current(pmap)) 1513 return (vtopte(va)); 1514 mtx_lock(&PMAP2mutex); 1515 newpf = *pde & PG_FRAME; 1516 if ((*PMAP2 & PG_FRAME) != newpf) { 1517 *PMAP2 = newpf | PG_RW | PG_V | PG_A | PG_M; 1518 pmap_invalidate_page_int(kernel_pmap, 1519 (vm_offset_t)PADDR2); 1520 } 1521 return (PADDR2 + (i386_btop(va) & (NPTEPG - 1))); 1522 } 1523 return (NULL); 1524 } 1525 1526 /* 1527 * Releases a pte that was obtained from pmap_pte(). Be prepared for the pte 1528 * being NULL. 1529 */ 1530 static __inline void 1531 pmap_pte_release(pt_entry_t *pte) 1532 { 1533 1534 if ((pt_entry_t *)((vm_offset_t)pte & ~PAGE_MASK) == PADDR2) 1535 mtx_unlock(&PMAP2mutex); 1536 } 1537 1538 /* 1539 * NB: The sequence of updating a page table followed by accesses to the 1540 * corresponding pages is subject to the situation described in the "AMD64 1541 * Architecture Programmer's Manual Volume 2: System Programming" rev. 3.23, 1542 * "7.3.1 Special Coherency Considerations". Therefore, issuing the INVLPG 1543 * right after modifying the PTE bits is crucial. 1544 */ 1545 static __inline void 1546 invlcaddr(void *caddr) 1547 { 1548 1549 invlpg((u_int)caddr); 1550 } 1551 1552 /* 1553 * Super fast pmap_pte routine best used when scanning 1554 * the pv lists. This eliminates many coarse-grained 1555 * invltlb calls. Note that many of the pv list 1556 * scans are across different pmaps. It is very wasteful 1557 * to do an entire invltlb for checking a single mapping. 1558 * 1559 * If the given pmap is not the current pmap, pvh_global_lock 1560 * must be held and curthread pinned to a CPU. 1561 */ 1562 static pt_entry_t * 1563 pmap_pte_quick(pmap_t pmap, vm_offset_t va) 1564 { 1565 pd_entry_t newpf; 1566 pd_entry_t *pde; 1567 1568 pde = pmap_pde(pmap, va); 1569 if (*pde & PG_PS) 1570 return (pde); 1571 if (*pde != 0) { 1572 /* are we current address space or kernel? 
*/ 1573 if (pmap_is_current(pmap)) 1574 return (vtopte(va)); 1575 rw_assert(&pvh_global_lock, RA_WLOCKED); 1576 KASSERT(curthread->td_pinned > 0, ("curthread not pinned")); 1577 newpf = *pde & PG_FRAME; 1578 if ((*PMAP1 & PG_FRAME) != newpf) { 1579 *PMAP1 = newpf | PG_RW | PG_V | PG_A | PG_M; 1580 #ifdef SMP 1581 PMAP1cpu = PCPU_GET(cpuid); 1582 #endif 1583 invlcaddr(PADDR1); 1584 PMAP1changed++; 1585 } else 1586 #ifdef SMP 1587 if (PMAP1cpu != PCPU_GET(cpuid)) { 1588 PMAP1cpu = PCPU_GET(cpuid); 1589 invlcaddr(PADDR1); 1590 PMAP1changedcpu++; 1591 } else 1592 #endif 1593 PMAP1unchanged++; 1594 return (PADDR1 + (i386_btop(va) & (NPTEPG - 1))); 1595 } 1596 return (0); 1597 } 1598 1599 static pt_entry_t * 1600 pmap_pte_quick3(pmap_t pmap, vm_offset_t va) 1601 { 1602 pd_entry_t newpf; 1603 pd_entry_t *pde; 1604 1605 pde = pmap_pde(pmap, va); 1606 if (*pde & PG_PS) 1607 return (pde); 1608 if (*pde != 0) { 1609 rw_assert(&pvh_global_lock, RA_WLOCKED); 1610 KASSERT(curthread->td_pinned > 0, ("curthread not pinned")); 1611 newpf = *pde & PG_FRAME; 1612 if ((*PMAP3 & PG_FRAME) != newpf) { 1613 *PMAP3 = newpf | PG_RW | PG_V | PG_A | PG_M; 1614 #ifdef SMP 1615 PMAP3cpu = PCPU_GET(cpuid); 1616 #endif 1617 invlcaddr(PADDR3); 1618 PMAP1changed++; 1619 } else 1620 #ifdef SMP 1621 if (PMAP3cpu != PCPU_GET(cpuid)) { 1622 PMAP3cpu = PCPU_GET(cpuid); 1623 invlcaddr(PADDR3); 1624 PMAP1changedcpu++; 1625 } else 1626 #endif 1627 PMAP1unchanged++; 1628 return (PADDR3 + (i386_btop(va) & (NPTEPG - 1))); 1629 } 1630 return (0); 1631 } 1632 1633 static pt_entry_t 1634 pmap_pte_ufast(pmap_t pmap, vm_offset_t va, pd_entry_t pde) 1635 { 1636 pt_entry_t *eh_ptep, pte, *ptep; 1637 1638 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1639 pde &= PG_FRAME; 1640 critical_enter(); 1641 eh_ptep = (pt_entry_t *)PCPU_GET(pmap_eh_ptep); 1642 if ((*eh_ptep & PG_FRAME) != pde) { 1643 *eh_ptep = pde | PG_RW | PG_V | PG_A | PG_M; 1644 invlcaddr((void *)PCPU_GET(pmap_eh_va)); 1645 } 1646 ptep = (pt_entry_t *)PCPU_GET(pmap_eh_va) + (i386_btop(va) & 1647 (NPTEPG - 1)); 1648 pte = *ptep; 1649 critical_exit(); 1650 return (pte); 1651 } 1652 1653 /* 1654 * Extract from the kernel page table the physical address that is mapped by 1655 * the given virtual address "va". 1656 * 1657 * This function may be used before pmap_bootstrap() is called. 1658 */ 1659 static vm_paddr_t 1660 __CONCAT(PMTYPE, kextract)(vm_offset_t va) 1661 { 1662 vm_paddr_t pa; 1663 1664 if ((pa = pte_load(&PTD[va >> PDRSHIFT])) & PG_PS) { 1665 pa = (pa & PG_PS_FRAME) | (va & PDRMASK); 1666 } else { 1667 /* 1668 * Beware of a concurrent promotion that changes the PDE at 1669 * this point! For example, vtopte() must not be used to 1670 * access the PTE because it would use the new PDE. It is, 1671 * however, safe to use the old PDE because the page table 1672 * page is preserved by the promotion. 1673 */ 1674 pa = KPTmap[i386_btop(va)]; 1675 pa = (pa & PG_FRAME) | (va & PAGE_MASK); 1676 } 1677 return (pa); 1678 } 1679 1680 /* 1681 * Routine: pmap_extract 1682 * Function: 1683 * Extract the physical page address associated 1684 * with the given map/virtual_address pair. 
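 *	Returns 0 when there is no page directory entry for "va".  For
 *	PG_PS mappings the offset within the superpage is preserved in
 *	the returned address.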
1685 */ 1686 static vm_paddr_t 1687 __CONCAT(PMTYPE, extract)(pmap_t pmap, vm_offset_t va) 1688 { 1689 vm_paddr_t rtval; 1690 pt_entry_t pte; 1691 pd_entry_t pde; 1692 1693 rtval = 0; 1694 PMAP_LOCK(pmap); 1695 pde = pmap->pm_pdir[va >> PDRSHIFT]; 1696 if (pde != 0) { 1697 if ((pde & PG_PS) != 0) 1698 rtval = (pde & PG_PS_FRAME) | (va & PDRMASK); 1699 else { 1700 pte = pmap_pte_ufast(pmap, va, pde); 1701 rtval = (pte & PG_FRAME) | (va & PAGE_MASK); 1702 } 1703 } 1704 PMAP_UNLOCK(pmap); 1705 return (rtval); 1706 } 1707 1708 /* 1709 * Routine: pmap_extract_and_hold 1710 * Function: 1711 * Atomically extract and hold the physical page 1712 * with the given pmap and virtual address pair 1713 * if that mapping permits the given protection. 1714 */ 1715 static vm_page_t 1716 __CONCAT(PMTYPE, extract_and_hold)(pmap_t pmap, vm_offset_t va, vm_prot_t prot) 1717 { 1718 pd_entry_t pde; 1719 pt_entry_t pte; 1720 vm_page_t m; 1721 1722 m = NULL; 1723 PMAP_LOCK(pmap); 1724 pde = *pmap_pde(pmap, va); 1725 if (pde != 0) { 1726 if (pde & PG_PS) { 1727 if ((pde & PG_RW) || (prot & VM_PROT_WRITE) == 0) 1728 m = PHYS_TO_VM_PAGE((pde & PG_PS_FRAME) | 1729 (va & PDRMASK)); 1730 } else { 1731 pte = pmap_pte_ufast(pmap, va, pde); 1732 if (pte != 0 && 1733 ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0)) 1734 m = PHYS_TO_VM_PAGE(pte & PG_FRAME); 1735 } 1736 if (m != NULL && !vm_page_wire_mapped(m)) 1737 m = NULL; 1738 } 1739 PMAP_UNLOCK(pmap); 1740 return (m); 1741 } 1742 1743 /*************************************************** 1744 * Low level mapping routines..... 1745 ***************************************************/ 1746 1747 /* 1748 * Add a wired page to the kva. 1749 * Note: not SMP coherent. 1750 * 1751 * This function may be used before pmap_bootstrap() is called. 1752 */ 1753 static void 1754 __CONCAT(PMTYPE, kenter)(vm_offset_t va, vm_paddr_t pa) 1755 { 1756 pt_entry_t *pte; 1757 1758 pte = vtopte(va); 1759 pte_store(pte, pa | PG_RW | PG_V); 1760 } 1761 1762 static __inline void 1763 pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode) 1764 { 1765 pt_entry_t *pte; 1766 1767 pte = vtopte(va); 1768 pte_store(pte, pa | PG_RW | PG_V | pmap_cache_bits(kernel_pmap, 1769 mode, 0)); 1770 } 1771 1772 /* 1773 * Remove a page from the kernel pagetables. 1774 * Note: not SMP coherent. 1775 * 1776 * This function may be used before pmap_bootstrap() is called. 1777 */ 1778 static void 1779 __CONCAT(PMTYPE, kremove)(vm_offset_t va) 1780 { 1781 pt_entry_t *pte; 1782 1783 pte = vtopte(va); 1784 pte_clear(pte); 1785 } 1786 1787 /* 1788 * Used to map a range of physical addresses into kernel 1789 * virtual address space. 1790 * 1791 * The value passed in '*virt' is a suggested virtual address for 1792 * the mapping. Architectures which can support a direct-mapped 1793 * physical to virtual region can return the appropriate address 1794 * within that region, leaving '*virt' unchanged. Other 1795 * architectures should map the pages starting at '*virt' and 1796 * update '*virt' with the first usable address after the mapped 1797 * region. 1798 */ 1799 static vm_offset_t 1800 __CONCAT(PMTYPE, map)(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, 1801 int prot) 1802 { 1803 vm_offset_t va, sva; 1804 vm_paddr_t superpage_offset; 1805 pd_entry_t newpde; 1806 1807 va = *virt; 1808 /* 1809 * Does the physical address range's size and alignment permit at 1810 * least one superpage mapping to be created? 
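 * That is, after discarding the bytes needed to advance "start" to the
 * next superpage boundary, at least NBPDR bytes must remain.  Purely
 * illustrative: with 4MB superpages, start = 1MB and end = 9MB leaves
 * 8MB - 3MB = 5MB beyond the first boundary, so a superpage mapping can
 * be created.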
1811 */ 1812 superpage_offset = start & PDRMASK; 1813 if ((end - start) - ((NBPDR - superpage_offset) & PDRMASK) >= NBPDR) { 1814 /* 1815 * Increase the starting virtual address so that its alignment 1816 * does not preclude the use of superpage mappings. 1817 */ 1818 if ((va & PDRMASK) < superpage_offset) 1819 va = (va & ~PDRMASK) + superpage_offset; 1820 else if ((va & PDRMASK) > superpage_offset) 1821 va = ((va + PDRMASK) & ~PDRMASK) + superpage_offset; 1822 } 1823 sva = va; 1824 while (start < end) { 1825 if ((start & PDRMASK) == 0 && end - start >= NBPDR && 1826 pseflag != 0) { 1827 KASSERT((va & PDRMASK) == 0, 1828 ("pmap_map: misaligned va %#x", va)); 1829 newpde = start | PG_PS | PG_RW | PG_V; 1830 pmap_kenter_pde(va, newpde); 1831 va += NBPDR; 1832 start += NBPDR; 1833 } else { 1834 pmap_kenter(va, start); 1835 va += PAGE_SIZE; 1836 start += PAGE_SIZE; 1837 } 1838 } 1839 pmap_invalidate_range_int(kernel_pmap, sva, va); 1840 *virt = va; 1841 return (sva); 1842 } 1843 1844 /* 1845 * Add a list of wired pages to the kva 1846 * this routine is only used for temporary 1847 * kernel mappings that do not need to have 1848 * page modification or references recorded. 1849 * Note that old mappings are simply written 1850 * over. The page *must* be wired. 1851 * Note: SMP coherent. Uses a ranged shootdown IPI. 1852 */ 1853 static void 1854 __CONCAT(PMTYPE, qenter)(vm_offset_t sva, vm_page_t *ma, int count) 1855 { 1856 pt_entry_t *endpte, oldpte, pa, *pte; 1857 vm_page_t m; 1858 1859 oldpte = 0; 1860 pte = vtopte(sva); 1861 endpte = pte + count; 1862 while (pte < endpte) { 1863 m = *ma++; 1864 pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(kernel_pmap, 1865 m->md.pat_mode, 0); 1866 if ((*pte & (PG_FRAME | PG_PTE_CACHE)) != pa) { 1867 oldpte |= *pte; 1868 pte_store(pte, pa | pg_nx | PG_RW | PG_V); 1869 } 1870 pte++; 1871 } 1872 if (__predict_false((oldpte & PG_V) != 0)) 1873 pmap_invalidate_range_int(kernel_pmap, sva, sva + count * 1874 PAGE_SIZE); 1875 } 1876 1877 /* 1878 * This routine tears out page mappings from the 1879 * kernel -- it is meant only for temporary mappings. 1880 * Note: SMP coherent. Uses a ranged shootdown IPI. 1881 */ 1882 static void 1883 __CONCAT(PMTYPE, qremove)(vm_offset_t sva, int count) 1884 { 1885 vm_offset_t va; 1886 1887 va = sva; 1888 while (count-- > 0) { 1889 pmap_kremove(va); 1890 va += PAGE_SIZE; 1891 } 1892 pmap_invalidate_range_int(kernel_pmap, sva, va); 1893 } 1894 1895 /*************************************************** 1896 * Page table page management routines..... 1897 ***************************************************/ 1898 /* 1899 * Schedule the specified unused page table page to be freed. Specifically, 1900 * add the page to the specified list of pages that will be released to the 1901 * physical memory manager after the TLB has been updated. 1902 */ 1903 static __inline void 1904 pmap_add_delayed_free_list(vm_page_t m, struct spglist *free, 1905 boolean_t set_PG_ZERO) 1906 { 1907 1908 if (set_PG_ZERO) 1909 m->flags |= PG_ZERO; 1910 else 1911 m->flags &= ~PG_ZERO; 1912 SLIST_INSERT_HEAD(free, m, plinks.s.ss); 1913 } 1914 1915 /* 1916 * Inserts the specified page table page into the specified pmap's collection 1917 * of idle page table pages. Each of a pmap's page table pages is responsible 1918 * for mapping a distinct range of virtual addresses. The pmap's collection is 1919 * ordered by this virtual address range. 1920 * 1921 * If "promoted" is false, then the page table page "mpte" must be zero filled; 1922 * "mpte"'s valid field will be set to 0. 
1923 * 1924 * If "promoted" is true and "allpte_PG_A_set" is false, then "mpte" must 1925 * contain valid mappings with identical attributes except for PG_A; "mpte"'s 1926 * valid field will be set to 1. 1927 * 1928 * If "promoted" and "allpte_PG_A_set" are both true, then "mpte" must contain 1929 * valid mappings with identical attributes including PG_A; "mpte"'s valid 1930 * field will be set to VM_PAGE_BITS_ALL. 1931 */ 1932 static __inline int 1933 pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte, bool promoted, 1934 bool allpte_PG_A_set) 1935 { 1936 1937 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1938 KASSERT(promoted || !allpte_PG_A_set, 1939 ("a zero-filled PTP can't have PG_A set in every PTE")); 1940 mpte->valid = promoted ? (allpte_PG_A_set ? VM_PAGE_BITS_ALL : 1) : 0; 1941 return (vm_radix_insert(&pmap->pm_root, mpte)); 1942 } 1943 1944 /* 1945 * Removes the page table page mapping the specified virtual address from the 1946 * specified pmap's collection of idle page table pages, and returns it. 1947 * Otherwise, returns NULL if there is no page table page corresponding to the 1948 * specified virtual address. 1949 */ 1950 static __inline vm_page_t 1951 pmap_remove_pt_page(pmap_t pmap, vm_offset_t va) 1952 { 1953 1954 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1955 return (vm_radix_remove(&pmap->pm_root, va >> PDRSHIFT)); 1956 } 1957 1958 /* 1959 * Decrements a page table page's reference count, which is used to record the 1960 * number of valid page table entries within the page. If the reference count 1961 * drops to zero, then the page table page is unmapped. Returns TRUE if the 1962 * page table page was unmapped and FALSE otherwise. 1963 */ 1964 static inline boolean_t 1965 pmap_unwire_ptp(pmap_t pmap, vm_page_t m, struct spglist *free) 1966 { 1967 1968 --m->ref_count; 1969 if (m->ref_count == 0) { 1970 _pmap_unwire_ptp(pmap, m, free); 1971 return (TRUE); 1972 } else 1973 return (FALSE); 1974 } 1975 1976 static void 1977 _pmap_unwire_ptp(pmap_t pmap, vm_page_t m, struct spglist *free) 1978 { 1979 1980 /* 1981 * unmap the page table page 1982 */ 1983 pmap->pm_pdir[m->pindex] = 0; 1984 --pmap->pm_stats.resident_count; 1985 1986 /* 1987 * There is not need to invalidate the recursive mapping since 1988 * we never instantiate such mapping for the usermode pmaps, 1989 * and never remove page table pages from the kernel pmap. 1990 * Put page on a list so that it is released since all TLB 1991 * shootdown is done. 1992 */ 1993 MPASS(pmap != kernel_pmap); 1994 pmap_add_delayed_free_list(m, free, TRUE); 1995 } 1996 1997 /* 1998 * After removing a page table entry, this routine is used to 1999 * conditionally free the page, and manage the reference count. 2000 */ 2001 static int 2002 pmap_unuse_pt(pmap_t pmap, vm_offset_t va, struct spglist *free) 2003 { 2004 pd_entry_t ptepde; 2005 vm_page_t mpte; 2006 2007 if (pmap == kernel_pmap) 2008 return (0); 2009 ptepde = *pmap_pde(pmap, va); 2010 mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME); 2011 return (pmap_unwire_ptp(pmap, mpte, free)); 2012 } 2013 2014 /* 2015 * Release a page table page reference after a failed attempt to create a 2016 * mapping. 2017 */ 2018 static void 2019 pmap_abort_ptp(pmap_t pmap, vm_offset_t va, vm_page_t mpte) 2020 { 2021 struct spglist free; 2022 2023 SLIST_INIT(&free); 2024 if (pmap_unwire_ptp(pmap, mpte, &free)) { 2025 /* 2026 * Although "va" was never mapped, paging-structure caches 2027 * could nonetheless have entries that refer to the freed 2028 * page table pages. Invalidate those entries. 
2029 */ 2030 pmap_invalidate_page_int(pmap, va); 2031 vm_page_free_pages_toq(&free, true); 2032 } 2033 } 2034 2035 /* 2036 * Initialize the pmap for the swapper process. 2037 */ 2038 static void 2039 __CONCAT(PMTYPE, pinit0)(pmap_t pmap) 2040 { 2041 2042 PMAP_LOCK_INIT(pmap); 2043 pmap->pm_pdir = IdlePTD; 2044 #ifdef PMAP_PAE_COMP 2045 pmap->pm_pdpt = IdlePDPT; 2046 #endif 2047 vm_radix_init(&pmap->pm_root); 2048 CPU_ZERO(&pmap->pm_active); 2049 TAILQ_INIT(&pmap->pm_pvchunk); 2050 bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 2051 pmap_activate_boot(pmap); 2052 } 2053 2054 /* 2055 * Initialize a preallocated and zeroed pmap structure, 2056 * such as one in a vmspace structure. 2057 */ 2058 static int 2059 __CONCAT(PMTYPE, pinit)(pmap_t pmap) 2060 { 2061 int i; 2062 2063 /* 2064 * No need to allocate page table space yet but we do need a valid 2065 * page directory table. 2066 */ 2067 if (pmap->pm_pdir == NULL) { 2068 pmap->pm_pdir = (pd_entry_t *)kva_alloc(NBPTD); 2069 if (pmap->pm_pdir == NULL) 2070 return (0); 2071 #ifdef PMAP_PAE_COMP 2072 pmap->pm_pdpt = uma_zalloc(pdptzone, M_WAITOK | M_ZERO); 2073 KASSERT(((vm_offset_t)pmap->pm_pdpt & 2074 ((NPGPTD * sizeof(pdpt_entry_t)) - 1)) == 0, 2075 ("pmap_pinit: pdpt misaligned")); 2076 KASSERT(pmap_kextract((vm_offset_t)pmap->pm_pdpt) < (4ULL<<30), 2077 ("pmap_pinit: pdpt above 4g")); 2078 #endif 2079 vm_radix_init(&pmap->pm_root); 2080 } 2081 KASSERT(vm_radix_is_empty(&pmap->pm_root), 2082 ("pmap_pinit: pmap has reserved page table page(s)")); 2083 2084 /* 2085 * allocate the page directory page(s) 2086 */ 2087 for (i = 0; i < NPGPTD; i++) { 2088 pmap->pm_ptdpg[i] = vm_page_alloc_noobj(VM_ALLOC_WIRED | 2089 VM_ALLOC_ZERO | VM_ALLOC_WAITOK); 2090 #ifdef PMAP_PAE_COMP 2091 pmap->pm_pdpt[i] = VM_PAGE_TO_PHYS(pmap->pm_ptdpg[i]) | PG_V; 2092 #endif 2093 } 2094 2095 pmap_qenter((vm_offset_t)pmap->pm_pdir, pmap->pm_ptdpg, NPGPTD); 2096 #ifdef PMAP_PAE_COMP 2097 if ((cpu_feature & CPUID_PAT) == 0) { 2098 pmap_invalidate_cache_range( 2099 trunc_page((vm_offset_t)pmap->pm_pdpt), 2100 round_page((vm_offset_t)pmap->pm_pdpt + 2101 NPGPTD * sizeof(pdpt_entry_t))); 2102 } 2103 #endif 2104 2105 /* Install the trampoline mapping. */ 2106 pmap->pm_pdir[TRPTDI] = PTD[TRPTDI]; 2107 2108 CPU_ZERO(&pmap->pm_active); 2109 TAILQ_INIT(&pmap->pm_pvchunk); 2110 bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 2111 2112 return (1); 2113 } 2114 2115 /* 2116 * this routine is called if the page table page is not 2117 * mapped correctly. 2118 */ 2119 static vm_page_t 2120 _pmap_allocpte(pmap_t pmap, u_int ptepindex, u_int flags) 2121 { 2122 vm_paddr_t ptepa; 2123 vm_page_t m; 2124 2125 /* 2126 * Allocate a page table page. 2127 */ 2128 if ((m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) { 2129 if ((flags & PMAP_ENTER_NOSLEEP) == 0) { 2130 PMAP_UNLOCK(pmap); 2131 rw_wunlock(&pvh_global_lock); 2132 vm_wait(NULL); 2133 rw_wlock(&pvh_global_lock); 2134 PMAP_LOCK(pmap); 2135 } 2136 2137 /* 2138 * Indicate the need to retry. While waiting, the page table 2139 * page may have been allocated. 2140 */ 2141 return (NULL); 2142 } 2143 m->pindex = ptepindex; 2144 2145 /* 2146 * Map the pagetable page into the process address space, if 2147 * it isn't already there. 
2148 */ 2149 2150 pmap->pm_stats.resident_count++; 2151 2152 ptepa = VM_PAGE_TO_PHYS(m); 2153 KASSERT((pmap->pm_pdir[ptepindex] & PG_V) == 0, 2154 ("%s: page directory entry %#jx is valid", 2155 __func__, (uintmax_t)pmap->pm_pdir[ptepindex])); 2156 pmap->pm_pdir[ptepindex] = 2157 (pd_entry_t)(ptepa | PG_U | PG_RW | PG_V | PG_A | PG_M); 2158 2159 return (m); 2160 } 2161 2162 static vm_page_t 2163 pmap_allocpte(pmap_t pmap, vm_offset_t va, u_int flags) 2164 { 2165 u_int ptepindex; 2166 pd_entry_t ptepa; 2167 vm_page_t m; 2168 2169 /* 2170 * Calculate pagetable page index 2171 */ 2172 ptepindex = va >> PDRSHIFT; 2173 retry: 2174 /* 2175 * Get the page directory entry 2176 */ 2177 ptepa = pmap->pm_pdir[ptepindex]; 2178 2179 /* 2180 * This supports switching from a 4MB page to a 2181 * normal 4K page. 2182 */ 2183 if (ptepa & PG_PS) { 2184 (void)pmap_demote_pde(pmap, &pmap->pm_pdir[ptepindex], va); 2185 ptepa = pmap->pm_pdir[ptepindex]; 2186 } 2187 2188 /* 2189 * If the page table page is mapped, we just increment the 2190 * hold count, and activate it. 2191 */ 2192 if (ptepa) { 2193 m = PHYS_TO_VM_PAGE(ptepa & PG_FRAME); 2194 m->ref_count++; 2195 } else { 2196 /* 2197 * Here if the pte page isn't mapped, or if it has 2198 * been deallocated. 2199 */ 2200 m = _pmap_allocpte(pmap, ptepindex, flags); 2201 if (m == NULL && (flags & PMAP_ENTER_NOSLEEP) == 0) 2202 goto retry; 2203 } 2204 return (m); 2205 } 2206 2207 /*************************************************** 2208 * Pmap allocation/deallocation routines. 2209 ***************************************************/ 2210 2211 /* 2212 * Release any resources held by the given physical map. 2213 * Called when a pmap initialized by pmap_pinit is being released. 2214 * Should only be called if the map contains no valid mappings. 
2215 */ 2216 static void 2217 __CONCAT(PMTYPE, release)(pmap_t pmap) 2218 { 2219 vm_page_t m; 2220 int i; 2221 2222 KASSERT(pmap->pm_stats.resident_count == 0, 2223 ("pmap_release: pmap resident count %ld != 0", 2224 pmap->pm_stats.resident_count)); 2225 KASSERT(vm_radix_is_empty(&pmap->pm_root), 2226 ("pmap_release: pmap has reserved page table page(s)")); 2227 KASSERT(CPU_EMPTY(&pmap->pm_active), 2228 ("releasing active pmap %p", pmap)); 2229 2230 pmap_qremove((vm_offset_t)pmap->pm_pdir, NPGPTD); 2231 2232 for (i = 0; i < NPGPTD; i++) { 2233 m = pmap->pm_ptdpg[i]; 2234 #ifdef PMAP_PAE_COMP 2235 KASSERT(VM_PAGE_TO_PHYS(m) == (pmap->pm_pdpt[i] & PG_FRAME), 2236 ("pmap_release: got wrong ptd page")); 2237 #endif 2238 vm_page_unwire_noq(m); 2239 vm_page_free(m); 2240 } 2241 } 2242 2243 /* 2244 * grow the number of kernel page table entries, if needed 2245 */ 2246 static void 2247 __CONCAT(PMTYPE, growkernel)(vm_offset_t addr) 2248 { 2249 vm_paddr_t ptppaddr; 2250 vm_page_t nkpg; 2251 pd_entry_t newpdir; 2252 2253 mtx_assert(&kernel_map->system_mtx, MA_OWNED); 2254 addr = roundup2(addr, NBPDR); 2255 if (addr - 1 >= vm_map_max(kernel_map)) 2256 addr = vm_map_max(kernel_map); 2257 while (kernel_vm_end < addr) { 2258 if (pdir_pde(PTD, kernel_vm_end)) { 2259 kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK; 2260 if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) { 2261 kernel_vm_end = vm_map_max(kernel_map); 2262 break; 2263 } 2264 continue; 2265 } 2266 2267 nkpg = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED | 2268 VM_ALLOC_ZERO); 2269 if (nkpg == NULL) 2270 panic("pmap_growkernel: no memory to grow kernel"); 2271 nkpg->pindex = kernel_vm_end >> PDRSHIFT; 2272 nkpt++; 2273 2274 ptppaddr = VM_PAGE_TO_PHYS(nkpg); 2275 newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M); 2276 pdir_pde(KPTD, kernel_vm_end) = newpdir; 2277 2278 pmap_kenter_pde(kernel_vm_end, newpdir); 2279 kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK; 2280 if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) { 2281 kernel_vm_end = vm_map_max(kernel_map); 2282 break; 2283 } 2284 } 2285 } 2286 2287 /*************************************************** 2288 * page management routines. 2289 ***************************************************/ 2290 2291 static const uint32_t pc_freemask[_NPCM] = { 2292 [0 ... _NPCM - 2] = PC_FREEN, 2293 [_NPCM - 1] = PC_FREEL 2294 }; 2295 2296 #ifdef PV_STATS 2297 extern int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail; 2298 extern long pv_entry_frees, pv_entry_allocs; 2299 extern int pv_entry_spare; 2300 #endif 2301 2302 /* 2303 * We are in a serious low memory condition. Resort to 2304 * drastic measures to free some pages so we can allocate 2305 * another pv entry chunk. 
2306 */ 2307 static vm_page_t 2308 pmap_pv_reclaim(pmap_t locked_pmap) 2309 { 2310 struct pch newtail; 2311 struct pv_chunk *pc; 2312 struct md_page *pvh; 2313 pd_entry_t *pde; 2314 pmap_t pmap; 2315 pt_entry_t *pte, tpte; 2316 pv_entry_t pv; 2317 vm_offset_t va; 2318 vm_page_t m, m_pc; 2319 struct spglist free; 2320 uint32_t inuse; 2321 int bit, field, freed; 2322 2323 PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED); 2324 pmap = NULL; 2325 m_pc = NULL; 2326 SLIST_INIT(&free); 2327 TAILQ_INIT(&newtail); 2328 while ((pc = TAILQ_FIRST(&pv_chunks)) != NULL && (pv_vafree == 0 || 2329 SLIST_EMPTY(&free))) { 2330 TAILQ_REMOVE(&pv_chunks, pc, pc_lru); 2331 if (pmap != pc->pc_pmap) { 2332 if (pmap != NULL) { 2333 pmap_invalidate_all_int(pmap); 2334 if (pmap != locked_pmap) 2335 PMAP_UNLOCK(pmap); 2336 } 2337 pmap = pc->pc_pmap; 2338 /* Avoid deadlock and lock recursion. */ 2339 if (pmap > locked_pmap) 2340 PMAP_LOCK(pmap); 2341 else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap)) { 2342 pmap = NULL; 2343 TAILQ_INSERT_TAIL(&newtail, pc, pc_lru); 2344 continue; 2345 } 2346 } 2347 2348 /* 2349 * Destroy every non-wired, 4 KB page mapping in the chunk. 2350 */ 2351 freed = 0; 2352 for (field = 0; field < _NPCM; field++) { 2353 for (inuse = ~pc->pc_map[field] & pc_freemask[field]; 2354 inuse != 0; inuse &= ~(1UL << bit)) { 2355 bit = bsfl(inuse); 2356 pv = &pc->pc_pventry[field * 32 + bit]; 2357 va = pv->pv_va; 2358 pde = pmap_pde(pmap, va); 2359 if ((*pde & PG_PS) != 0) 2360 continue; 2361 pte = __CONCAT(PMTYPE, pte)(pmap, va); 2362 tpte = *pte; 2363 if ((tpte & PG_W) == 0) 2364 tpte = pte_load_clear(pte); 2365 pmap_pte_release(pte); 2366 if ((tpte & PG_W) != 0) 2367 continue; 2368 KASSERT(tpte != 0, 2369 ("pmap_pv_reclaim: pmap %p va %x zero pte", 2370 pmap, va)); 2371 if ((tpte & PG_G) != 0) 2372 pmap_invalidate_page_int(pmap, va); 2373 m = PHYS_TO_VM_PAGE(tpte & PG_FRAME); 2374 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 2375 vm_page_dirty(m); 2376 if ((tpte & PG_A) != 0) 2377 vm_page_aflag_set(m, PGA_REFERENCED); 2378 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); 2379 if (TAILQ_EMPTY(&m->md.pv_list) && 2380 (m->flags & PG_FICTITIOUS) == 0) { 2381 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 2382 if (TAILQ_EMPTY(&pvh->pv_list)) { 2383 vm_page_aflag_clear(m, 2384 PGA_WRITEABLE); 2385 } 2386 } 2387 pc->pc_map[field] |= 1UL << bit; 2388 pmap_unuse_pt(pmap, va, &free); 2389 freed++; 2390 } 2391 } 2392 if (freed == 0) { 2393 TAILQ_INSERT_TAIL(&newtail, pc, pc_lru); 2394 continue; 2395 } 2396 /* Every freed mapping is for a 4 KB page. */ 2397 pmap->pm_stats.resident_count -= freed; 2398 PV_STAT(pv_entry_frees += freed); 2399 PV_STAT(pv_entry_spare += freed); 2400 pv_entry_count -= freed; 2401 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2402 for (field = 0; field < _NPCM; field++) 2403 if (pc->pc_map[field] != pc_freemask[field]) { 2404 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, 2405 pc_list); 2406 TAILQ_INSERT_TAIL(&newtail, pc, pc_lru); 2407 2408 /* 2409 * One freed pv entry in locked_pmap is 2410 * sufficient. 2411 */ 2412 if (pmap == locked_pmap) 2413 goto out; 2414 break; 2415 } 2416 if (field == _NPCM) { 2417 PV_STAT(pv_entry_spare -= _NPCPV); 2418 PV_STAT(pc_chunk_count--); 2419 PV_STAT(pc_chunk_frees++); 2420 /* Entire chunk is free; return it. 
*/ 2421 m_pc = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc)); 2422 pmap_qremove((vm_offset_t)pc, 1); 2423 pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc); 2424 break; 2425 } 2426 } 2427 out: 2428 TAILQ_CONCAT(&pv_chunks, &newtail, pc_lru); 2429 if (pmap != NULL) { 2430 pmap_invalidate_all_int(pmap); 2431 if (pmap != locked_pmap) 2432 PMAP_UNLOCK(pmap); 2433 } 2434 if (m_pc == NULL && pv_vafree != 0 && SLIST_EMPTY(&free)) { 2435 m_pc = SLIST_FIRST(&free); 2436 SLIST_REMOVE_HEAD(&free, plinks.s.ss); 2437 /* Recycle a freed page table page. */ 2438 m_pc->ref_count = 1; 2439 } 2440 vm_page_free_pages_toq(&free, true); 2441 return (m_pc); 2442 } 2443 2444 /* 2445 * free the pv_entry back to the free list 2446 */ 2447 static void 2448 free_pv_entry(pmap_t pmap, pv_entry_t pv) 2449 { 2450 struct pv_chunk *pc; 2451 int idx, field, bit; 2452 2453 rw_assert(&pvh_global_lock, RA_WLOCKED); 2454 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2455 PV_STAT(pv_entry_frees++); 2456 PV_STAT(pv_entry_spare++); 2457 pv_entry_count--; 2458 pc = pv_to_chunk(pv); 2459 idx = pv - &pc->pc_pventry[0]; 2460 field = idx / 32; 2461 bit = idx % 32; 2462 pc->pc_map[field] |= 1ul << bit; 2463 for (idx = 0; idx < _NPCM; idx++) 2464 if (pc->pc_map[idx] != pc_freemask[idx]) { 2465 /* 2466 * 98% of the time, pc is already at the head of the 2467 * list. If it isn't already, move it to the head. 2468 */ 2469 if (__predict_false(TAILQ_FIRST(&pmap->pm_pvchunk) != 2470 pc)) { 2471 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2472 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, 2473 pc_list); 2474 } 2475 return; 2476 } 2477 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2478 free_pv_chunk(pc); 2479 } 2480 2481 static void 2482 free_pv_chunk(struct pv_chunk *pc) 2483 { 2484 vm_page_t m; 2485 2486 TAILQ_REMOVE(&pv_chunks, pc, pc_lru); 2487 PV_STAT(pv_entry_spare -= _NPCPV); 2488 PV_STAT(pc_chunk_count--); 2489 PV_STAT(pc_chunk_frees++); 2490 /* entire chunk is free, return it */ 2491 m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc)); 2492 pmap_qremove((vm_offset_t)pc, 1); 2493 vm_page_unwire_noq(m); 2494 vm_page_free(m); 2495 pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc); 2496 } 2497 2498 /* 2499 * get a new pv_entry, allocating a block from the system 2500 * when needed. 
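 *
 * When "try" is TRUE the allocation may fail and NULL is returned, as in
 * this illustrative sketch of a caller (modeled on
 * pmap_try_insert_pv_entry() below, not copied from it):
 *
 *	pv = get_pv_entry(pmap, TRUE);
 *	if (pv == NULL)
 *		return (FALSE);
 *	pv->pv_va = va;
 *	TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
 *
 * When "try" is FALSE the routine instead reclaims pv chunks via
 * pmap_pv_reclaim() and retries until it succeeds.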
2501 */ 2502 static pv_entry_t 2503 get_pv_entry(pmap_t pmap, boolean_t try) 2504 { 2505 static const struct timeval printinterval = { 60, 0 }; 2506 static struct timeval lastprint; 2507 int bit, field; 2508 pv_entry_t pv; 2509 struct pv_chunk *pc; 2510 vm_page_t m; 2511 2512 rw_assert(&pvh_global_lock, RA_WLOCKED); 2513 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2514 PV_STAT(pv_entry_allocs++); 2515 pv_entry_count++; 2516 if (pv_entry_count > pv_entry_high_water) 2517 if (ratecheck(&lastprint, &printinterval)) 2518 printf("Approaching the limit on PV entries, consider " 2519 "increasing either the vm.pmap.shpgperproc or the " 2520 "vm.pmap.pv_entry_max tunable.\n"); 2521 retry: 2522 pc = TAILQ_FIRST(&pmap->pm_pvchunk); 2523 if (pc != NULL) { 2524 for (field = 0; field < _NPCM; field++) { 2525 if (pc->pc_map[field]) { 2526 bit = bsfl(pc->pc_map[field]); 2527 break; 2528 } 2529 } 2530 if (field < _NPCM) { 2531 pv = &pc->pc_pventry[field * 32 + bit]; 2532 pc->pc_map[field] &= ~(1ul << bit); 2533 /* If this was the last item, move it to tail */ 2534 for (field = 0; field < _NPCM; field++) 2535 if (pc->pc_map[field] != 0) { 2536 PV_STAT(pv_entry_spare--); 2537 return (pv); /* not full, return */ 2538 } 2539 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2540 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list); 2541 PV_STAT(pv_entry_spare--); 2542 return (pv); 2543 } 2544 } 2545 /* 2546 * Access to the ptelist "pv_vafree" is synchronized by the pvh 2547 * global lock. If "pv_vafree" is currently non-empty, it will 2548 * remain non-empty until pmap_ptelist_alloc() completes. 2549 */ 2550 if (pv_vafree == 0 || 2551 (m = vm_page_alloc_noobj(VM_ALLOC_WIRED)) == NULL) { 2552 if (try) { 2553 pv_entry_count--; 2554 PV_STAT(pc_chunk_tryfail++); 2555 return (NULL); 2556 } 2557 m = pmap_pv_reclaim(pmap); 2558 if (m == NULL) 2559 goto retry; 2560 } 2561 PV_STAT(pc_chunk_count++); 2562 PV_STAT(pc_chunk_allocs++); 2563 pc = (struct pv_chunk *)pmap_ptelist_alloc(&pv_vafree); 2564 pmap_qenter((vm_offset_t)pc, &m, 1); 2565 pc->pc_pmap = pmap; 2566 pc->pc_map[0] = pc_freemask[0] & ~1ul; /* preallocated bit 0 */ 2567 for (field = 1; field < _NPCM; field++) 2568 pc->pc_map[field] = pc_freemask[field]; 2569 TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru); 2570 pv = &pc->pc_pventry[0]; 2571 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); 2572 PV_STAT(pv_entry_spare += _NPCPV - 1); 2573 return (pv); 2574 } 2575 2576 static __inline pv_entry_t 2577 pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va) 2578 { 2579 pv_entry_t pv; 2580 2581 rw_assert(&pvh_global_lock, RA_WLOCKED); 2582 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { 2583 if (pmap == PV_PMAP(pv) && va == pv->pv_va) { 2584 TAILQ_REMOVE(&pvh->pv_list, pv, pv_next); 2585 break; 2586 } 2587 } 2588 return (pv); 2589 } 2590 2591 static void 2592 pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa) 2593 { 2594 struct md_page *pvh; 2595 pv_entry_t pv; 2596 vm_offset_t va_last; 2597 vm_page_t m; 2598 2599 rw_assert(&pvh_global_lock, RA_WLOCKED); 2600 KASSERT((pa & PDRMASK) == 0, 2601 ("pmap_pv_demote_pde: pa is not 4mpage aligned")); 2602 2603 /* 2604 * Transfer the 4mpage's pv entry for this mapping to the first 2605 * page's pv list. 2606 */ 2607 pvh = pa_to_pvh(pa); 2608 va = trunc_4mpage(va); 2609 pv = pmap_pvh_remove(pvh, pmap, va); 2610 KASSERT(pv != NULL, ("pmap_pv_demote_pde: pv not found")); 2611 m = PHYS_TO_VM_PAGE(pa); 2612 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); 2613 /* Instantiate the remaining NPTEPG - 1 pv entries. 
*/ 2614 va_last = va + NBPDR - PAGE_SIZE; 2615 do { 2616 m++; 2617 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2618 ("pmap_pv_demote_pde: page %p is not managed", m)); 2619 va += PAGE_SIZE; 2620 pmap_insert_entry(pmap, va, m); 2621 } while (va < va_last); 2622 } 2623 2624 #if VM_NRESERVLEVEL > 0 2625 static void 2626 pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa) 2627 { 2628 struct md_page *pvh; 2629 pv_entry_t pv; 2630 vm_offset_t va_last; 2631 vm_page_t m; 2632 2633 rw_assert(&pvh_global_lock, RA_WLOCKED); 2634 KASSERT((pa & PDRMASK) == 0, 2635 ("pmap_pv_promote_pde: pa is not 4mpage aligned")); 2636 2637 /* 2638 * Transfer the first page's pv entry for this mapping to the 2639 * 4mpage's pv list. Aside from avoiding the cost of a call 2640 * to get_pv_entry(), a transfer avoids the possibility that 2641 * get_pv_entry() calls pmap_collect() and that pmap_collect() 2642 * removes one of the mappings that is being promoted. 2643 */ 2644 m = PHYS_TO_VM_PAGE(pa); 2645 va = trunc_4mpage(va); 2646 pv = pmap_pvh_remove(&m->md, pmap, va); 2647 KASSERT(pv != NULL, ("pmap_pv_promote_pde: pv not found")); 2648 pvh = pa_to_pvh(pa); 2649 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next); 2650 /* Free the remaining NPTEPG - 1 pv entries. */ 2651 va_last = va + NBPDR - PAGE_SIZE; 2652 do { 2653 m++; 2654 va += PAGE_SIZE; 2655 pmap_pvh_free(&m->md, pmap, va); 2656 } while (va < va_last); 2657 } 2658 #endif /* VM_NRESERVLEVEL > 0 */ 2659 2660 static void 2661 pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va) 2662 { 2663 pv_entry_t pv; 2664 2665 pv = pmap_pvh_remove(pvh, pmap, va); 2666 KASSERT(pv != NULL, ("pmap_pvh_free: pv not found")); 2667 free_pv_entry(pmap, pv); 2668 } 2669 2670 static void 2671 pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va) 2672 { 2673 struct md_page *pvh; 2674 2675 rw_assert(&pvh_global_lock, RA_WLOCKED); 2676 pmap_pvh_free(&m->md, pmap, va); 2677 if (TAILQ_EMPTY(&m->md.pv_list) && (m->flags & PG_FICTITIOUS) == 0) { 2678 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 2679 if (TAILQ_EMPTY(&pvh->pv_list)) 2680 vm_page_aflag_clear(m, PGA_WRITEABLE); 2681 } 2682 } 2683 2684 /* 2685 * Create a pv entry for page at pa for 2686 * (pmap, va). 2687 */ 2688 static void 2689 pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m) 2690 { 2691 pv_entry_t pv; 2692 2693 rw_assert(&pvh_global_lock, RA_WLOCKED); 2694 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2695 pv = get_pv_entry(pmap, FALSE); 2696 pv->pv_va = va; 2697 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); 2698 } 2699 2700 /* 2701 * Conditionally create a pv entry. 2702 */ 2703 static boolean_t 2704 pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m) 2705 { 2706 pv_entry_t pv; 2707 2708 rw_assert(&pvh_global_lock, RA_WLOCKED); 2709 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2710 if (pv_entry_count < pv_entry_high_water && 2711 (pv = get_pv_entry(pmap, TRUE)) != NULL) { 2712 pv->pv_va = va; 2713 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); 2714 return (TRUE); 2715 } else 2716 return (FALSE); 2717 } 2718 2719 /* 2720 * Create the pv entries for each of the pages within a superpage. 
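 *
 * More precisely, a single pv entry is allocated and linked onto the pv
 * list of the 4mpage's md_page (pa_to_pvh()); per-4KB-page pv entries are
 * only instantiated later, by pmap_pv_demote_pde(), if the superpage
 * mapping is demoted.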
2721 */ 2722 static bool 2723 pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, pd_entry_t pde, u_int flags) 2724 { 2725 struct md_page *pvh; 2726 pv_entry_t pv; 2727 bool noreclaim; 2728 2729 rw_assert(&pvh_global_lock, RA_WLOCKED); 2730 noreclaim = (flags & PMAP_ENTER_NORECLAIM) != 0; 2731 if ((noreclaim && pv_entry_count >= pv_entry_high_water) || 2732 (pv = get_pv_entry(pmap, noreclaim)) == NULL) 2733 return (false); 2734 pv->pv_va = va; 2735 pvh = pa_to_pvh(pde & PG_PS_FRAME); 2736 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next); 2737 return (true); 2738 } 2739 2740 /* 2741 * Fills a page table page with mappings to consecutive physical pages. 2742 */ 2743 static void 2744 pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte) 2745 { 2746 pt_entry_t *pte; 2747 2748 for (pte = firstpte; pte < firstpte + NPTEPG; pte++) { 2749 *pte = newpte; 2750 newpte += PAGE_SIZE; 2751 } 2752 } 2753 2754 /* 2755 * Tries to demote a 2- or 4MB page mapping. If demotion fails, the 2756 * 2- or 4MB page mapping is invalidated. 2757 */ 2758 static boolean_t 2759 pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va) 2760 { 2761 pd_entry_t newpde, oldpde; 2762 pt_entry_t *firstpte, newpte; 2763 vm_paddr_t mptepa; 2764 vm_page_t mpte; 2765 struct spglist free; 2766 vm_offset_t sva; 2767 2768 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2769 oldpde = *pde; 2770 KASSERT((oldpde & (PG_PS | PG_V)) == (PG_PS | PG_V), 2771 ("pmap_demote_pde: oldpde is missing PG_PS and/or PG_V")); 2772 if ((oldpde & PG_A) == 0 || (mpte = pmap_remove_pt_page(pmap, va)) == 2773 NULL) { 2774 KASSERT((oldpde & PG_W) == 0, 2775 ("pmap_demote_pde: page table page for a wired mapping" 2776 " is missing")); 2777 2778 /* 2779 * Invalidate the 2- or 4MB page mapping and return 2780 * "failure" if the mapping was never accessed or the 2781 * allocation of the new page table page fails. 2782 */ 2783 if ((oldpde & PG_A) == 0 || 2784 (mpte = vm_page_alloc_noobj(VM_ALLOC_WIRED)) == NULL) { 2785 SLIST_INIT(&free); 2786 sva = trunc_4mpage(va); 2787 pmap_remove_pde(pmap, pde, sva, &free); 2788 if ((oldpde & PG_G) == 0) 2789 pmap_invalidate_pde_page(pmap, sva, oldpde); 2790 vm_page_free_pages_toq(&free, true); 2791 CTR2(KTR_PMAP, "pmap_demote_pde: failure for va %#x" 2792 " in pmap %p", va, pmap); 2793 return (FALSE); 2794 } 2795 mpte->pindex = va >> PDRSHIFT; 2796 if (pmap != kernel_pmap) { 2797 mpte->ref_count = NPTEPG; 2798 pmap->pm_stats.resident_count++; 2799 } 2800 } 2801 mptepa = VM_PAGE_TO_PHYS(mpte); 2802 2803 /* 2804 * If the page mapping is in the kernel's address space, then the 2805 * KPTmap can provide access to the page table page. Otherwise, 2806 * temporarily map the page table page (mpte) into the kernel's 2807 * address space at either PADDR1 or PADDR2. 
2808 */ 2809 if (pmap == kernel_pmap) 2810 firstpte = &KPTmap[i386_btop(trunc_4mpage(va))]; 2811 else if (curthread->td_pinned > 0 && rw_wowned(&pvh_global_lock)) { 2812 if ((*PMAP1 & PG_FRAME) != mptepa) { 2813 *PMAP1 = mptepa | PG_RW | PG_V | PG_A | PG_M; 2814 #ifdef SMP 2815 PMAP1cpu = PCPU_GET(cpuid); 2816 #endif 2817 invlcaddr(PADDR1); 2818 PMAP1changed++; 2819 } else 2820 #ifdef SMP 2821 if (PMAP1cpu != PCPU_GET(cpuid)) { 2822 PMAP1cpu = PCPU_GET(cpuid); 2823 invlcaddr(PADDR1); 2824 PMAP1changedcpu++; 2825 } else 2826 #endif 2827 PMAP1unchanged++; 2828 firstpte = PADDR1; 2829 } else { 2830 mtx_lock(&PMAP2mutex); 2831 if ((*PMAP2 & PG_FRAME) != mptepa) { 2832 *PMAP2 = mptepa | PG_RW | PG_V | PG_A | PG_M; 2833 pmap_invalidate_page_int(kernel_pmap, 2834 (vm_offset_t)PADDR2); 2835 } 2836 firstpte = PADDR2; 2837 } 2838 newpde = mptepa | PG_M | PG_A | (oldpde & PG_U) | PG_RW | PG_V; 2839 KASSERT((oldpde & PG_A) != 0, 2840 ("pmap_demote_pde: oldpde is missing PG_A")); 2841 KASSERT((oldpde & (PG_M | PG_RW)) != PG_RW, 2842 ("pmap_demote_pde: oldpde is missing PG_M")); 2843 newpte = oldpde & ~PG_PS; 2844 if ((newpte & PG_PDE_PAT) != 0) 2845 newpte ^= PG_PDE_PAT | PG_PTE_PAT; 2846 2847 /* 2848 * If the PTP is not leftover from an earlier promotion or it does not 2849 * have PG_A set in every PTE, then fill it. The new PTEs will all 2850 * have PG_A set. 2851 */ 2852 if (!vm_page_all_valid(mpte)) 2853 pmap_fill_ptp(firstpte, newpte); 2854 2855 KASSERT((*firstpte & PG_FRAME) == (newpte & PG_FRAME), 2856 ("pmap_demote_pde: firstpte and newpte map different physical" 2857 " addresses")); 2858 2859 /* 2860 * If the mapping has changed attributes, update the PTEs. 2861 */ 2862 if ((*firstpte & PG_PTE_PROMOTE) != (newpte & PG_PTE_PROMOTE)) 2863 pmap_fill_ptp(firstpte, newpte); 2864 2865 /* 2866 * Demote the mapping. This pmap is locked. The old PDE has 2867 * PG_A set. If the old PDE has PG_RW set, it also has PG_M 2868 * set. Thus, there is no danger of a race with another 2869 * processor changing the setting of PG_A and/or PG_M between 2870 * the read above and the store below. 2871 */ 2872 if (workaround_erratum383) 2873 pmap_update_pde(pmap, va, pde, newpde); 2874 else if (pmap == kernel_pmap) 2875 pmap_kenter_pde(va, newpde); 2876 else 2877 pde_store(pde, newpde); 2878 if (firstpte == PADDR2) 2879 mtx_unlock(&PMAP2mutex); 2880 2881 /* 2882 * Invalidate the recursive mapping of the page table page. 2883 */ 2884 pmap_invalidate_page_int(pmap, (vm_offset_t)vtopte(va)); 2885 2886 /* 2887 * Demote the pv entry. This depends on the earlier demotion 2888 * of the mapping. Specifically, the (re)creation of a per- 2889 * page pv entry might trigger the execution of pmap_collect(), 2890 * which might reclaim a newly (re)created per-page pv entry 2891 * and destroy the associated mapping. In order to destroy 2892 * the mapping, the PDE must have already changed from mapping 2893 * the 2mpage to referencing the page table page. 2894 */ 2895 if ((oldpde & PG_MANAGED) != 0) 2896 pmap_pv_demote_pde(pmap, va, oldpde & PG_PS_FRAME); 2897 2898 pmap_pde_demotions++; 2899 CTR2(KTR_PMAP, "pmap_demote_pde: success for va %#x" 2900 " in pmap %p", va, pmap); 2901 return (TRUE); 2902 } 2903 2904 /* 2905 * Removes a 2- or 4MB page mapping from the kernel pmap. 
2906 */ 2907 static void 2908 pmap_remove_kernel_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va) 2909 { 2910 pd_entry_t newpde; 2911 vm_paddr_t mptepa; 2912 vm_page_t mpte; 2913 2914 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2915 mpte = pmap_remove_pt_page(pmap, va); 2916 if (mpte == NULL) 2917 panic("pmap_remove_kernel_pde: Missing pt page."); 2918 2919 mptepa = VM_PAGE_TO_PHYS(mpte); 2920 newpde = mptepa | PG_M | PG_A | PG_RW | PG_V; 2921 2922 /* 2923 * If this page table page was unmapped by a promotion, then it 2924 * contains valid mappings. Zero it to invalidate those mappings. 2925 */ 2926 if (vm_page_any_valid(mpte)) 2927 pagezero((void *)&KPTmap[i386_btop(trunc_4mpage(va))]); 2928 2929 /* 2930 * Remove the mapping. 2931 */ 2932 if (workaround_erratum383) 2933 pmap_update_pde(pmap, va, pde, newpde); 2934 else 2935 pmap_kenter_pde(va, newpde); 2936 2937 /* 2938 * Invalidate the recursive mapping of the page table page. 2939 */ 2940 pmap_invalidate_page_int(pmap, (vm_offset_t)vtopte(va)); 2941 } 2942 2943 /* 2944 * pmap_remove_pde: do the things to unmap a superpage in a process 2945 */ 2946 static void 2947 pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva, 2948 struct spglist *free) 2949 { 2950 struct md_page *pvh; 2951 pd_entry_t oldpde; 2952 vm_offset_t eva, va; 2953 vm_page_t m, mpte; 2954 2955 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2956 KASSERT((sva & PDRMASK) == 0, 2957 ("pmap_remove_pde: sva is not 4mpage aligned")); 2958 oldpde = pte_load_clear(pdq); 2959 if (oldpde & PG_W) 2960 pmap->pm_stats.wired_count -= NBPDR / PAGE_SIZE; 2961 2962 /* 2963 * Machines that don't support invlpg, also don't support 2964 * PG_G. 2965 */ 2966 if ((oldpde & PG_G) != 0) 2967 pmap_invalidate_pde_page(kernel_pmap, sva, oldpde); 2968 2969 pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE; 2970 if (oldpde & PG_MANAGED) { 2971 pvh = pa_to_pvh(oldpde & PG_PS_FRAME); 2972 pmap_pvh_free(pvh, pmap, sva); 2973 eva = sva + NBPDR; 2974 for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME); 2975 va < eva; va += PAGE_SIZE, m++) { 2976 if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW)) 2977 vm_page_dirty(m); 2978 if (oldpde & PG_A) 2979 vm_page_aflag_set(m, PGA_REFERENCED); 2980 if (TAILQ_EMPTY(&m->md.pv_list) && 2981 TAILQ_EMPTY(&pvh->pv_list)) 2982 vm_page_aflag_clear(m, PGA_WRITEABLE); 2983 } 2984 } 2985 if (pmap == kernel_pmap) { 2986 pmap_remove_kernel_pde(pmap, pdq, sva); 2987 } else { 2988 mpte = pmap_remove_pt_page(pmap, sva); 2989 if (mpte != NULL) { 2990 KASSERT(vm_page_any_valid(mpte), 2991 ("pmap_remove_pde: pte page not promoted")); 2992 pmap->pm_stats.resident_count--; 2993 KASSERT(mpte->ref_count == NPTEPG, 2994 ("pmap_remove_pde: pte page ref count error")); 2995 mpte->ref_count = 0; 2996 pmap_add_delayed_free_list(mpte, free, FALSE); 2997 } 2998 } 2999 } 3000 3001 /* 3002 * pmap_remove_pte: do the things to unmap a page in a process 3003 */ 3004 static int 3005 pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va, 3006 struct spglist *free) 3007 { 3008 pt_entry_t oldpte; 3009 vm_page_t m; 3010 3011 rw_assert(&pvh_global_lock, RA_WLOCKED); 3012 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3013 oldpte = pte_load_clear(ptq); 3014 KASSERT(oldpte != 0, 3015 ("pmap_remove_pte: pmap %p va %x zero pte", pmap, va)); 3016 if (oldpte & PG_W) 3017 pmap->pm_stats.wired_count -= 1; 3018 /* 3019 * Machines that don't support invlpg, also don't support 3020 * PG_G. 
3021 */ 3022 if (oldpte & PG_G) 3023 pmap_invalidate_page_int(kernel_pmap, va); 3024 pmap->pm_stats.resident_count -= 1; 3025 if (oldpte & PG_MANAGED) { 3026 m = PHYS_TO_VM_PAGE(oldpte & PG_FRAME); 3027 if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 3028 vm_page_dirty(m); 3029 if (oldpte & PG_A) 3030 vm_page_aflag_set(m, PGA_REFERENCED); 3031 pmap_remove_entry(pmap, m, va); 3032 } 3033 return (pmap_unuse_pt(pmap, va, free)); 3034 } 3035 3036 /* 3037 * Remove a single page from a process address space 3038 */ 3039 static void 3040 pmap_remove_page(pmap_t pmap, vm_offset_t va, struct spglist *free) 3041 { 3042 pt_entry_t *pte; 3043 3044 rw_assert(&pvh_global_lock, RA_WLOCKED); 3045 KASSERT(curthread->td_pinned > 0, ("curthread not pinned")); 3046 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3047 if ((pte = pmap_pte_quick(pmap, va)) == NULL || *pte == 0) 3048 return; 3049 pmap_remove_pte(pmap, pte, va, free); 3050 pmap_invalidate_page_int(pmap, va); 3051 } 3052 3053 /* 3054 * Removes the specified range of addresses from the page table page. 3055 */ 3056 static bool 3057 pmap_remove_ptes(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, 3058 struct spglist *free) 3059 { 3060 pt_entry_t *pte; 3061 bool anyvalid; 3062 3063 rw_assert(&pvh_global_lock, RA_WLOCKED); 3064 KASSERT(curthread->td_pinned > 0, ("curthread not pinned")); 3065 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3066 anyvalid = false; 3067 for (pte = pmap_pte_quick(pmap, sva); sva != eva; pte++, 3068 sva += PAGE_SIZE) { 3069 if (*pte == 0) 3070 continue; 3071 3072 /* 3073 * The TLB entry for a PG_G mapping is invalidated by 3074 * pmap_remove_pte(). 3075 */ 3076 if ((*pte & PG_G) == 0) 3077 anyvalid = true; 3078 3079 if (pmap_remove_pte(pmap, pte, sva, free)) 3080 break; 3081 } 3082 return (anyvalid); 3083 } 3084 3085 /* 3086 * Remove the given range of addresses from the specified map. 3087 * 3088 * It is assumed that the start and end are properly 3089 * rounded to the page size. 3090 */ 3091 static void 3092 __CONCAT(PMTYPE, remove)(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 3093 { 3094 vm_offset_t pdnxt; 3095 pd_entry_t ptpaddr; 3096 struct spglist free; 3097 int anyvalid; 3098 3099 /* 3100 * Perform an unsynchronized read. This is, however, safe. 3101 */ 3102 if (pmap->pm_stats.resident_count == 0) 3103 return; 3104 3105 anyvalid = 0; 3106 SLIST_INIT(&free); 3107 3108 rw_wlock(&pvh_global_lock); 3109 sched_pin(); 3110 PMAP_LOCK(pmap); 3111 3112 /* 3113 * special handling of removing one page. a very 3114 * common operation and easy to short circuit some 3115 * code. 3116 */ 3117 if ((sva + PAGE_SIZE == eva) && 3118 ((pmap->pm_pdir[(sva >> PDRSHIFT)] & PG_PS) == 0)) { 3119 pmap_remove_page(pmap, sva, &free); 3120 goto out; 3121 } 3122 3123 for (; sva < eva; sva = pdnxt) { 3124 u_int pdirindex; 3125 3126 /* 3127 * Calculate index for next page table. 3128 */ 3129 pdnxt = (sva + NBPDR) & ~PDRMASK; 3130 if (pdnxt < sva) 3131 pdnxt = eva; 3132 if (pmap->pm_stats.resident_count == 0) 3133 break; 3134 3135 pdirindex = sva >> PDRSHIFT; 3136 ptpaddr = pmap->pm_pdir[pdirindex]; 3137 3138 /* 3139 * Weed out invalid mappings. Note: we assume that the page 3140 * directory table is always allocated, and in kernel virtual. 3141 */ 3142 if (ptpaddr == 0) 3143 continue; 3144 3145 /* 3146 * Check for large page. 3147 */ 3148 if ((ptpaddr & PG_PS) != 0) { 3149 /* 3150 * Are we removing the entire large page? If not, 3151 * demote the mapping and fall through. 
3152 */ 3153 if (sva + NBPDR == pdnxt && eva >= pdnxt) { 3154 /* 3155 * The TLB entry for a PG_G mapping is 3156 * invalidated by pmap_remove_pde(). 3157 */ 3158 if ((ptpaddr & PG_G) == 0) 3159 anyvalid = 1; 3160 pmap_remove_pde(pmap, 3161 &pmap->pm_pdir[pdirindex], sva, &free); 3162 continue; 3163 } else if (!pmap_demote_pde(pmap, 3164 &pmap->pm_pdir[pdirindex], sva)) { 3165 /* The large page mapping was destroyed. */ 3166 continue; 3167 } 3168 } 3169 3170 /* 3171 * Limit our scan to either the end of the va represented 3172 * by the current page table page, or to the end of the 3173 * range being removed. 3174 */ 3175 if (pdnxt > eva) 3176 pdnxt = eva; 3177 3178 if (pmap_remove_ptes(pmap, sva, pdnxt, &free)) 3179 anyvalid = 1; 3180 } 3181 out: 3182 sched_unpin(); 3183 if (anyvalid) 3184 pmap_invalidate_all_int(pmap); 3185 rw_wunlock(&pvh_global_lock); 3186 PMAP_UNLOCK(pmap); 3187 vm_page_free_pages_toq(&free, true); 3188 } 3189 3190 /* 3191 * Routine: pmap_remove_all 3192 * Function: 3193 * Removes this physical page from 3194 * all physical maps in which it resides. 3195 * Reflects back modify bits to the pager. 3196 * 3197 * Notes: 3198 * Original versions of this routine were very 3199 * inefficient because they iteratively called 3200 * pmap_remove (slow...) 3201 */ 3202 3203 static void 3204 __CONCAT(PMTYPE, remove_all)(vm_page_t m) 3205 { 3206 struct md_page *pvh; 3207 pv_entry_t pv; 3208 pmap_t pmap; 3209 pt_entry_t *pte, tpte; 3210 pd_entry_t *pde; 3211 vm_offset_t va; 3212 struct spglist free; 3213 3214 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3215 ("pmap_remove_all: page %p is not managed", m)); 3216 SLIST_INIT(&free); 3217 rw_wlock(&pvh_global_lock); 3218 sched_pin(); 3219 if ((m->flags & PG_FICTITIOUS) != 0) 3220 goto small_mappings; 3221 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 3222 while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) { 3223 va = pv->pv_va; 3224 pmap = PV_PMAP(pv); 3225 PMAP_LOCK(pmap); 3226 pde = pmap_pde(pmap, va); 3227 (void)pmap_demote_pde(pmap, pde, va); 3228 PMAP_UNLOCK(pmap); 3229 } 3230 small_mappings: 3231 while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { 3232 pmap = PV_PMAP(pv); 3233 PMAP_LOCK(pmap); 3234 pmap->pm_stats.resident_count--; 3235 pde = pmap_pde(pmap, pv->pv_va); 3236 KASSERT((*pde & PG_PS) == 0, ("pmap_remove_all: found" 3237 " a 4mpage in page %p's pv list", m)); 3238 pte = pmap_pte_quick(pmap, pv->pv_va); 3239 tpte = pte_load_clear(pte); 3240 KASSERT(tpte != 0, ("pmap_remove_all: pmap %p va %x zero pte", 3241 pmap, pv->pv_va)); 3242 if (tpte & PG_W) 3243 pmap->pm_stats.wired_count--; 3244 if (tpte & PG_A) 3245 vm_page_aflag_set(m, PGA_REFERENCED); 3246 3247 /* 3248 * Update the vm_page_t clean and reference bits. 
3249 */ 3250 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 3251 vm_page_dirty(m); 3252 pmap_unuse_pt(pmap, pv->pv_va, &free); 3253 pmap_invalidate_page_int(pmap, pv->pv_va); 3254 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); 3255 free_pv_entry(pmap, pv); 3256 PMAP_UNLOCK(pmap); 3257 } 3258 vm_page_aflag_clear(m, PGA_WRITEABLE); 3259 sched_unpin(); 3260 rw_wunlock(&pvh_global_lock); 3261 vm_page_free_pages_toq(&free, true); 3262 } 3263 3264 /* 3265 * pmap_protect_pde: do the things to protect a 4mpage in a process 3266 */ 3267 static boolean_t 3268 pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot) 3269 { 3270 pd_entry_t newpde, oldpde; 3271 vm_page_t m, mt; 3272 boolean_t anychanged; 3273 3274 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3275 KASSERT((sva & PDRMASK) == 0, 3276 ("pmap_protect_pde: sva is not 4mpage aligned")); 3277 anychanged = FALSE; 3278 retry: 3279 oldpde = newpde = *pde; 3280 if ((prot & VM_PROT_WRITE) == 0) { 3281 if ((oldpde & (PG_MANAGED | PG_M | PG_RW)) == 3282 (PG_MANAGED | PG_M | PG_RW)) { 3283 m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME); 3284 for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++) 3285 vm_page_dirty(mt); 3286 } 3287 newpde &= ~(PG_RW | PG_M); 3288 } 3289 #ifdef PMAP_PAE_COMP 3290 if ((prot & VM_PROT_EXECUTE) == 0 && !i386_read_exec) 3291 newpde |= pg_nx; 3292 #endif 3293 if (newpde != oldpde) { 3294 /* 3295 * As an optimization to future operations on this PDE, clear 3296 * PG_PROMOTED. The impending invalidation will remove any 3297 * lingering 4KB page mappings from the TLB. 3298 */ 3299 if (!pde_cmpset(pde, oldpde, newpde & ~PG_PROMOTED)) 3300 goto retry; 3301 if ((oldpde & PG_G) != 0) 3302 pmap_invalidate_pde_page(kernel_pmap, sva, oldpde); 3303 else 3304 anychanged = TRUE; 3305 } 3306 return (anychanged); 3307 } 3308 3309 /* 3310 * Set the physical protection on the 3311 * specified range of this map as requested. 3312 */ 3313 static void 3314 __CONCAT(PMTYPE, protect)(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, 3315 vm_prot_t prot) 3316 { 3317 vm_offset_t pdnxt; 3318 pd_entry_t ptpaddr; 3319 pt_entry_t *pte; 3320 boolean_t anychanged, pv_lists_locked; 3321 3322 KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot)); 3323 if (prot == VM_PROT_NONE) { 3324 pmap_remove(pmap, sva, eva); 3325 return; 3326 } 3327 3328 #ifdef PMAP_PAE_COMP 3329 if ((prot & (VM_PROT_WRITE | VM_PROT_EXECUTE)) == 3330 (VM_PROT_WRITE | VM_PROT_EXECUTE)) 3331 return; 3332 #else 3333 if (prot & VM_PROT_WRITE) 3334 return; 3335 #endif 3336 3337 if (pmap_is_current(pmap)) 3338 pv_lists_locked = FALSE; 3339 else { 3340 pv_lists_locked = TRUE; 3341 resume: 3342 rw_wlock(&pvh_global_lock); 3343 sched_pin(); 3344 } 3345 anychanged = FALSE; 3346 3347 PMAP_LOCK(pmap); 3348 for (; sva < eva; sva = pdnxt) { 3349 pt_entry_t obits, pbits; 3350 u_int pdirindex; 3351 3352 pdnxt = (sva + NBPDR) & ~PDRMASK; 3353 if (pdnxt < sva) 3354 pdnxt = eva; 3355 3356 pdirindex = sva >> PDRSHIFT; 3357 ptpaddr = pmap->pm_pdir[pdirindex]; 3358 3359 /* 3360 * Weed out invalid mappings. Note: we assume that the page 3361 * directory table is always allocated, and in kernel virtual. 3362 */ 3363 if (ptpaddr == 0) 3364 continue; 3365 3366 /* 3367 * Check for large page. 3368 */ 3369 if ((ptpaddr & PG_PS) != 0) { 3370 /* 3371 * Are we protecting the entire large page? If not, 3372 * demote the mapping and fall through. 3373 */ 3374 if (sva + NBPDR == pdnxt && eva >= pdnxt) { 3375 /* 3376 * The TLB entry for a PG_G mapping is 3377 * invalidated by pmap_protect_pde(). 
3378 */ 3379 if (pmap_protect_pde(pmap, 3380 &pmap->pm_pdir[pdirindex], sva, prot)) 3381 anychanged = TRUE; 3382 continue; 3383 } else { 3384 if (!pv_lists_locked) { 3385 pv_lists_locked = TRUE; 3386 if (!rw_try_wlock(&pvh_global_lock)) { 3387 if (anychanged) 3388 pmap_invalidate_all_int( 3389 pmap); 3390 PMAP_UNLOCK(pmap); 3391 goto resume; 3392 } 3393 sched_pin(); 3394 } 3395 if (!pmap_demote_pde(pmap, 3396 &pmap->pm_pdir[pdirindex], sva)) { 3397 /* 3398 * The large page mapping was 3399 * destroyed. 3400 */ 3401 continue; 3402 } 3403 } 3404 } 3405 3406 if (pdnxt > eva) 3407 pdnxt = eva; 3408 3409 for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++, 3410 sva += PAGE_SIZE) { 3411 vm_page_t m; 3412 3413 retry: 3414 /* 3415 * Regardless of whether a pte is 32 or 64 bits in 3416 * size, PG_RW, PG_A, and PG_M are among the least 3417 * significant 32 bits. 3418 */ 3419 obits = pbits = *pte; 3420 if ((pbits & PG_V) == 0) 3421 continue; 3422 3423 if ((prot & VM_PROT_WRITE) == 0) { 3424 if ((pbits & (PG_MANAGED | PG_M | PG_RW)) == 3425 (PG_MANAGED | PG_M | PG_RW)) { 3426 m = PHYS_TO_VM_PAGE(pbits & PG_FRAME); 3427 vm_page_dirty(m); 3428 } 3429 pbits &= ~(PG_RW | PG_M); 3430 } 3431 #ifdef PMAP_PAE_COMP 3432 if ((prot & VM_PROT_EXECUTE) == 0 && !i386_read_exec) 3433 pbits |= pg_nx; 3434 #endif 3435 3436 if (pbits != obits) { 3437 #ifdef PMAP_PAE_COMP 3438 if (!atomic_cmpset_64(pte, obits, pbits)) 3439 goto retry; 3440 #else 3441 if (!atomic_cmpset_int((u_int *)pte, obits, 3442 pbits)) 3443 goto retry; 3444 #endif 3445 if (obits & PG_G) 3446 pmap_invalidate_page_int(pmap, sva); 3447 else 3448 anychanged = TRUE; 3449 } 3450 } 3451 } 3452 if (anychanged) 3453 pmap_invalidate_all_int(pmap); 3454 if (pv_lists_locked) { 3455 sched_unpin(); 3456 rw_wunlock(&pvh_global_lock); 3457 } 3458 PMAP_UNLOCK(pmap); 3459 } 3460 3461 #if VM_NRESERVLEVEL > 0 3462 /* 3463 * Tries to promote the 512 or 1024, contiguous 4KB page mappings that are 3464 * within a single page table page (PTP) to a single 2- or 4MB page mapping. 3465 * For promotion to occur, two conditions must be met: (1) the 4KB page 3466 * mappings must map aligned, contiguous physical memory and (2) the 4KB page 3467 * mappings must have identical characteristics. 3468 * 3469 * Managed (PG_MANAGED) mappings within the kernel address space are not 3470 * promoted. The reason is that kernel PDEs are replicated in each pmap but 3471 * pmap_clear_ptes() and pmap_ts_referenced() only read the PDE from the kernel 3472 * pmap. 3473 */ 3474 static bool 3475 pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va, vm_page_t mpte) 3476 { 3477 pd_entry_t newpde; 3478 pt_entry_t allpte_PG_A, *firstpte, oldpte, pa, *pte; 3479 #ifdef KTR 3480 vm_offset_t oldpteva; 3481 #endif 3482 3483 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3484 if (!pg_ps_enabled) 3485 return (false); 3486 3487 /* 3488 * Examine the first PTE in the specified PTP. Abort if this PTE is 3489 * either invalid or does not map the first 4KB physical page 3490 * within a 2- or 4MB page. 
3491 */ 3492 firstpte = pmap_pte_quick(pmap, trunc_4mpage(va)); 3493 setpde: 3494 newpde = *firstpte; 3495 if ((newpde & ((PG_FRAME & PDRMASK) | PG_V)) != PG_V) { 3496 pmap_pde_p_failures++; 3497 CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x" 3498 " in pmap %p", va, pmap); 3499 return (false); 3500 } 3501 if ((*firstpte & PG_MANAGED) != 0 && pmap == kernel_pmap) { 3502 pmap_pde_p_failures++; 3503 CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x" 3504 " in pmap %p", va, pmap); 3505 return (false); 3506 } 3507 3508 /* 3509 * Both here and in the below "for" loop, to allow for repromotion 3510 * after MADV_FREE, conditionally write protect a clean PTE before 3511 * possibly aborting the promotion due to other PTE attributes. Why? 3512 * Suppose that MADV_FREE is applied to a part of a superpage, the 3513 * address range [S, E). pmap_advise() will demote the superpage 3514 * mapping, destroy the 4KB page mapping at the end of [S, E), and 3515 * clear PG_M and PG_A in the PTEs for the rest of [S, E). Later, 3516 * imagine that the memory in [S, E) is recycled, but the last 4KB 3517 * page in [S, E) is not the last to be rewritten, or simply accessed. 3518 * In other words, there is still a 4KB page in [S, E), call it P, 3519 * that is writeable but PG_M and PG_A are clear in P's PTE. Unless 3520 * we write protect P before aborting the promotion, if and when P is 3521 * finally rewritten, there won't be a page fault to trigger 3522 * repromotion. 3523 */ 3524 if ((newpde & (PG_M | PG_RW)) == PG_RW) { 3525 /* 3526 * When PG_M is already clear, PG_RW can be cleared without 3527 * a TLB invalidation. 3528 */ 3529 if (!atomic_cmpset_int((u_int *)firstpte, newpde, newpde & 3530 ~PG_RW)) 3531 goto setpde; 3532 newpde &= ~PG_RW; 3533 CTR2(KTR_PMAP, "pmap_promote_pde: protect for va %#lx" 3534 " in pmap %p", va & ~PDRMASK, pmap); 3535 } 3536 3537 /* 3538 * Examine each of the other PTEs in the specified PTP. Abort if this 3539 * PTE maps an unexpected 4KB physical page or does not have identical 3540 * characteristics to the first PTE. 3541 */ 3542 allpte_PG_A = newpde & PG_A; 3543 pa = (newpde & (PG_PS_FRAME | PG_V)) + NBPDR - PAGE_SIZE; 3544 for (pte = firstpte + NPTEPG - 1; pte > firstpte; pte--) { 3545 setpte: 3546 oldpte = *pte; 3547 if ((oldpte & (PG_FRAME | PG_V)) != pa) { 3548 pmap_pde_p_failures++; 3549 CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x" 3550 " in pmap %p", va, pmap); 3551 return (false); 3552 } 3553 if ((oldpte & (PG_M | PG_RW)) == PG_RW) { 3554 /* 3555 * When PG_M is already clear, PG_RW can be cleared 3556 * without a TLB invalidation. 3557 */ 3558 if (!atomic_cmpset_int((u_int *)pte, oldpte, 3559 oldpte & ~PG_RW)) 3560 goto setpte; 3561 oldpte &= ~PG_RW; 3562 #ifdef KTR 3563 oldpteva = (oldpte & PG_FRAME & PDRMASK) | 3564 (va & ~PDRMASK); 3565 #endif 3566 CTR2(KTR_PMAP, "pmap_promote_pde: protect for va %#x" 3567 " in pmap %p", oldpteva, pmap); 3568 } 3569 if ((oldpte & PG_PTE_PROMOTE) != (newpde & PG_PTE_PROMOTE)) { 3570 pmap_pde_p_failures++; 3571 CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x" 3572 " in pmap %p", va, pmap); 3573 return (false); 3574 } 3575 allpte_PG_A &= oldpte; 3576 pa -= PAGE_SIZE; 3577 } 3578 3579 /* 3580 * Unless all PTEs have PG_A set, clear it from the superpage mapping, 3581 * so that promotions triggered by speculative mappings, such as 3582 * pmap_enter_quick(), don't automatically mark the underlying pages 3583 * as referenced. 
3584 */ 3585 newpde &= ~PG_A | allpte_PG_A; 3586 3587 /* 3588 * Save the PTP in its current state until the PDE mapping the 3589 * superpage is demoted by pmap_demote_pde() or destroyed by 3590 * pmap_remove_pde(). If PG_A is not set in every PTE, then request 3591 * that the PTP be refilled on demotion. 3592 */ 3593 if (mpte == NULL) 3594 mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME); 3595 KASSERT(mpte >= vm_page_array && 3596 mpte < &vm_page_array[vm_page_array_size], 3597 ("pmap_promote_pde: page table page is out of range")); 3598 KASSERT(mpte->pindex == va >> PDRSHIFT, 3599 ("pmap_promote_pde: page table page's pindex is wrong")); 3600 if (pmap_insert_pt_page(pmap, mpte, true, allpte_PG_A != 0)) { 3601 pmap_pde_p_failures++; 3602 CTR2(KTR_PMAP, 3603 "pmap_promote_pde: failure for va %#x in pmap %p", va, 3604 pmap); 3605 return (false); 3606 } 3607 3608 /* 3609 * Promote the pv entries. 3610 */ 3611 if ((newpde & PG_MANAGED) != 0) 3612 pmap_pv_promote_pde(pmap, va, newpde & PG_PS_FRAME); 3613 3614 /* 3615 * Propagate the PAT index to its proper position. 3616 */ 3617 if ((newpde & PG_PTE_PAT) != 0) 3618 newpde ^= PG_PDE_PAT | PG_PTE_PAT; 3619 3620 /* 3621 * Map the superpage. 3622 */ 3623 if (workaround_erratum383) 3624 pmap_update_pde(pmap, va, pde, PG_PS | newpde); 3625 else if (pmap == kernel_pmap) 3626 pmap_kenter_pde(va, PG_PROMOTED | PG_PS | newpde); 3627 else 3628 pde_store(pde, PG_PROMOTED | PG_PS | newpde); 3629 3630 pmap_pde_promotions++; 3631 CTR2(KTR_PMAP, "pmap_promote_pde: success for va %#x" 3632 " in pmap %p", va, pmap); 3633 return (true); 3634 } 3635 #endif /* VM_NRESERVLEVEL > 0 */ 3636 3637 /* 3638 * Insert the given physical page (p) at 3639 * the specified virtual address (v) in the 3640 * target physical map with the protection requested. 3641 * 3642 * If specified, the page will be wired down, meaning 3643 * that the related pte can not be reclaimed. 3644 * 3645 * NB: This is the only routine which MAY NOT lazy-evaluate 3646 * or lose information. That is, this routine must actually 3647 * insert this page into the given map NOW. 
3648 */ 3649 static int 3650 __CONCAT(PMTYPE, enter)(pmap_t pmap, vm_offset_t va, vm_page_t m, 3651 vm_prot_t prot, u_int flags, int8_t psind) 3652 { 3653 pd_entry_t *pde; 3654 pt_entry_t *pte; 3655 pt_entry_t newpte, origpte; 3656 pv_entry_t pv; 3657 vm_paddr_t opa, pa; 3658 vm_page_t mpte, om; 3659 int rv; 3660 3661 va = trunc_page(va); 3662 KASSERT((pmap == kernel_pmap && va < VM_MAX_KERNEL_ADDRESS) || 3663 (pmap != kernel_pmap && va < VM_MAXUSER_ADDRESS), 3664 ("pmap_enter: toobig k%d %#x", pmap == kernel_pmap, va)); 3665 KASSERT(va < PMAP_TRM_MIN_ADDRESS, 3666 ("pmap_enter: invalid to pmap_enter into trampoline (va: 0x%x)", 3667 va)); 3668 KASSERT(pmap != kernel_pmap || (m->oflags & VPO_UNMANAGED) != 0 || 3669 !VA_IS_CLEANMAP(va), 3670 ("pmap_enter: managed mapping within the clean submap")); 3671 if ((m->oflags & VPO_UNMANAGED) == 0) 3672 VM_PAGE_OBJECT_BUSY_ASSERT(m); 3673 KASSERT((flags & PMAP_ENTER_RESERVED) == 0, 3674 ("pmap_enter: flags %u has reserved bits set", flags)); 3675 pa = VM_PAGE_TO_PHYS(m); 3676 newpte = (pt_entry_t)(pa | PG_A | PG_V); 3677 if ((flags & VM_PROT_WRITE) != 0) 3678 newpte |= PG_M; 3679 if ((prot & VM_PROT_WRITE) != 0) 3680 newpte |= PG_RW; 3681 KASSERT((newpte & (PG_M | PG_RW)) != PG_M, 3682 ("pmap_enter: flags includes VM_PROT_WRITE but prot doesn't")); 3683 #ifdef PMAP_PAE_COMP 3684 if ((prot & VM_PROT_EXECUTE) == 0 && !i386_read_exec) 3685 newpte |= pg_nx; 3686 #endif 3687 if ((flags & PMAP_ENTER_WIRED) != 0) 3688 newpte |= PG_W; 3689 if (pmap != kernel_pmap) 3690 newpte |= PG_U; 3691 newpte |= pmap_cache_bits(pmap, m->md.pat_mode, psind > 0); 3692 if ((m->oflags & VPO_UNMANAGED) == 0) 3693 newpte |= PG_MANAGED; 3694 3695 rw_wlock(&pvh_global_lock); 3696 PMAP_LOCK(pmap); 3697 sched_pin(); 3698 if (psind == 1) { 3699 /* Assert the required virtual and physical alignment. */ 3700 KASSERT((va & PDRMASK) == 0, ("pmap_enter: va unaligned")); 3701 KASSERT(m->psind > 0, ("pmap_enter: m->psind < psind")); 3702 rv = pmap_enter_pde(pmap, va, newpte | PG_PS, flags, m); 3703 goto out; 3704 } 3705 3706 pde = pmap_pde(pmap, va); 3707 if (pmap != kernel_pmap) { 3708 /* 3709 * va is for UVA. 3710 * In the case that a page table page is not resident, 3711 * we are creating it here. pmap_allocpte() handles 3712 * demotion. 3713 */ 3714 mpte = pmap_allocpte(pmap, va, flags); 3715 if (mpte == NULL) { 3716 KASSERT((flags & PMAP_ENTER_NOSLEEP) != 0, 3717 ("pmap_allocpte failed with sleep allowed")); 3718 rv = KERN_RESOURCE_SHORTAGE; 3719 goto out; 3720 } 3721 } else { 3722 /* 3723 * va is for KVA, so pmap_demote_pde() will never fail 3724 * to install a page table page. PG_V is also 3725 * asserted by pmap_demote_pde(). 3726 */ 3727 mpte = NULL; 3728 KASSERT(pde != NULL && (*pde & PG_V) != 0, 3729 ("KVA %#x invalid pde pdir %#jx", va, 3730 (uintmax_t)pmap->pm_pdir[PTDPTDI])); 3731 if ((*pde & PG_PS) != 0) 3732 pmap_demote_pde(pmap, pde, va); 3733 } 3734 pte = pmap_pte_quick(pmap, va); 3735 3736 /* 3737 * Page Directory table entry is not valid, which should not 3738 * happen. We should have either allocated the page table 3739 * page or demoted the existing mapping above. 3740 */ 3741 if (pte == NULL) { 3742 panic("pmap_enter: invalid page directory pdir=%#jx, va=%#x", 3743 (uintmax_t)pmap->pm_pdir[PTDPTDI], va); 3744 } 3745 3746 origpte = *pte; 3747 pv = NULL; 3748 3749 /* 3750 * Is the specified virtual address already mapped? 3751 */ 3752 if ((origpte & PG_V) != 0) { 3753 /* 3754 * Wiring change, just update stats. 
We don't worry about 3755 * wiring PT pages as they remain resident as long as there 3756 * are valid mappings in them. Hence, if a user page is wired, 3757 * the PT page will be also. 3758 */ 3759 if ((newpte & PG_W) != 0 && (origpte & PG_W) == 0) 3760 pmap->pm_stats.wired_count++; 3761 else if ((newpte & PG_W) == 0 && (origpte & PG_W) != 0) 3762 pmap->pm_stats.wired_count--; 3763 3764 /* 3765 * Remove the extra PT page reference. 3766 */ 3767 if (mpte != NULL) { 3768 mpte->ref_count--; 3769 KASSERT(mpte->ref_count > 0, 3770 ("pmap_enter: missing reference to page table page," 3771 " va: 0x%x", va)); 3772 } 3773 3774 /* 3775 * Has the physical page changed? 3776 */ 3777 opa = origpte & PG_FRAME; 3778 if (opa == pa) { 3779 /* 3780 * No, might be a protection or wiring change. 3781 */ 3782 if ((origpte & PG_MANAGED) != 0 && 3783 (newpte & PG_RW) != 0) 3784 vm_page_aflag_set(m, PGA_WRITEABLE); 3785 if (((origpte ^ newpte) & ~(PG_M | PG_A)) == 0) 3786 goto unchanged; 3787 goto validate; 3788 } 3789 3790 /* 3791 * The physical page has changed. Temporarily invalidate 3792 * the mapping. This ensures that all threads sharing the 3793 * pmap keep a consistent view of the mapping, which is 3794 * necessary for the correct handling of COW faults. It 3795 * also permits reuse of the old mapping's PV entry, 3796 * avoiding an allocation. 3797 * 3798 * For consistency, handle unmanaged mappings the same way. 3799 */ 3800 origpte = pte_load_clear(pte); 3801 KASSERT((origpte & PG_FRAME) == opa, 3802 ("pmap_enter: unexpected pa update for %#x", va)); 3803 if ((origpte & PG_MANAGED) != 0) { 3804 om = PHYS_TO_VM_PAGE(opa); 3805 3806 /* 3807 * The pmap lock is sufficient to synchronize with 3808 * concurrent calls to pmap_page_test_mappings() and 3809 * pmap_ts_referenced(). 3810 */ 3811 if ((origpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 3812 vm_page_dirty(om); 3813 if ((origpte & PG_A) != 0) { 3814 pmap_invalidate_page_int(pmap, va); 3815 vm_page_aflag_set(om, PGA_REFERENCED); 3816 } 3817 pv = pmap_pvh_remove(&om->md, pmap, va); 3818 KASSERT(pv != NULL, 3819 ("pmap_enter: no PV entry for %#x", va)); 3820 if ((newpte & PG_MANAGED) == 0) 3821 free_pv_entry(pmap, pv); 3822 if ((om->a.flags & PGA_WRITEABLE) != 0 && 3823 TAILQ_EMPTY(&om->md.pv_list) && 3824 ((om->flags & PG_FICTITIOUS) != 0 || 3825 TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list))) 3826 vm_page_aflag_clear(om, PGA_WRITEABLE); 3827 } else { 3828 /* 3829 * Since this mapping is unmanaged, assume that PG_A 3830 * is set. 3831 */ 3832 pmap_invalidate_page_int(pmap, va); 3833 } 3834 origpte = 0; 3835 } else { 3836 /* 3837 * Increment the counters. 3838 */ 3839 if ((newpte & PG_W) != 0) 3840 pmap->pm_stats.wired_count++; 3841 pmap->pm_stats.resident_count++; 3842 } 3843 3844 /* 3845 * Enter on the PV list if part of our managed memory. 3846 */ 3847 if ((newpte & PG_MANAGED) != 0) { 3848 if (pv == NULL) { 3849 pv = get_pv_entry(pmap, FALSE); 3850 pv->pv_va = va; 3851 } 3852 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); 3853 if ((newpte & PG_RW) != 0) 3854 vm_page_aflag_set(m, PGA_WRITEABLE); 3855 } 3856 3857 /* 3858 * Update the PTE. 
3859 */ 3860 if ((origpte & PG_V) != 0) { 3861 validate: 3862 origpte = pte_load_store(pte, newpte); 3863 KASSERT((origpte & PG_FRAME) == pa, 3864 ("pmap_enter: unexpected pa update for %#x", va)); 3865 if ((newpte & PG_M) == 0 && (origpte & (PG_M | PG_RW)) == 3866 (PG_M | PG_RW)) { 3867 if ((origpte & PG_MANAGED) != 0) 3868 vm_page_dirty(m); 3869 3870 /* 3871 * Although the PTE may still have PG_RW set, TLB 3872 * invalidation may nonetheless be required because 3873 * the PTE no longer has PG_M set. 3874 */ 3875 } 3876 #ifdef PMAP_PAE_COMP 3877 else if ((origpte & PG_NX) != 0 || (newpte & PG_NX) == 0) { 3878 /* 3879 * This PTE change does not require TLB invalidation. 3880 */ 3881 goto unchanged; 3882 } 3883 #endif 3884 if ((origpte & PG_A) != 0) 3885 pmap_invalidate_page_int(pmap, va); 3886 } else 3887 pte_store_zero(pte, newpte); 3888 3889 unchanged: 3890 3891 #if VM_NRESERVLEVEL > 0 3892 /* 3893 * If both the page table page and the reservation are fully 3894 * populated, then attempt promotion. 3895 */ 3896 if ((mpte == NULL || mpte->ref_count == NPTEPG) && 3897 (m->flags & PG_FICTITIOUS) == 0 && 3898 vm_reserv_level_iffullpop(m) == 0) 3899 (void)pmap_promote_pde(pmap, pde, va, mpte); 3900 #endif 3901 3902 rv = KERN_SUCCESS; 3903 out: 3904 sched_unpin(); 3905 rw_wunlock(&pvh_global_lock); 3906 PMAP_UNLOCK(pmap); 3907 return (rv); 3908 } 3909 3910 /* 3911 * Tries to create a read- and/or execute-only 2 or 4 MB page mapping. Returns 3912 * KERN_SUCCESS if the mapping was created. Otherwise, returns an error 3913 * value. See pmap_enter_pde() for the possible error values when "no sleep", 3914 * "no replace", and "no reclaim" are specified. 3915 */ 3916 static int 3917 pmap_enter_4mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot) 3918 { 3919 pd_entry_t newpde; 3920 3921 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3922 newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(pmap, m->md.pat_mode, 1) | 3923 PG_PS | PG_V; 3924 if ((m->oflags & VPO_UNMANAGED) == 0) 3925 newpde |= PG_MANAGED; 3926 #ifdef PMAP_PAE_COMP 3927 if ((prot & VM_PROT_EXECUTE) == 0 && !i386_read_exec) 3928 newpde |= pg_nx; 3929 #endif 3930 if (pmap != kernel_pmap) 3931 newpde |= PG_U; 3932 return (pmap_enter_pde(pmap, va, newpde, PMAP_ENTER_NOSLEEP | 3933 PMAP_ENTER_NOREPLACE | PMAP_ENTER_NORECLAIM, NULL)); 3934 } 3935 3936 /* 3937 * Returns true if every page table entry in the page table page that maps 3938 * the specified kernel virtual address is zero. 3939 */ 3940 static bool 3941 pmap_every_pte_zero(vm_offset_t va) 3942 { 3943 pt_entry_t *pt_end, *pte; 3944 3945 KASSERT((va & PDRMASK) == 0, ("va is misaligned")); 3946 pte = vtopte(va); 3947 for (pt_end = pte + NPTEPG; pte < pt_end; pte++) { 3948 if (*pte != 0) 3949 return (false); 3950 } 3951 return (true); 3952 } 3953 3954 /* 3955 * Tries to create the specified 2 or 4 MB page mapping. Returns KERN_SUCCESS 3956 * if the mapping was created, and one of KERN_FAILURE, KERN_NO_SPACE, 3957 * or KERN_RESOURCE_SHORTAGE otherwise. Returns KERN_FAILURE if 3958 * PMAP_ENTER_NOREPLACE was specified and a 4 KB page mapping already exists 3959 * within the 2 or 4 MB virtual address range starting at the specified virtual 3960 * address. Returns KERN_NO_SPACE if PMAP_ENTER_NOREPLACE was specified and a 3961 * 2 or 4 MB page mapping already exists at the specified virtual address. 3962 * Returns KERN_RESOURCE_SHORTAGE if PMAP_ENTER_NORECLAIM was specified and a 3963 * PV entry allocation failed. 
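 *
 * As an illustrative sketch (not lifted from a particular caller), these
 * return values might be handled as follows:
 *
 *	rv = pmap_enter_4mpage(pmap, va, m, prot);
 *	if (rv == KERN_SUCCESS || rv == KERN_NO_SPACE)
 *		... the 2/4MB range is now, or already was, mapped ...
 *	else
 *		... fall back to mapping the constituent 4KB pages ...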
3964 * 3965 * The parameter "m" is only used when creating a managed, writeable mapping. 3966 */ 3967 static int 3968 pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde, u_int flags, 3969 vm_page_t m) 3970 { 3971 struct spglist free; 3972 pd_entry_t oldpde, *pde; 3973 vm_page_t mt; 3974 vm_page_t uwptpg; 3975 3976 rw_assert(&pvh_global_lock, RA_WLOCKED); 3977 KASSERT((newpde & (PG_M | PG_RW)) != PG_RW, 3978 ("pmap_enter_pde: newpde is missing PG_M")); 3979 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3980 pde = pmap_pde(pmap, va); 3981 oldpde = *pde; 3982 if ((oldpde & PG_V) != 0) { 3983 if ((flags & PMAP_ENTER_NOREPLACE) != 0) { 3984 if ((oldpde & PG_PS) != 0) { 3985 CTR2(KTR_PMAP, 3986 "pmap_enter_pde: no space for va %#lx" 3987 " in pmap %p", va, pmap); 3988 return (KERN_NO_SPACE); 3989 } else if (pmap != kernel_pmap || 3990 !pmap_every_pte_zero(va)) { 3991 CTR2(KTR_PMAP, 3992 "pmap_enter_pde: failure for va %#lx" 3993 " in pmap %p", va, pmap); 3994 return (KERN_FAILURE); 3995 } 3996 } 3997 /* Break the existing mapping(s). */ 3998 SLIST_INIT(&free); 3999 if ((oldpde & PG_PS) != 0) { 4000 /* 4001 * If the PDE resulted from a promotion, then a 4002 * reserved PT page could be freed. 4003 */ 4004 (void)pmap_remove_pde(pmap, pde, va, &free); 4005 if ((oldpde & PG_G) == 0) 4006 pmap_invalidate_pde_page(pmap, va, oldpde); 4007 } else { 4008 if (pmap_remove_ptes(pmap, va, va + NBPDR, &free)) 4009 pmap_invalidate_all_int(pmap); 4010 } 4011 if (pmap != kernel_pmap) { 4012 vm_page_free_pages_toq(&free, true); 4013 KASSERT(*pde == 0, ("pmap_enter_pde: non-zero pde %p", 4014 pde)); 4015 } else { 4016 KASSERT(SLIST_EMPTY(&free), 4017 ("pmap_enter_pde: freed kernel page table page")); 4018 4019 /* 4020 * Both pmap_remove_pde() and pmap_remove_ptes() will 4021 * leave the kernel page table page zero filled. 4022 */ 4023 mt = PHYS_TO_VM_PAGE(*pde & PG_FRAME); 4024 if (pmap_insert_pt_page(pmap, mt, false, false)) 4025 panic("pmap_enter_pde: trie insert failed"); 4026 } 4027 } 4028 4029 /* 4030 * Allocate a leaf ptpage for wired userspace pages. 4031 */ 4032 uwptpg = NULL; 4033 if ((newpde & PG_W) != 0 && pmap != kernel_pmap) { 4034 uwptpg = vm_page_alloc_noobj(VM_ALLOC_WIRED); 4035 if (uwptpg == NULL) { 4036 return (KERN_RESOURCE_SHORTAGE); 4037 } 4038 uwptpg->pindex = va >> PDRSHIFT; 4039 if (pmap_insert_pt_page(pmap, uwptpg, true, false)) { 4040 vm_page_unwire_noq(uwptpg); 4041 vm_page_free(uwptpg); 4042 return (KERN_RESOURCE_SHORTAGE); 4043 } 4044 pmap->pm_stats.resident_count++; 4045 uwptpg->ref_count = NPTEPG; 4046 } 4047 if ((newpde & PG_MANAGED) != 0) { 4048 /* 4049 * Abort this mapping if its PV entry could not be created. 4050 */ 4051 if (!pmap_pv_insert_pde(pmap, va, newpde, flags)) { 4052 if (uwptpg != NULL) { 4053 mt = pmap_remove_pt_page(pmap, va); 4054 KASSERT(mt == uwptpg, 4055 ("removed pt page %p, expected %p", mt, 4056 uwptpg)); 4057 pmap->pm_stats.resident_count--; 4058 uwptpg->ref_count = 1; 4059 vm_page_unwire_noq(uwptpg); 4060 vm_page_free(uwptpg); 4061 } 4062 CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx" 4063 " in pmap %p", va, pmap); 4064 return (KERN_RESOURCE_SHORTAGE); 4065 } 4066 if ((newpde & PG_RW) != 0) { 4067 for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++) 4068 vm_page_aflag_set(mt, PGA_WRITEABLE); 4069 } 4070 } 4071 4072 /* 4073 * Increment counters. 4074 */ 4075 if ((newpde & PG_W) != 0) 4076 pmap->pm_stats.wired_count += NBPDR / PAGE_SIZE; 4077 pmap->pm_stats.resident_count += NBPDR / PAGE_SIZE; 4078 4079 /* 4080 * Map the superpage. 
(This is not a promoted mapping; there will not 4081 * be any lingering 4KB page mappings in the TLB.) 4082 */ 4083 pde_store(pde, newpde); 4084 4085 pmap_pde_mappings++; 4086 CTR2(KTR_PMAP, "pmap_enter_pde: success for va %#lx in pmap %p", 4087 va, pmap); 4088 return (KERN_SUCCESS); 4089 } 4090 4091 /* 4092 * Maps a sequence of resident pages belonging to the same object. 4093 * The sequence begins with the given page m_start. This page is 4094 * mapped at the given virtual address start. Each subsequent page is 4095 * mapped at a virtual address that is offset from start by the same 4096 * amount as the page is offset from m_start within the object. The 4097 * last page in the sequence is the page with the largest offset from 4098 * m_start that can be mapped at a virtual address less than the given 4099 * virtual address end. Not every virtual page between start and end 4100 * is mapped; only those for which a resident page exists with the 4101 * corresponding offset from m_start are mapped. 4102 */ 4103 static void 4104 __CONCAT(PMTYPE, enter_object)(pmap_t pmap, vm_offset_t start, vm_offset_t end, 4105 vm_page_t m_start, vm_prot_t prot) 4106 { 4107 vm_offset_t va; 4108 vm_page_t m, mpte; 4109 vm_pindex_t diff, psize; 4110 int rv; 4111 4112 VM_OBJECT_ASSERT_LOCKED(m_start->object); 4113 4114 psize = atop(end - start); 4115 mpte = NULL; 4116 m = m_start; 4117 rw_wlock(&pvh_global_lock); 4118 PMAP_LOCK(pmap); 4119 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 4120 va = start + ptoa(diff); 4121 if ((va & PDRMASK) == 0 && va + NBPDR <= end && 4122 m->psind == 1 && pg_ps_enabled && 4123 ((rv = pmap_enter_4mpage(pmap, va, m, prot)) == 4124 KERN_SUCCESS || rv == KERN_NO_SPACE)) 4125 m = &m[NBPDR / PAGE_SIZE - 1]; 4126 else 4127 mpte = pmap_enter_quick_locked(pmap, va, m, prot, 4128 mpte); 4129 m = TAILQ_NEXT(m, listq); 4130 } 4131 rw_wunlock(&pvh_global_lock); 4132 PMAP_UNLOCK(pmap); 4133 } 4134 4135 /* 4136 * this code makes some *MAJOR* assumptions: 4137 * 1. Current pmap & pmap exists. 4138 * 2. Not wired. 4139 * 3. Read access. 4140 * 4. No page table pages. 4141 * but is *MUCH* faster than pmap_enter... 4142 */ 4143 4144 static void 4145 __CONCAT(PMTYPE, enter_quick)(pmap_t pmap, vm_offset_t va, vm_page_t m, 4146 vm_prot_t prot) 4147 { 4148 4149 rw_wlock(&pvh_global_lock); 4150 PMAP_LOCK(pmap); 4151 (void)pmap_enter_quick_locked(pmap, va, m, prot, NULL); 4152 rw_wunlock(&pvh_global_lock); 4153 PMAP_UNLOCK(pmap); 4154 } 4155 4156 static vm_page_t 4157 pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, 4158 vm_prot_t prot, vm_page_t mpte) 4159 { 4160 pt_entry_t newpte, *pte; 4161 pd_entry_t *pde; 4162 4163 KASSERT(pmap != kernel_pmap || !VA_IS_CLEANMAP(va) || 4164 (m->oflags & VPO_UNMANAGED) != 0, 4165 ("pmap_enter_quick_locked: managed mapping within the clean submap")); 4166 rw_assert(&pvh_global_lock, RA_WLOCKED); 4167 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 4168 pde = NULL; 4169 4170 /* 4171 * In the case that a page table page is not 4172 * resident, we are creating it here. 4173 */ 4174 if (pmap != kernel_pmap) { 4175 u_int ptepindex; 4176 pd_entry_t ptepa; 4177 4178 /* 4179 * Calculate pagetable page index 4180 */ 4181 ptepindex = va >> PDRSHIFT; 4182 if (mpte && (mpte->pindex == ptepindex)) { 4183 mpte->ref_count++; 4184 } else { 4185 /* 4186 * Get the page directory entry 4187 */ 4188 pde = &pmap->pm_pdir[ptepindex]; 4189 ptepa = *pde; 4190 4191 /* 4192 * If the page table page is mapped, we just increment 4193 * the hold count, and activate it. 
4194 */ 4195 if (ptepa) { 4196 if (ptepa & PG_PS) 4197 return (NULL); 4198 mpte = PHYS_TO_VM_PAGE(ptepa & PG_FRAME); 4199 mpte->ref_count++; 4200 } else { 4201 mpte = _pmap_allocpte(pmap, ptepindex, 4202 PMAP_ENTER_NOSLEEP); 4203 if (mpte == NULL) 4204 return (mpte); 4205 } 4206 } 4207 } else { 4208 mpte = NULL; 4209 } 4210 4211 sched_pin(); 4212 pte = pmap_pte_quick(pmap, va); 4213 if (*pte) { 4214 if (mpte != NULL) 4215 mpte->ref_count--; 4216 sched_unpin(); 4217 return (NULL); 4218 } 4219 4220 /* 4221 * Enter on the PV list if part of our managed memory. 4222 */ 4223 if ((m->oflags & VPO_UNMANAGED) == 0 && 4224 !pmap_try_insert_pv_entry(pmap, va, m)) { 4225 if (mpte != NULL) 4226 pmap_abort_ptp(pmap, va, mpte); 4227 sched_unpin(); 4228 return (NULL); 4229 } 4230 4231 /* 4232 * Increment counters 4233 */ 4234 pmap->pm_stats.resident_count++; 4235 4236 newpte = VM_PAGE_TO_PHYS(m) | PG_V | 4237 pmap_cache_bits(pmap, m->md.pat_mode, 0); 4238 if ((m->oflags & VPO_UNMANAGED) == 0) 4239 newpte |= PG_MANAGED; 4240 #ifdef PMAP_PAE_COMP 4241 if ((prot & VM_PROT_EXECUTE) == 0 && !i386_read_exec) 4242 newpte |= pg_nx; 4243 #endif 4244 if (pmap != kernel_pmap) 4245 newpte |= PG_U; 4246 pte_store_zero(pte, newpte); 4247 4248 #if VM_NRESERVLEVEL > 0 4249 /* 4250 * If both the PTP and the reservation are fully populated, then 4251 * attempt promotion. 4252 */ 4253 if ((mpte == NULL || mpte->ref_count == NPTEPG) && 4254 (m->flags & PG_FICTITIOUS) == 0 && 4255 vm_reserv_level_iffullpop(m) == 0) { 4256 if (pde == NULL) 4257 pde = pmap_pde(pmap, va); 4258 4259 /* 4260 * If promotion succeeds, then the next call to this function 4261 * should not be given the unmapped PTP as a hint. 4262 */ 4263 if (pmap_promote_pde(pmap, pde, va, mpte)) 4264 mpte = NULL; 4265 } 4266 #endif 4267 4268 sched_unpin(); 4269 return (mpte); 4270 } 4271 4272 /* 4273 * Make a temporary mapping for a physical address. This is only intended 4274 * to be used for panic dumps. 4275 */ 4276 static void * 4277 __CONCAT(PMTYPE, kenter_temporary)(vm_paddr_t pa, int i) 4278 { 4279 vm_offset_t va; 4280 4281 va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE); 4282 pmap_kenter(va, pa); 4283 invlpg(va); 4284 return ((void *)crashdumpmap); 4285 } 4286 4287 /* 4288 * This code maps large physical mmap regions into the 4289 * processor address space. Note that some shortcuts 4290 * are taken, but the code works. 4291 */ 4292 static void 4293 __CONCAT(PMTYPE, object_init_pt)(pmap_t pmap, vm_offset_t addr, 4294 vm_object_t object, vm_pindex_t pindex, vm_size_t size) 4295 { 4296 pd_entry_t *pde; 4297 vm_paddr_t pa, ptepa; 4298 vm_page_t p; 4299 int pat_mode; 4300 4301 VM_OBJECT_ASSERT_WLOCKED(object); 4302 KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG, 4303 ("pmap_object_init_pt: non-device object")); 4304 if (pg_ps_enabled && 4305 (addr & (NBPDR - 1)) == 0 && (size & (NBPDR - 1)) == 0) { 4306 if (!vm_object_populate(object, pindex, pindex + atop(size))) 4307 return; 4308 p = vm_page_lookup(object, pindex); 4309 KASSERT(vm_page_all_valid(p), 4310 ("pmap_object_init_pt: invalid page %p", p)); 4311 pat_mode = p->md.pat_mode; 4312 4313 /* 4314 * Abort the mapping if the first page is not physically 4315 * aligned to a 2/4MB page boundary. 4316 */ 4317 ptepa = VM_PAGE_TO_PHYS(p); 4318 if (ptepa & (NBPDR - 1)) 4319 return; 4320 4321 /* 4322 * Skip the first page. Abort the mapping if the rest of 4323 * the pages are not physically contiguous or have differing 4324 * memory attributes. 
4325 */ 4326 p = TAILQ_NEXT(p, listq); 4327 for (pa = ptepa + PAGE_SIZE; pa < ptepa + size; 4328 pa += PAGE_SIZE) { 4329 KASSERT(vm_page_all_valid(p), 4330 ("pmap_object_init_pt: invalid page %p", p)); 4331 if (pa != VM_PAGE_TO_PHYS(p) || 4332 pat_mode != p->md.pat_mode) 4333 return; 4334 p = TAILQ_NEXT(p, listq); 4335 } 4336 4337 /* 4338 * Map using 2/4MB pages. Since "ptepa" is 2/4M aligned and 4339 * "size" is a multiple of 2/4M, adding the PAT setting to 4340 * "pa" will not affect the termination of this loop. 4341 */ 4342 PMAP_LOCK(pmap); 4343 for (pa = ptepa | pmap_cache_bits(pmap, pat_mode, 1); 4344 pa < ptepa + size; pa += NBPDR) { 4345 pde = pmap_pde(pmap, addr); 4346 if (*pde == 0) { 4347 pde_store(pde, pa | PG_PS | PG_M | PG_A | 4348 PG_U | PG_RW | PG_V); 4349 pmap->pm_stats.resident_count += NBPDR / 4350 PAGE_SIZE; 4351 pmap_pde_mappings++; 4352 } 4353 /* Else continue on if the PDE is already valid. */ 4354 addr += NBPDR; 4355 } 4356 PMAP_UNLOCK(pmap); 4357 } 4358 } 4359 4360 /* 4361 * Clear the wired attribute from the mappings for the specified range of 4362 * addresses in the given pmap. Every valid mapping within that range 4363 * must have the wired attribute set. In contrast, invalid mappings 4364 * cannot have the wired attribute set, so they are ignored. 4365 * 4366 * The wired attribute of the page table entry is not a hardware feature, 4367 * so there is no need to invalidate any TLB entries. 4368 */ 4369 static void 4370 __CONCAT(PMTYPE, unwire)(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 4371 { 4372 vm_offset_t pdnxt; 4373 pd_entry_t *pde; 4374 pt_entry_t *pte; 4375 boolean_t pv_lists_locked; 4376 4377 if (pmap_is_current(pmap)) 4378 pv_lists_locked = FALSE; 4379 else { 4380 pv_lists_locked = TRUE; 4381 resume: 4382 rw_wlock(&pvh_global_lock); 4383 sched_pin(); 4384 } 4385 PMAP_LOCK(pmap); 4386 for (; sva < eva; sva = pdnxt) { 4387 pdnxt = (sva + NBPDR) & ~PDRMASK; 4388 if (pdnxt < sva) 4389 pdnxt = eva; 4390 pde = pmap_pde(pmap, sva); 4391 if ((*pde & PG_V) == 0) 4392 continue; 4393 if ((*pde & PG_PS) != 0) { 4394 if ((*pde & PG_W) == 0) 4395 panic("pmap_unwire: pde %#jx is missing PG_W", 4396 (uintmax_t)*pde); 4397 4398 /* 4399 * Are we unwiring the entire large page? If not, 4400 * demote the mapping and fall through. 4401 */ 4402 if (sva + NBPDR == pdnxt && eva >= pdnxt) { 4403 /* 4404 * Regardless of whether a pde (or pte) is 32 4405 * or 64 bits in size, PG_W is among the least 4406 * significant 32 bits. 4407 */ 4408 atomic_clear_int((u_int *)pde, PG_W); 4409 pmap->pm_stats.wired_count -= NBPDR / 4410 PAGE_SIZE; 4411 continue; 4412 } else { 4413 if (!pv_lists_locked) { 4414 pv_lists_locked = TRUE; 4415 if (!rw_try_wlock(&pvh_global_lock)) { 4416 PMAP_UNLOCK(pmap); 4417 /* Repeat sva. */ 4418 goto resume; 4419 } 4420 sched_pin(); 4421 } 4422 if (!pmap_demote_pde(pmap, pde, sva)) 4423 panic("pmap_unwire: demotion failed"); 4424 } 4425 } 4426 if (pdnxt > eva) 4427 pdnxt = eva; 4428 for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++, 4429 sva += PAGE_SIZE) { 4430 if ((*pte & PG_V) == 0) 4431 continue; 4432 if ((*pte & PG_W) == 0) 4433 panic("pmap_unwire: pte %#jx is missing PG_W", 4434 (uintmax_t)*pte); 4435 4436 /* 4437 * PG_W must be cleared atomically. Although the pmap 4438 * lock synchronizes access to PG_W, another processor 4439 * could be setting PG_M and/or PG_A concurrently. 4440 * 4441 * PG_W is among the least significant 32 bits. 
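 *
 * As an illustration (assuming the little-endian, two-word view of a
 * PAE PTE), the atomic below is equivalent to clearing PG_W in word
 * zero only:
 *
 *	((u_int *)pte)[0] &= ~PG_W;	(performed atomically)
 *
 * so the high word, which holds the upper physical address bits and
 * PG_NX, is never written.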
4442 */ 4443 atomic_clear_int((u_int *)pte, PG_W); 4444 pmap->pm_stats.wired_count--; 4445 } 4446 } 4447 if (pv_lists_locked) { 4448 sched_unpin(); 4449 rw_wunlock(&pvh_global_lock); 4450 } 4451 PMAP_UNLOCK(pmap); 4452 } 4453 4454 /* 4455 * Copy the range specified by src_addr/len 4456 * from the source map to the range dst_addr/len 4457 * in the destination map. 4458 * 4459 * This routine is only advisory and need not do anything. Since 4460 * current pmap is always the kernel pmap when executing in 4461 * kernel, and we do not copy from the kernel pmap to a user 4462 * pmap, this optimization is not usable in 4/4G full split i386 4463 * world. 4464 */ 4465 4466 static void 4467 __CONCAT(PMTYPE, copy)(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, 4468 vm_size_t len, vm_offset_t src_addr) 4469 { 4470 pt_entry_t *src_pte, *dst_pte, ptetemp; 4471 pd_entry_t srcptepaddr; 4472 vm_page_t dstmpte, srcmpte; 4473 vm_offset_t addr, end_addr, pdnxt; 4474 u_int ptepindex; 4475 4476 if (dst_addr != src_addr) 4477 return; 4478 4479 end_addr = src_addr + len; 4480 4481 rw_wlock(&pvh_global_lock); 4482 if (dst_pmap < src_pmap) { 4483 PMAP_LOCK(dst_pmap); 4484 PMAP_LOCK(src_pmap); 4485 } else { 4486 PMAP_LOCK(src_pmap); 4487 PMAP_LOCK(dst_pmap); 4488 } 4489 sched_pin(); 4490 for (addr = src_addr; addr < end_addr; addr = pdnxt) { 4491 KASSERT(addr < PMAP_TRM_MIN_ADDRESS, 4492 ("pmap_copy: invalid to pmap_copy the trampoline")); 4493 4494 pdnxt = (addr + NBPDR) & ~PDRMASK; 4495 if (pdnxt < addr) 4496 pdnxt = end_addr; 4497 ptepindex = addr >> PDRSHIFT; 4498 4499 srcptepaddr = src_pmap->pm_pdir[ptepindex]; 4500 if (srcptepaddr == 0) 4501 continue; 4502 4503 if (srcptepaddr & PG_PS) { 4504 if ((addr & PDRMASK) != 0 || addr + NBPDR > end_addr) 4505 continue; 4506 if (dst_pmap->pm_pdir[ptepindex] == 0 && 4507 ((srcptepaddr & PG_MANAGED) == 0 || 4508 pmap_pv_insert_pde(dst_pmap, addr, srcptepaddr, 4509 PMAP_ENTER_NORECLAIM))) { 4510 dst_pmap->pm_pdir[ptepindex] = srcptepaddr & 4511 ~PG_W; 4512 dst_pmap->pm_stats.resident_count += 4513 NBPDR / PAGE_SIZE; 4514 pmap_pde_mappings++; 4515 } 4516 continue; 4517 } 4518 4519 srcmpte = PHYS_TO_VM_PAGE(srcptepaddr & PG_FRAME); 4520 KASSERT(srcmpte->ref_count > 0, 4521 ("pmap_copy: source page table page is unused")); 4522 4523 if (pdnxt > end_addr) 4524 pdnxt = end_addr; 4525 4526 src_pte = pmap_pte_quick3(src_pmap, addr); 4527 while (addr < pdnxt) { 4528 ptetemp = *src_pte; 4529 /* 4530 * we only virtual copy managed pages 4531 */ 4532 if ((ptetemp & PG_MANAGED) != 0) { 4533 dstmpte = pmap_allocpte(dst_pmap, addr, 4534 PMAP_ENTER_NOSLEEP); 4535 if (dstmpte == NULL) 4536 goto out; 4537 dst_pte = pmap_pte_quick(dst_pmap, addr); 4538 if (*dst_pte == 0 && 4539 pmap_try_insert_pv_entry(dst_pmap, addr, 4540 PHYS_TO_VM_PAGE(ptetemp & PG_FRAME))) { 4541 /* 4542 * Clear the wired, modified, and 4543 * accessed (referenced) bits 4544 * during the copy. 4545 */ 4546 *dst_pte = ptetemp & ~(PG_W | PG_M | 4547 PG_A); 4548 dst_pmap->pm_stats.resident_count++; 4549 } else { 4550 pmap_abort_ptp(dst_pmap, addr, dstmpte); 4551 goto out; 4552 } 4553 if (dstmpte->ref_count >= srcmpte->ref_count) 4554 break; 4555 } 4556 addr += PAGE_SIZE; 4557 src_pte++; 4558 } 4559 } 4560 out: 4561 sched_unpin(); 4562 rw_wunlock(&pvh_global_lock); 4563 PMAP_UNLOCK(src_pmap); 4564 PMAP_UNLOCK(dst_pmap); 4565 } 4566 4567 /* 4568 * Zero 1 page of virtual memory mapped from a hardware page by the caller. 
4569 */ 4570 static __inline void 4571 pagezero(void *page) 4572 { 4573 #if defined(I686_CPU) 4574 if (cpu_class == CPUCLASS_686) { 4575 if (cpu_feature & CPUID_SSE2) 4576 sse2_pagezero(page); 4577 else 4578 i686_pagezero(page); 4579 } else 4580 #endif 4581 bzero(page, PAGE_SIZE); 4582 } 4583 4584 /* 4585 * Zero the specified hardware page. 4586 */ 4587 static void 4588 __CONCAT(PMTYPE, zero_page)(vm_page_t m) 4589 { 4590 pt_entry_t *cmap_pte2; 4591 struct pcpu *pc; 4592 4593 sched_pin(); 4594 pc = get_pcpu(); 4595 cmap_pte2 = pc->pc_cmap_pte2; 4596 mtx_lock(&pc->pc_cmap_lock); 4597 if (*cmap_pte2) 4598 panic("pmap_zero_page: CMAP2 busy"); 4599 *cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M | 4600 pmap_cache_bits(kernel_pmap, m->md.pat_mode, 0); 4601 invlcaddr(pc->pc_cmap_addr2); 4602 pagezero(pc->pc_cmap_addr2); 4603 *cmap_pte2 = 0; 4604 4605 /* 4606 * Unpin the thread before releasing the lock. Otherwise the thread 4607 * could be rescheduled while still bound to the current CPU, only 4608 * to unpin itself immediately upon resuming execution. 4609 */ 4610 sched_unpin(); 4611 mtx_unlock(&pc->pc_cmap_lock); 4612 } 4613 4614 /* 4615 * Zero an area within a single hardware page. off and size must not 4616 * cover an area beyond a single hardware page. 4617 */ 4618 static void 4619 __CONCAT(PMTYPE, zero_page_area)(vm_page_t m, int off, int size) 4620 { 4621 pt_entry_t *cmap_pte2; 4622 struct pcpu *pc; 4623 4624 sched_pin(); 4625 pc = get_pcpu(); 4626 cmap_pte2 = pc->pc_cmap_pte2; 4627 mtx_lock(&pc->pc_cmap_lock); 4628 if (*cmap_pte2) 4629 panic("pmap_zero_page_area: CMAP2 busy"); 4630 *cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M | 4631 pmap_cache_bits(kernel_pmap, m->md.pat_mode, 0); 4632 invlcaddr(pc->pc_cmap_addr2); 4633 if (off == 0 && size == PAGE_SIZE) 4634 pagezero(pc->pc_cmap_addr2); 4635 else 4636 bzero(pc->pc_cmap_addr2 + off, size); 4637 *cmap_pte2 = 0; 4638 sched_unpin(); 4639 mtx_unlock(&pc->pc_cmap_lock); 4640 } 4641 4642 /* 4643 * Copy 1 specified hardware page to another. 
4644 */ 4645 static void 4646 __CONCAT(PMTYPE, copy_page)(vm_page_t src, vm_page_t dst) 4647 { 4648 pt_entry_t *cmap_pte1, *cmap_pte2; 4649 struct pcpu *pc; 4650 4651 sched_pin(); 4652 pc = get_pcpu(); 4653 cmap_pte1 = pc->pc_cmap_pte1; 4654 cmap_pte2 = pc->pc_cmap_pte2; 4655 mtx_lock(&pc->pc_cmap_lock); 4656 if (*cmap_pte1) 4657 panic("pmap_copy_page: CMAP1 busy"); 4658 if (*cmap_pte2) 4659 panic("pmap_copy_page: CMAP2 busy"); 4660 *cmap_pte1 = PG_V | VM_PAGE_TO_PHYS(src) | PG_A | 4661 pmap_cache_bits(kernel_pmap, src->md.pat_mode, 0); 4662 invlcaddr(pc->pc_cmap_addr1); 4663 *cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(dst) | PG_A | PG_M | 4664 pmap_cache_bits(kernel_pmap, dst->md.pat_mode, 0); 4665 invlcaddr(pc->pc_cmap_addr2); 4666 bcopy(pc->pc_cmap_addr1, pc->pc_cmap_addr2, PAGE_SIZE); 4667 *cmap_pte1 = 0; 4668 *cmap_pte2 = 0; 4669 sched_unpin(); 4670 mtx_unlock(&pc->pc_cmap_lock); 4671 } 4672 4673 static void 4674 __CONCAT(PMTYPE, copy_pages)(vm_page_t ma[], vm_offset_t a_offset, 4675 vm_page_t mb[], vm_offset_t b_offset, int xfersize) 4676 { 4677 vm_page_t a_pg, b_pg; 4678 char *a_cp, *b_cp; 4679 vm_offset_t a_pg_offset, b_pg_offset; 4680 pt_entry_t *cmap_pte1, *cmap_pte2; 4681 struct pcpu *pc; 4682 int cnt; 4683 4684 sched_pin(); 4685 pc = get_pcpu(); 4686 cmap_pte1 = pc->pc_cmap_pte1; 4687 cmap_pte2 = pc->pc_cmap_pte2; 4688 mtx_lock(&pc->pc_cmap_lock); 4689 if (*cmap_pte1 != 0) 4690 panic("pmap_copy_pages: CMAP1 busy"); 4691 if (*cmap_pte2 != 0) 4692 panic("pmap_copy_pages: CMAP2 busy"); 4693 while (xfersize > 0) { 4694 a_pg = ma[a_offset >> PAGE_SHIFT]; 4695 a_pg_offset = a_offset & PAGE_MASK; 4696 cnt = min(xfersize, PAGE_SIZE - a_pg_offset); 4697 b_pg = mb[b_offset >> PAGE_SHIFT]; 4698 b_pg_offset = b_offset & PAGE_MASK; 4699 cnt = min(cnt, PAGE_SIZE - b_pg_offset); 4700 *cmap_pte1 = PG_V | VM_PAGE_TO_PHYS(a_pg) | PG_A | 4701 pmap_cache_bits(kernel_pmap, a_pg->md.pat_mode, 0); 4702 invlcaddr(pc->pc_cmap_addr1); 4703 *cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(b_pg) | PG_A | 4704 PG_M | pmap_cache_bits(kernel_pmap, b_pg->md.pat_mode, 0); 4705 invlcaddr(pc->pc_cmap_addr2); 4706 a_cp = pc->pc_cmap_addr1 + a_pg_offset; 4707 b_cp = pc->pc_cmap_addr2 + b_pg_offset; 4708 bcopy(a_cp, b_cp, cnt); 4709 a_offset += cnt; 4710 b_offset += cnt; 4711 xfersize -= cnt; 4712 } 4713 *cmap_pte1 = 0; 4714 *cmap_pte2 = 0; 4715 sched_unpin(); 4716 mtx_unlock(&pc->pc_cmap_lock); 4717 } 4718 4719 /* 4720 * Returns true if the pmap's pv is one of the first 4721 * 16 pvs linked to from this page. This count may 4722 * be changed upwards or downwards in the future; it 4723 * is only necessary that true be returned for a small 4724 * subset of pmaps for proper page aging. 
4725 */ 4726 static boolean_t 4727 __CONCAT(PMTYPE, page_exists_quick)(pmap_t pmap, vm_page_t m) 4728 { 4729 struct md_page *pvh; 4730 pv_entry_t pv; 4731 int loops = 0; 4732 boolean_t rv; 4733 4734 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 4735 ("pmap_page_exists_quick: page %p is not managed", m)); 4736 rv = FALSE; 4737 rw_wlock(&pvh_global_lock); 4738 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { 4739 if (PV_PMAP(pv) == pmap) { 4740 rv = TRUE; 4741 break; 4742 } 4743 loops++; 4744 if (loops >= 16) 4745 break; 4746 } 4747 if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) { 4748 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 4749 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { 4750 if (PV_PMAP(pv) == pmap) { 4751 rv = TRUE; 4752 break; 4753 } 4754 loops++; 4755 if (loops >= 16) 4756 break; 4757 } 4758 } 4759 rw_wunlock(&pvh_global_lock); 4760 return (rv); 4761 } 4762 4763 /* 4764 * pmap_page_wired_mappings: 4765 * 4766 * Return the number of managed mappings to the given physical page 4767 * that are wired. 4768 */ 4769 static int 4770 __CONCAT(PMTYPE, page_wired_mappings)(vm_page_t m) 4771 { 4772 int count; 4773 4774 count = 0; 4775 if ((m->oflags & VPO_UNMANAGED) != 0) 4776 return (count); 4777 rw_wlock(&pvh_global_lock); 4778 count = pmap_pvh_wired_mappings(&m->md, count); 4779 if ((m->flags & PG_FICTITIOUS) == 0) { 4780 count = pmap_pvh_wired_mappings(pa_to_pvh(VM_PAGE_TO_PHYS(m)), 4781 count); 4782 } 4783 rw_wunlock(&pvh_global_lock); 4784 return (count); 4785 } 4786 4787 /* 4788 * pmap_pvh_wired_mappings: 4789 * 4790 * Return the updated number "count" of managed mappings that are wired. 4791 */ 4792 static int 4793 pmap_pvh_wired_mappings(struct md_page *pvh, int count) 4794 { 4795 pmap_t pmap; 4796 pt_entry_t *pte; 4797 pv_entry_t pv; 4798 4799 rw_assert(&pvh_global_lock, RA_WLOCKED); 4800 sched_pin(); 4801 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { 4802 pmap = PV_PMAP(pv); 4803 PMAP_LOCK(pmap); 4804 pte = pmap_pte_quick(pmap, pv->pv_va); 4805 if ((*pte & PG_W) != 0) 4806 count++; 4807 PMAP_UNLOCK(pmap); 4808 } 4809 sched_unpin(); 4810 return (count); 4811 } 4812 4813 /* 4814 * Returns TRUE if the given page is mapped individually or as part of 4815 * a 4mpage. Otherwise, returns FALSE. 4816 */ 4817 static boolean_t 4818 __CONCAT(PMTYPE, page_is_mapped)(vm_page_t m) 4819 { 4820 boolean_t rv; 4821 4822 if ((m->oflags & VPO_UNMANAGED) != 0) 4823 return (FALSE); 4824 rw_wlock(&pvh_global_lock); 4825 rv = !TAILQ_EMPTY(&m->md.pv_list) || 4826 ((m->flags & PG_FICTITIOUS) == 0 && 4827 !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list)); 4828 rw_wunlock(&pvh_global_lock); 4829 return (rv); 4830 } 4831 4832 /* 4833 * Remove all pages from specified address space 4834 * this aids process exit speeds. Also, this code 4835 * is special cased for current process only, but 4836 * can have the more generic (and slightly slower) 4837 * mode enabled. This is much faster than pmap_remove 4838 * in the case of running down an entire address space. 
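 *
 * Illustrative use (a sketch, not a quote of the actual caller): address
 * space teardown is expected to call
 *
 *	pmap_remove_pages(vmspace_pmap(vm));
 *
 * rather than pmap_remove() over the entire user address range, since
 * walking the pmap's PV chunks avoids visiting page tables that map
 * nothing.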
4839 */ 4840 static void 4841 __CONCAT(PMTYPE, remove_pages)(pmap_t pmap) 4842 { 4843 pt_entry_t *pte, tpte; 4844 vm_page_t m, mpte, mt; 4845 pv_entry_t pv; 4846 struct md_page *pvh; 4847 struct pv_chunk *pc, *npc; 4848 struct spglist free; 4849 int field, idx; 4850 int32_t bit; 4851 uint32_t inuse, bitmask; 4852 int allfree; 4853 4854 if (pmap != PCPU_GET(curpmap)) { 4855 printf("warning: pmap_remove_pages called with non-current pmap\n"); 4856 return; 4857 } 4858 SLIST_INIT(&free); 4859 rw_wlock(&pvh_global_lock); 4860 PMAP_LOCK(pmap); 4861 sched_pin(); 4862 TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) { 4863 KASSERT(pc->pc_pmap == pmap, ("Wrong pmap %p %p", pmap, 4864 pc->pc_pmap)); 4865 allfree = 1; 4866 for (field = 0; field < _NPCM; field++) { 4867 inuse = ~pc->pc_map[field] & pc_freemask[field]; 4868 while (inuse != 0) { 4869 bit = bsfl(inuse); 4870 bitmask = 1UL << bit; 4871 idx = field * 32 + bit; 4872 pv = &pc->pc_pventry[idx]; 4873 inuse &= ~bitmask; 4874 4875 pte = pmap_pde(pmap, pv->pv_va); 4876 tpte = *pte; 4877 if ((tpte & PG_PS) == 0) { 4878 pte = pmap_pte_quick(pmap, pv->pv_va); 4879 tpte = *pte & ~PG_PTE_PAT; 4880 } 4881 4882 if (tpte == 0) { 4883 printf( 4884 "TPTE at %p IS ZERO @ VA %08x\n", 4885 pte, pv->pv_va); 4886 panic("bad pte"); 4887 } 4888 4889 /* 4890 * We cannot remove wired pages from a process' mapping at this time 4891 */ 4892 if (tpte & PG_W) { 4893 allfree = 0; 4894 continue; 4895 } 4896 4897 m = PHYS_TO_VM_PAGE(tpte & PG_FRAME); 4898 KASSERT(m->phys_addr == (tpte & PG_FRAME), 4899 ("vm_page_t %p phys_addr mismatch %016jx %016jx", 4900 m, (uintmax_t)m->phys_addr, 4901 (uintmax_t)tpte)); 4902 4903 KASSERT((m->flags & PG_FICTITIOUS) != 0 || 4904 m < &vm_page_array[vm_page_array_size], 4905 ("pmap_remove_pages: bad tpte %#jx", 4906 (uintmax_t)tpte)); 4907 4908 pte_clear(pte); 4909 4910 /* 4911 * Update the vm_page_t clean/reference bits. 
4912 */ 4913 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) { 4914 if ((tpte & PG_PS) != 0) { 4915 for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++) 4916 vm_page_dirty(mt); 4917 } else 4918 vm_page_dirty(m); 4919 } 4920 4921 /* Mark free */ 4922 PV_STAT(pv_entry_frees++); 4923 PV_STAT(pv_entry_spare++); 4924 pv_entry_count--; 4925 pc->pc_map[field] |= bitmask; 4926 if ((tpte & PG_PS) != 0) { 4927 pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE; 4928 pvh = pa_to_pvh(tpte & PG_PS_FRAME); 4929 TAILQ_REMOVE(&pvh->pv_list, pv, pv_next); 4930 if (TAILQ_EMPTY(&pvh->pv_list)) { 4931 for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++) 4932 if (TAILQ_EMPTY(&mt->md.pv_list)) 4933 vm_page_aflag_clear(mt, PGA_WRITEABLE); 4934 } 4935 mpte = pmap_remove_pt_page(pmap, pv->pv_va); 4936 if (mpte != NULL) { 4937 KASSERT(vm_page_any_valid(mpte), 4938 ("pmap_remove_pages: pte page not promoted")); 4939 pmap->pm_stats.resident_count--; 4940 KASSERT(mpte->ref_count == NPTEPG, 4941 ("pmap_remove_pages: pte page ref count error")); 4942 mpte->ref_count = 0; 4943 pmap_add_delayed_free_list(mpte, &free, FALSE); 4944 } 4945 } else { 4946 pmap->pm_stats.resident_count--; 4947 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); 4948 if (TAILQ_EMPTY(&m->md.pv_list) && 4949 (m->flags & PG_FICTITIOUS) == 0) { 4950 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 4951 if (TAILQ_EMPTY(&pvh->pv_list)) 4952 vm_page_aflag_clear(m, PGA_WRITEABLE); 4953 } 4954 pmap_unuse_pt(pmap, pv->pv_va, &free); 4955 } 4956 } 4957 } 4958 if (allfree) { 4959 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 4960 free_pv_chunk(pc); 4961 } 4962 } 4963 sched_unpin(); 4964 pmap_invalidate_all_int(pmap); 4965 rw_wunlock(&pvh_global_lock); 4966 PMAP_UNLOCK(pmap); 4967 vm_page_free_pages_toq(&free, true); 4968 } 4969 4970 /* 4971 * pmap_is_modified: 4972 * 4973 * Return whether or not the specified physical page was modified 4974 * in any physical maps. 4975 */ 4976 static boolean_t 4977 __CONCAT(PMTYPE, is_modified)(vm_page_t m) 4978 { 4979 boolean_t rv; 4980 4981 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 4982 ("pmap_is_modified: page %p is not managed", m)); 4983 4984 /* 4985 * If the page is not busied then this check is racy. 4986 */ 4987 if (!pmap_page_is_write_mapped(m)) 4988 return (FALSE); 4989 rw_wlock(&pvh_global_lock); 4990 rv = pmap_is_modified_pvh(&m->md) || 4991 ((m->flags & PG_FICTITIOUS) == 0 && 4992 pmap_is_modified_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m)))); 4993 rw_wunlock(&pvh_global_lock); 4994 return (rv); 4995 } 4996 4997 /* 4998 * Returns TRUE if any of the given mappings were used to modify 4999 * physical memory. Otherwise, returns FALSE. Both page and 4mpage 5000 * mappings are supported. 5001 */ 5002 static boolean_t 5003 pmap_is_modified_pvh(struct md_page *pvh) 5004 { 5005 pv_entry_t pv; 5006 pt_entry_t *pte; 5007 pmap_t pmap; 5008 boolean_t rv; 5009 5010 rw_assert(&pvh_global_lock, RA_WLOCKED); 5011 rv = FALSE; 5012 sched_pin(); 5013 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { 5014 pmap = PV_PMAP(pv); 5015 PMAP_LOCK(pmap); 5016 pte = pmap_pte_quick(pmap, pv->pv_va); 5017 rv = (*pte & (PG_M | PG_RW)) == (PG_M | PG_RW); 5018 PMAP_UNLOCK(pmap); 5019 if (rv) 5020 break; 5021 } 5022 sched_unpin(); 5023 return (rv); 5024 } 5025 5026 /* 5027 * pmap_is_prefaultable: 5028 * 5029 * Return whether or not the specified virtual address is eligible 5030 * for prefault.
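 *
 * Illustrative use (a sketch, not copied from the fault code): the
 * prefault path is expected to test an address before creating a
 * speculative mapping, e.g.
 *
 *	if (pmap_is_prefaultable(pmap, addr))
 *		pmap_enter_quick(pmap, addr, m, prot);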
5031 */ 5032 static boolean_t 5033 __CONCAT(PMTYPE, is_prefaultable)(pmap_t pmap, vm_offset_t addr) 5034 { 5035 pd_entry_t pde; 5036 boolean_t rv; 5037 5038 rv = FALSE; 5039 PMAP_LOCK(pmap); 5040 pde = *pmap_pde(pmap, addr); 5041 if (pde != 0 && (pde & PG_PS) == 0) 5042 rv = pmap_pte_ufast(pmap, addr, pde) == 0; 5043 PMAP_UNLOCK(pmap); 5044 return (rv); 5045 } 5046 5047 /* 5048 * pmap_is_referenced: 5049 * 5050 * Return whether or not the specified physical page was referenced 5051 * in any physical maps. 5052 */ 5053 static boolean_t 5054 __CONCAT(PMTYPE, is_referenced)(vm_page_t m) 5055 { 5056 boolean_t rv; 5057 5058 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 5059 ("pmap_is_referenced: page %p is not managed", m)); 5060 rw_wlock(&pvh_global_lock); 5061 rv = pmap_is_referenced_pvh(&m->md) || 5062 ((m->flags & PG_FICTITIOUS) == 0 && 5063 pmap_is_referenced_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m)))); 5064 rw_wunlock(&pvh_global_lock); 5065 return (rv); 5066 } 5067 5068 /* 5069 * Returns TRUE if any of the given mappings were referenced and FALSE 5070 * otherwise. Both page and 4mpage mappings are supported. 5071 */ 5072 static boolean_t 5073 pmap_is_referenced_pvh(struct md_page *pvh) 5074 { 5075 pv_entry_t pv; 5076 pt_entry_t *pte; 5077 pmap_t pmap; 5078 boolean_t rv; 5079 5080 rw_assert(&pvh_global_lock, RA_WLOCKED); 5081 rv = FALSE; 5082 sched_pin(); 5083 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { 5084 pmap = PV_PMAP(pv); 5085 PMAP_LOCK(pmap); 5086 pte = pmap_pte_quick(pmap, pv->pv_va); 5087 rv = (*pte & (PG_A | PG_V)) == (PG_A | PG_V); 5088 PMAP_UNLOCK(pmap); 5089 if (rv) 5090 break; 5091 } 5092 sched_unpin(); 5093 return (rv); 5094 } 5095 5096 /* 5097 * Clear the write and modified bits in each of the given page's mappings. 5098 */ 5099 static void 5100 __CONCAT(PMTYPE, remove_write)(vm_page_t m) 5101 { 5102 struct md_page *pvh; 5103 pv_entry_t next_pv, pv; 5104 pmap_t pmap; 5105 pd_entry_t *pde; 5106 pt_entry_t oldpte, *pte; 5107 vm_offset_t va; 5108 5109 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 5110 ("pmap_remove_write: page %p is not managed", m)); 5111 vm_page_assert_busied(m); 5112 5113 if (!pmap_page_is_write_mapped(m)) 5114 return; 5115 rw_wlock(&pvh_global_lock); 5116 sched_pin(); 5117 if ((m->flags & PG_FICTITIOUS) != 0) 5118 goto small_mappings; 5119 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 5120 TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) { 5121 va = pv->pv_va; 5122 pmap = PV_PMAP(pv); 5123 PMAP_LOCK(pmap); 5124 pde = pmap_pde(pmap, va); 5125 if ((*pde & PG_RW) != 0) 5126 (void)pmap_demote_pde(pmap, pde, va); 5127 PMAP_UNLOCK(pmap); 5128 } 5129 small_mappings: 5130 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { 5131 pmap = PV_PMAP(pv); 5132 PMAP_LOCK(pmap); 5133 pde = pmap_pde(pmap, pv->pv_va); 5134 KASSERT((*pde & PG_PS) == 0, ("pmap_clear_write: found" 5135 " a 4mpage in page %p's pv list", m)); 5136 pte = pmap_pte_quick(pmap, pv->pv_va); 5137 retry: 5138 oldpte = *pte; 5139 if ((oldpte & PG_RW) != 0) { 5140 /* 5141 * Regardless of whether a pte is 32 or 64 bits 5142 * in size, PG_RW and PG_M are among the least 5143 * significant 32 bits. 
5144 */ 5145 if (!atomic_cmpset_int((u_int *)pte, oldpte, 5146 oldpte & ~(PG_RW | PG_M))) 5147 goto retry; 5148 if ((oldpte & PG_M) != 0) 5149 vm_page_dirty(m); 5150 pmap_invalidate_page_int(pmap, pv->pv_va); 5151 } 5152 PMAP_UNLOCK(pmap); 5153 } 5154 vm_page_aflag_clear(m, PGA_WRITEABLE); 5155 sched_unpin(); 5156 rw_wunlock(&pvh_global_lock); 5157 } 5158 5159 /* 5160 * pmap_ts_referenced: 5161 * 5162 * Return a count of reference bits for a page, clearing those bits. 5163 * It is not necessary for every reference bit to be cleared, but it 5164 * is necessary that 0 only be returned when there are truly no 5165 * reference bits set. 5166 * 5167 * As an optimization, update the page's dirty field if a modified bit is 5168 * found while counting reference bits. This opportunistic update can be 5169 * performed at low cost and can eliminate the need for some future calls 5170 * to pmap_is_modified(). However, since this function stops after 5171 * finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some 5172 * dirty pages. Those dirty pages will only be detected by a future call 5173 * to pmap_is_modified(). 5174 */ 5175 static int 5176 __CONCAT(PMTYPE, ts_referenced)(vm_page_t m) 5177 { 5178 struct md_page *pvh; 5179 pv_entry_t pv, pvf; 5180 pmap_t pmap; 5181 pd_entry_t *pde; 5182 pt_entry_t *pte; 5183 vm_paddr_t pa; 5184 int rtval = 0; 5185 5186 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 5187 ("pmap_ts_referenced: page %p is not managed", m)); 5188 pa = VM_PAGE_TO_PHYS(m); 5189 pvh = pa_to_pvh(pa); 5190 rw_wlock(&pvh_global_lock); 5191 sched_pin(); 5192 if ((m->flags & PG_FICTITIOUS) != 0 || 5193 (pvf = TAILQ_FIRST(&pvh->pv_list)) == NULL) 5194 goto small_mappings; 5195 pv = pvf; 5196 do { 5197 pmap = PV_PMAP(pv); 5198 PMAP_LOCK(pmap); 5199 pde = pmap_pde(pmap, pv->pv_va); 5200 if ((*pde & (PG_M | PG_RW)) == (PG_M | PG_RW)) { 5201 /* 5202 * Although "*pde" is mapping a 2/4MB page, because 5203 * this function is called at a 4KB page granularity, 5204 * we only update the 4KB page under test. 5205 */ 5206 vm_page_dirty(m); 5207 } 5208 if ((*pde & PG_A) != 0) { 5209 /* 5210 * Since this reference bit is shared by either 1024 5211 * or 512 4KB pages, it should not be cleared every 5212 * time it is tested. Apply a simple "hash" function 5213 * on the physical page number, the virtual superpage 5214 * number, and the pmap address to select one 4KB page 5215 * out of the 1024 or 512 on which testing the 5216 * reference bit will result in clearing that bit. 5217 * This function is designed to avoid the selection of 5218 * the same 4KB page for every 2- or 4MB page mapping. 5219 * 5220 * On demotion, a mapping that hasn't been referenced 5221 * is simply destroyed. To avoid the possibility of a 5222 * subsequent page fault on a demoted wired mapping, 5223 * always leave its reference bit set. Moreover, 5224 * since the superpage is wired, the current state of 5225 * its reference bit won't affect page replacement. 5226 */ 5227 if ((((pa >> PAGE_SHIFT) ^ (pv->pv_va >> PDRSHIFT) ^ 5228 (uintptr_t)pmap) & (NPTEPG - 1)) == 0 && 5229 (*pde & PG_W) == 0) { 5230 atomic_clear_int((u_int *)pde, PG_A); 5231 pmap_invalidate_page_int(pmap, pv->pv_va); 5232 } 5233 rtval++; 5234 } 5235 PMAP_UNLOCK(pmap); 5236 /* Rotate the PV list if it has more than one entry. 
*/ 5237 if (TAILQ_NEXT(pv, pv_next) != NULL) { 5238 TAILQ_REMOVE(&pvh->pv_list, pv, pv_next); 5239 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next); 5240 } 5241 if (rtval >= PMAP_TS_REFERENCED_MAX) 5242 goto out; 5243 } while ((pv = TAILQ_FIRST(&pvh->pv_list)) != pvf); 5244 small_mappings: 5245 if ((pvf = TAILQ_FIRST(&m->md.pv_list)) == NULL) 5246 goto out; 5247 pv = pvf; 5248 do { 5249 pmap = PV_PMAP(pv); 5250 PMAP_LOCK(pmap); 5251 pde = pmap_pde(pmap, pv->pv_va); 5252 KASSERT((*pde & PG_PS) == 0, 5253 ("pmap_ts_referenced: found a 4mpage in page %p's pv list", 5254 m)); 5255 pte = pmap_pte_quick(pmap, pv->pv_va); 5256 if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 5257 vm_page_dirty(m); 5258 if ((*pte & PG_A) != 0) { 5259 atomic_clear_int((u_int *)pte, PG_A); 5260 pmap_invalidate_page_int(pmap, pv->pv_va); 5261 rtval++; 5262 } 5263 PMAP_UNLOCK(pmap); 5264 /* Rotate the PV list if it has more than one entry. */ 5265 if (TAILQ_NEXT(pv, pv_next) != NULL) { 5266 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); 5267 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); 5268 } 5269 } while ((pv = TAILQ_FIRST(&m->md.pv_list)) != pvf && rtval < 5270 PMAP_TS_REFERENCED_MAX); 5271 out: 5272 sched_unpin(); 5273 rw_wunlock(&pvh_global_lock); 5274 return (rtval); 5275 } 5276 5277 /* 5278 * Apply the given advice to the specified range of addresses within the 5279 * given pmap. Depending on the advice, clear the referenced and/or 5280 * modified flags in each mapping and set the mapped page's dirty field. 5281 */ 5282 static void 5283 __CONCAT(PMTYPE, advise)(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, 5284 int advice) 5285 { 5286 pd_entry_t oldpde, *pde; 5287 pt_entry_t *pte; 5288 vm_offset_t va, pdnxt; 5289 vm_page_t m; 5290 bool anychanged, pv_lists_locked; 5291 5292 if (advice != MADV_DONTNEED && advice != MADV_FREE) 5293 return; 5294 if (pmap_is_current(pmap)) 5295 pv_lists_locked = false; 5296 else { 5297 pv_lists_locked = true; 5298 resume: 5299 rw_wlock(&pvh_global_lock); 5300 sched_pin(); 5301 } 5302 anychanged = false; 5303 PMAP_LOCK(pmap); 5304 for (; sva < eva; sva = pdnxt) { 5305 pdnxt = (sva + NBPDR) & ~PDRMASK; 5306 if (pdnxt < sva) 5307 pdnxt = eva; 5308 pde = pmap_pde(pmap, sva); 5309 oldpde = *pde; 5310 if ((oldpde & PG_V) == 0) 5311 continue; 5312 else if ((oldpde & PG_PS) != 0) { 5313 if ((oldpde & PG_MANAGED) == 0) 5314 continue; 5315 if (!pv_lists_locked) { 5316 pv_lists_locked = true; 5317 if (!rw_try_wlock(&pvh_global_lock)) { 5318 if (anychanged) 5319 pmap_invalidate_all_int(pmap); 5320 PMAP_UNLOCK(pmap); 5321 goto resume; 5322 } 5323 sched_pin(); 5324 } 5325 if (!pmap_demote_pde(pmap, pde, sva)) { 5326 /* 5327 * The large page mapping was destroyed. 5328 */ 5329 continue; 5330 } 5331 5332 /* 5333 * Unless the page mappings are wired, remove the 5334 * mapping to a single page so that a subsequent 5335 * access may repromote. Choosing the last page 5336 * within the address range [sva, min(pdnxt, eva)) 5337 * generally results in more repromotions. Since the 5338 * underlying page table page is fully populated, this 5339 * removal never frees a page table page. 
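 *
 * In other words (an illustrative restatement of the code below), the
 * chosen address is
 *
 *	va = min(eva, pdnxt) - PAGE_SIZE;
 *
 * i.e., the last page of the advised portion of this 2/4MB run.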
5340 */ 5341 if ((oldpde & PG_W) == 0) { 5342 va = eva; 5343 if (va > pdnxt) 5344 va = pdnxt; 5345 va -= PAGE_SIZE; 5346 KASSERT(va >= sva, 5347 ("pmap_advise: no address gap")); 5348 pte = pmap_pte_quick(pmap, va); 5349 KASSERT((*pte & PG_V) != 0, 5350 ("pmap_advise: invalid PTE")); 5351 pmap_remove_pte(pmap, pte, va, NULL); 5352 anychanged = true; 5353 } 5354 } 5355 if (pdnxt > eva) 5356 pdnxt = eva; 5357 va = pdnxt; 5358 for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++, 5359 sva += PAGE_SIZE) { 5360 if ((*pte & (PG_MANAGED | PG_V)) != (PG_MANAGED | PG_V)) 5361 goto maybe_invlrng; 5362 else if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) { 5363 if (advice == MADV_DONTNEED) { 5364 /* 5365 * Future calls to pmap_is_modified() 5366 * can be avoided by making the page 5367 * dirty now. 5368 */ 5369 m = PHYS_TO_VM_PAGE(*pte & PG_FRAME); 5370 vm_page_dirty(m); 5371 } 5372 atomic_clear_int((u_int *)pte, PG_M | PG_A); 5373 } else if ((*pte & PG_A) != 0) 5374 atomic_clear_int((u_int *)pte, PG_A); 5375 else 5376 goto maybe_invlrng; 5377 if ((*pte & PG_G) != 0) { 5378 if (va == pdnxt) 5379 va = sva; 5380 } else 5381 anychanged = true; 5382 continue; 5383 maybe_invlrng: 5384 if (va != pdnxt) { 5385 pmap_invalidate_range_int(pmap, va, sva); 5386 va = pdnxt; 5387 } 5388 } 5389 if (va != pdnxt) 5390 pmap_invalidate_range_int(pmap, va, sva); 5391 } 5392 if (anychanged) 5393 pmap_invalidate_all_int(pmap); 5394 if (pv_lists_locked) { 5395 sched_unpin(); 5396 rw_wunlock(&pvh_global_lock); 5397 } 5398 PMAP_UNLOCK(pmap); 5399 } 5400 5401 /* 5402 * Clear the modify bits on the specified physical page. 5403 */ 5404 static void 5405 __CONCAT(PMTYPE, clear_modify)(vm_page_t m) 5406 { 5407 struct md_page *pvh; 5408 pv_entry_t next_pv, pv; 5409 pmap_t pmap; 5410 pd_entry_t oldpde, *pde; 5411 pt_entry_t *pte; 5412 vm_offset_t va; 5413 5414 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 5415 ("pmap_clear_modify: page %p is not managed", m)); 5416 vm_page_assert_busied(m); 5417 5418 if (!pmap_page_is_write_mapped(m)) 5419 return; 5420 rw_wlock(&pvh_global_lock); 5421 sched_pin(); 5422 if ((m->flags & PG_FICTITIOUS) != 0) 5423 goto small_mappings; 5424 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 5425 TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) { 5426 va = pv->pv_va; 5427 pmap = PV_PMAP(pv); 5428 PMAP_LOCK(pmap); 5429 pde = pmap_pde(pmap, va); 5430 oldpde = *pde; 5431 /* If oldpde has PG_RW set, then it also has PG_M set. */ 5432 if ((oldpde & PG_RW) != 0 && 5433 pmap_demote_pde(pmap, pde, va) && 5434 (oldpde & PG_W) == 0) { 5435 /* 5436 * Write protect the mapping to a single page so that 5437 * a subsequent write access may repromote. 5438 */ 5439 va += VM_PAGE_TO_PHYS(m) - (oldpde & PG_PS_FRAME); 5440 pte = pmap_pte_quick(pmap, va); 5441 /* 5442 * Regardless of whether a pte is 32 or 64 bits 5443 * in size, PG_RW and PG_M are among the least 5444 * significant 32 bits. 5445 */ 5446 atomic_clear_int((u_int *)pte, PG_M | PG_RW); 5447 vm_page_dirty(m); 5448 pmap_invalidate_page_int(pmap, va); 5449 } 5450 PMAP_UNLOCK(pmap); 5451 } 5452 small_mappings: 5453 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { 5454 pmap = PV_PMAP(pv); 5455 PMAP_LOCK(pmap); 5456 pde = pmap_pde(pmap, pv->pv_va); 5457 KASSERT((*pde & PG_PS) == 0, ("pmap_clear_modify: found" 5458 " a 4mpage in page %p's pv list", m)); 5459 pte = pmap_pte_quick(pmap, pv->pv_va); 5460 if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) { 5461 /* 5462 * Regardless of whether a pte is 32 or 64 bits 5463 * in size, PG_M is among the least significant 5464 * 32 bits. 
5465 */ 5466 atomic_clear_int((u_int *)pte, PG_M); 5467 pmap_invalidate_page_int(pmap, pv->pv_va); 5468 } 5469 PMAP_UNLOCK(pmap); 5470 } 5471 sched_unpin(); 5472 rw_wunlock(&pvh_global_lock); 5473 } 5474 5475 /* 5476 * Miscellaneous support routines follow 5477 */ 5478 5479 /* Adjust the cache mode for a 4KB page mapped via a PTE. */ 5480 static __inline void 5481 pmap_pte_attr(pt_entry_t *pte, int cache_bits) 5482 { 5483 u_int opte, npte; 5484 5485 /* 5486 * The cache mode bits are all in the low 32-bits of the 5487 * PTE, so we can just spin on updating the low 32-bits. 5488 */ 5489 do { 5490 opte = *(u_int *)pte; 5491 npte = opte & ~PG_PTE_CACHE; 5492 npte |= cache_bits; 5493 } while (npte != opte && !atomic_cmpset_int((u_int *)pte, opte, npte)); 5494 } 5495 5496 /* Adjust the cache mode for a 2/4MB page mapped via a PDE. */ 5497 static __inline void 5498 pmap_pde_attr(pd_entry_t *pde, int cache_bits) 5499 { 5500 u_int opde, npde; 5501 5502 /* 5503 * The cache mode bits are all in the low 32-bits of the 5504 * PDE, so we can just spin on updating the low 32-bits. 5505 */ 5506 do { 5507 opde = *(u_int *)pde; 5508 npde = opde & ~PG_PDE_CACHE; 5509 npde |= cache_bits; 5510 } while (npde != opde && !atomic_cmpset_int((u_int *)pde, opde, npde)); 5511 } 5512 5513 /* 5514 * Map a set of physical memory pages into the kernel virtual 5515 * address space. Return a pointer to where it is mapped. This 5516 * routine is intended to be used for mapping device memory, 5517 * NOT real memory. 5518 */ 5519 static void * 5520 __CONCAT(PMTYPE, mapdev_attr)(vm_paddr_t pa, vm_size_t size, int mode, 5521 int flags) 5522 { 5523 struct pmap_preinit_mapping *ppim; 5524 vm_offset_t va, offset; 5525 vm_page_t m; 5526 vm_size_t tmpsize; 5527 int i; 5528 5529 offset = pa & PAGE_MASK; 5530 size = round_page(offset + size); 5531 pa = pa & PG_FRAME; 5532 5533 if (pa < PMAP_MAP_LOW && pa + size <= PMAP_MAP_LOW) { 5534 va = pa + PMAP_MAP_LOW; 5535 if ((flags & MAPDEV_SETATTR) == 0) 5536 return ((void *)(va + offset)); 5537 } else if (!pmap_initialized) { 5538 va = 0; 5539 for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) { 5540 ppim = pmap_preinit_mapping + i; 5541 if (ppim->va == 0) { 5542 ppim->pa = pa; 5543 ppim->sz = size; 5544 ppim->mode = mode; 5545 ppim->va = virtual_avail; 5546 virtual_avail += size; 5547 va = ppim->va; 5548 break; 5549 } 5550 } 5551 if (va == 0) 5552 panic("%s: too many preinit mappings", __func__); 5553 } else { 5554 /* 5555 * If we have a preinit mapping, re-use it. 
5556 */ 5557 for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) { 5558 ppim = pmap_preinit_mapping + i; 5559 if (ppim->pa == pa && ppim->sz == size && 5560 (ppim->mode == mode || 5561 (flags & MAPDEV_SETATTR) == 0)) 5562 return ((void *)(ppim->va + offset)); 5563 } 5564 va = kva_alloc(size); 5565 if (va == 0) 5566 panic("%s: Couldn't allocate KVA", __func__); 5567 } 5568 for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE) { 5569 if ((flags & MAPDEV_SETATTR) == 0 && pmap_initialized) { 5570 m = PHYS_TO_VM_PAGE(pa); 5571 if (m != NULL && VM_PAGE_TO_PHYS(m) == pa) { 5572 pmap_kenter_attr(va + tmpsize, pa + tmpsize, 5573 m->md.pat_mode); 5574 continue; 5575 } 5576 } 5577 pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode); 5578 } 5579 pmap_invalidate_range_int(kernel_pmap, va, va + tmpsize); 5580 pmap_invalidate_cache_range(va, va + size); 5581 return ((void *)(va + offset)); 5582 } 5583 5584 static void 5585 __CONCAT(PMTYPE, unmapdev)(void *p, vm_size_t size) 5586 { 5587 struct pmap_preinit_mapping *ppim; 5588 vm_offset_t offset, va; 5589 int i; 5590 5591 va = (vm_offset_t)p; 5592 if (va >= PMAP_MAP_LOW && va <= KERNBASE && va + size <= KERNBASE) 5593 return; 5594 offset = va & PAGE_MASK; 5595 size = round_page(offset + size); 5596 va = trunc_page(va); 5597 for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) { 5598 ppim = pmap_preinit_mapping + i; 5599 if (ppim->va == va && ppim->sz == size) { 5600 if (pmap_initialized) 5601 return; 5602 ppim->pa = 0; 5603 ppim->va = 0; 5604 ppim->sz = 0; 5605 ppim->mode = 0; 5606 if (va + size == virtual_avail) 5607 virtual_avail = va; 5608 return; 5609 } 5610 } 5611 if (pmap_initialized) { 5612 pmap_qremove(va, atop(size)); 5613 kva_free(va, size); 5614 } 5615 } 5616 5617 /* 5618 * Sets the memory attribute for the specified page. 5619 */ 5620 static void 5621 __CONCAT(PMTYPE, page_set_memattr)(vm_page_t m, vm_memattr_t ma) 5622 { 5623 5624 m->md.pat_mode = ma; 5625 if ((m->flags & PG_FICTITIOUS) != 0) 5626 return; 5627 5628 /* 5629 * If "m" is a normal page, flush it from the cache. 5630 * See pmap_invalidate_cache_range(). 5631 * 5632 * First, try to find an existing mapping of the page by sf 5633 * buffer. sf_buf_invalidate_cache() modifies mapping and 5634 * flushes the cache. 5635 */ 5636 if (sf_buf_invalidate_cache(m)) 5637 return; 5638 5639 /* 5640 * If page is not mapped by sf buffer, but CPU does not 5641 * support self snoop, map the page transient and do 5642 * invalidation. In the worst case, whole cache is flushed by 5643 * pmap_invalidate_cache_range(). 
5644 */ 5645 if ((cpu_feature & CPUID_SS) == 0) 5646 pmap_flush_page(m); 5647 } 5648 5649 static void 5650 __CONCAT(PMTYPE, flush_page)(vm_page_t m) 5651 { 5652 pt_entry_t *cmap_pte2; 5653 struct pcpu *pc; 5654 vm_offset_t sva, eva; 5655 bool useclflushopt; 5656 5657 useclflushopt = (cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0; 5658 if (useclflushopt || (cpu_feature & CPUID_CLFSH) != 0) { 5659 sched_pin(); 5660 pc = get_pcpu(); 5661 cmap_pte2 = pc->pc_cmap_pte2; 5662 mtx_lock(&pc->pc_cmap_lock); 5663 if (*cmap_pte2) 5664 panic("pmap_flush_page: CMAP2 busy"); 5665 *cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | 5666 PG_A | PG_M | pmap_cache_bits(kernel_pmap, m->md.pat_mode, 5667 0); 5668 invlcaddr(pc->pc_cmap_addr2); 5669 sva = (vm_offset_t)pc->pc_cmap_addr2; 5670 eva = sva + PAGE_SIZE; 5671 5672 /* 5673 * Use mfence or sfence despite the ordering implied by 5674 * mtx_{un,}lock() because clflush on non-Intel CPUs 5675 * and clflushopt are not guaranteed to be ordered by 5676 * any other instruction. 5677 */ 5678 if (useclflushopt) 5679 sfence(); 5680 else if (cpu_vendor_id != CPU_VENDOR_INTEL) 5681 mfence(); 5682 for (; sva < eva; sva += cpu_clflush_line_size) { 5683 if (useclflushopt) 5684 clflushopt(sva); 5685 else 5686 clflush(sva); 5687 } 5688 if (useclflushopt) 5689 sfence(); 5690 else if (cpu_vendor_id != CPU_VENDOR_INTEL) 5691 mfence(); 5692 *cmap_pte2 = 0; 5693 sched_unpin(); 5694 mtx_unlock(&pc->pc_cmap_lock); 5695 } else 5696 pmap_invalidate_cache(); 5697 } 5698 5699 /* 5700 * Changes the specified virtual address range's memory type to that given by 5701 * the parameter "mode". The specified virtual address range must be 5702 * completely contained within the kernel map. 5703 * 5704 * Returns zero if the change completed successfully, and either EINVAL or 5705 * ENOMEM if the change failed. Specifically, EINVAL is returned if some part 5706 * of the virtual address range was not mapped, and ENOMEM is returned if 5707 * there was insufficient memory available to complete the change. 5708 */ 5709 static int 5710 __CONCAT(PMTYPE, change_attr)(vm_offset_t va, vm_size_t size, int mode) 5711 { 5712 vm_offset_t base, offset, tmpva; 5713 pd_entry_t *pde; 5714 pt_entry_t *pte; 5715 int cache_bits_pte, cache_bits_pde; 5716 boolean_t changed; 5717 5718 base = trunc_page(va); 5719 offset = va & PAGE_MASK; 5720 size = round_page(offset + size); 5721 5722 /* 5723 * Only supported on kernel virtual addresses above the recursive map. 5724 */ 5725 if (base < VM_MIN_KERNEL_ADDRESS) 5726 return (EINVAL); 5727 5728 cache_bits_pde = pmap_cache_bits(kernel_pmap, mode, 1); 5729 cache_bits_pte = pmap_cache_bits(kernel_pmap, mode, 0); 5730 changed = FALSE; 5731 5732 /* 5733 * Pages that aren't mapped aren't supported. Also break down 5734 * 2/4MB pages into 4KB pages if required. 5735 */ 5736 PMAP_LOCK(kernel_pmap); 5737 for (tmpva = base; tmpva < base + size; ) { 5738 pde = pmap_pde(kernel_pmap, tmpva); 5739 if (*pde == 0) { 5740 PMAP_UNLOCK(kernel_pmap); 5741 return (EINVAL); 5742 } 5743 if (*pde & PG_PS) { 5744 /* 5745 * If the current 2/4MB page already has 5746 * the required memory type, then we need not 5747 * demote this page. Just increment tmpva to 5748 * the next 2/4MB page frame.
5749 */ 5750 if ((*pde & PG_PDE_CACHE) == cache_bits_pde) { 5751 tmpva = trunc_4mpage(tmpva) + NBPDR; 5752 continue; 5753 } 5754 5755 /* 5756 * If the current offset aligns with a 2/4MB 5757 * page frame and there is at least 2/4MB left 5758 * within the range, then we need not break 5759 * down this page into 4KB pages. 5760 */ 5761 if ((tmpva & PDRMASK) == 0 && 5762 tmpva + PDRMASK < base + size) { 5763 tmpva += NBPDR; 5764 continue; 5765 } 5766 if (!pmap_demote_pde(kernel_pmap, pde, tmpva)) { 5767 PMAP_UNLOCK(kernel_pmap); 5768 return (ENOMEM); 5769 } 5770 } 5771 pte = vtopte(tmpva); 5772 if (*pte == 0) { 5773 PMAP_UNLOCK(kernel_pmap); 5774 return (EINVAL); 5775 } 5776 tmpva += PAGE_SIZE; 5777 } 5778 PMAP_UNLOCK(kernel_pmap); 5779 5780 /* 5781 * Ok, all the pages exist, so run through them updating their 5782 * cache mode if required. 5783 */ 5784 for (tmpva = base; tmpva < base + size; ) { 5785 pde = pmap_pde(kernel_pmap, tmpva); 5786 if (*pde & PG_PS) { 5787 if ((*pde & PG_PDE_CACHE) != cache_bits_pde) { 5788 pmap_pde_attr(pde, cache_bits_pde); 5789 changed = TRUE; 5790 } 5791 tmpva = trunc_4mpage(tmpva) + NBPDR; 5792 } else { 5793 pte = vtopte(tmpva); 5794 if ((*pte & PG_PTE_CACHE) != cache_bits_pte) { 5795 pmap_pte_attr(pte, cache_bits_pte); 5796 changed = TRUE; 5797 } 5798 tmpva += PAGE_SIZE; 5799 } 5800 } 5801 5802 /* 5803 * Flush CPU caches to make sure any data isn't cached that 5804 * shouldn't be, etc. 5805 */ 5806 if (changed) { 5807 pmap_invalidate_range_int(kernel_pmap, base, tmpva); 5808 pmap_invalidate_cache_range(base, tmpva); 5809 } 5810 return (0); 5811 } 5812 5813 /* 5814 * Perform the pmap work for mincore(2). If the page is not both referenced and 5815 * modified by this pmap, returns its physical address so that the caller can 5816 * find other mappings. 5817 */ 5818 static int 5819 __CONCAT(PMTYPE, mincore)(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap) 5820 { 5821 pd_entry_t pde; 5822 pt_entry_t pte; 5823 vm_paddr_t pa; 5824 int val; 5825 5826 PMAP_LOCK(pmap); 5827 pde = *pmap_pde(pmap, addr); 5828 if (pde != 0) { 5829 if ((pde & PG_PS) != 0) { 5830 pte = pde; 5831 /* Compute the physical address of the 4KB page. 
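The 2/4MB frame comes from PG_PS_FRAME; addr & PDRMASK then selects the 4KB page within that frame.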
*/ 5832 pa = ((pde & PG_PS_FRAME) | (addr & PDRMASK)) & 5833 PG_FRAME; 5834 val = MINCORE_PSIND(1); 5835 } else { 5836 pte = pmap_pte_ufast(pmap, addr, pde); 5837 pa = pte & PG_FRAME; 5838 val = 0; 5839 } 5840 } else { 5841 pte = 0; 5842 pa = 0; 5843 val = 0; 5844 } 5845 if ((pte & PG_V) != 0) { 5846 val |= MINCORE_INCORE; 5847 if ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 5848 val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER; 5849 if ((pte & PG_A) != 0) 5850 val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER; 5851 } 5852 if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) != 5853 (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && 5854 (pte & (PG_MANAGED | PG_V)) == (PG_MANAGED | PG_V)) { 5855 *pap = pa; 5856 } 5857 PMAP_UNLOCK(pmap); 5858 return (val); 5859 } 5860 5861 static void 5862 __CONCAT(PMTYPE, activate)(struct thread *td) 5863 { 5864 pmap_t pmap, oldpmap; 5865 u_int cpuid; 5866 u_int32_t cr3; 5867 5868 critical_enter(); 5869 pmap = vmspace_pmap(td->td_proc->p_vmspace); 5870 oldpmap = PCPU_GET(curpmap); 5871 cpuid = PCPU_GET(cpuid); 5872 #if defined(SMP) 5873 CPU_CLR_ATOMIC(cpuid, &oldpmap->pm_active); 5874 CPU_SET_ATOMIC(cpuid, &pmap->pm_active); 5875 #else 5876 CPU_CLR(cpuid, &oldpmap->pm_active); 5877 CPU_SET(cpuid, &pmap->pm_active); 5878 #endif 5879 #ifdef PMAP_PAE_COMP 5880 cr3 = vtophys(pmap->pm_pdpt); 5881 #else 5882 cr3 = vtophys(pmap->pm_pdir); 5883 #endif 5884 /* 5885 * pmap_activate is for the current thread on the current cpu 5886 */ 5887 td->td_pcb->pcb_cr3 = cr3; 5888 PCPU_SET(curpmap, pmap); 5889 critical_exit(); 5890 } 5891 5892 static void 5893 __CONCAT(PMTYPE, activate_boot)(pmap_t pmap) 5894 { 5895 u_int cpuid; 5896 5897 cpuid = PCPU_GET(cpuid); 5898 #if defined(SMP) 5899 CPU_SET_ATOMIC(cpuid, &pmap->pm_active); 5900 #else 5901 CPU_SET(cpuid, &pmap->pm_active); 5902 #endif 5903 PCPU_SET(curpmap, pmap); 5904 } 5905 5906 /* 5907 * Increase the starting virtual address of the given mapping if a 5908 * different alignment might result in more superpage mappings. 
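 * The address is advanced so that its offset within a 2/4MB page matches
 * the superpage offset of the object's backing pages, making the range
 * eligible for superpage mappings once it is fully populated.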
5909 */ 5910 static void 5911 __CONCAT(PMTYPE, align_superpage)(vm_object_t object, vm_ooffset_t offset, 5912 vm_offset_t *addr, vm_size_t size) 5913 { 5914 vm_offset_t superpage_offset; 5915 5916 if (size < NBPDR) 5917 return; 5918 if (object != NULL && (object->flags & OBJ_COLORED) != 0) 5919 offset += ptoa(object->pg_color); 5920 superpage_offset = offset & PDRMASK; 5921 if (size - ((NBPDR - superpage_offset) & PDRMASK) < NBPDR || 5922 (*addr & PDRMASK) == superpage_offset) 5923 return; 5924 if ((*addr & PDRMASK) < superpage_offset) 5925 *addr = (*addr & ~PDRMASK) + superpage_offset; 5926 else 5927 *addr = ((*addr + PDRMASK) & ~PDRMASK) + superpage_offset; 5928 } 5929 5930 static vm_offset_t 5931 __CONCAT(PMTYPE, quick_enter_page)(vm_page_t m) 5932 { 5933 vm_offset_t qaddr; 5934 pt_entry_t *pte; 5935 5936 critical_enter(); 5937 qaddr = PCPU_GET(qmap_addr); 5938 pte = vtopte(qaddr); 5939 5940 KASSERT(*pte == 0, 5941 ("pmap_quick_enter_page: PTE busy %#jx", (uintmax_t)*pte)); 5942 *pte = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M | 5943 pmap_cache_bits(kernel_pmap, pmap_page_get_memattr(m), 0); 5944 invlpg(qaddr); 5945 5946 return (qaddr); 5947 } 5948 5949 static void 5950 __CONCAT(PMTYPE, quick_remove_page)(vm_offset_t addr) 5951 { 5952 vm_offset_t qaddr; 5953 pt_entry_t *pte; 5954 5955 qaddr = PCPU_GET(qmap_addr); 5956 pte = vtopte(qaddr); 5957 5958 KASSERT(*pte != 0, ("pmap_quick_remove_page: PTE not in use")); 5959 KASSERT(addr == qaddr, ("pmap_quick_remove_page: invalid address")); 5960 5961 *pte = 0; 5962 critical_exit(); 5963 } 5964 5965 static vmem_t *pmap_trm_arena; 5966 static vmem_addr_t pmap_trm_arena_last = PMAP_TRM_MIN_ADDRESS; 5967 static int trm_guard = PAGE_SIZE; 5968 5969 static int 5970 pmap_trm_import(void *unused __unused, vmem_size_t size, int flags, 5971 vmem_addr_t *addrp) 5972 { 5973 vm_page_t m; 5974 vmem_addr_t af, addr, prev_addr; 5975 pt_entry_t *trm_pte; 5976 5977 prev_addr = atomic_load_int(&pmap_trm_arena_last); 5978 size = round_page(size) + trm_guard; 5979 for (;;) { 5980 if (prev_addr + size < prev_addr || prev_addr + size < size || 5981 prev_addr + size > PMAP_TRM_MAX_ADDRESS) 5982 return (ENOMEM); 5983 addr = prev_addr + size; 5984 if (atomic_fcmpset_int(&pmap_trm_arena_last, &prev_addr, addr)) 5985 break; 5986 } 5987 prev_addr += trm_guard; 5988 trm_pte = PTmap + atop(prev_addr); 5989 for (af = prev_addr; af < addr; af += PAGE_SIZE) { 5990 m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_WAITOK); 5991 pte_store(&trm_pte[atop(af - prev_addr)], VM_PAGE_TO_PHYS(m) | 5992 PG_M | PG_A | PG_RW | PG_V | pgeflag | 5993 pmap_cache_bits(kernel_pmap, VM_MEMATTR_DEFAULT, FALSE)); 5994 } 5995 *addrp = prev_addr; 5996 return (0); 5997 } 5998 5999 void 6000 pmap_init_trm(void) 6001 { 6002 vm_page_t pd_m; 6003 6004 TUNABLE_INT_FETCH("machdep.trm_guard", &trm_guard); 6005 if ((trm_guard & PAGE_MASK) != 0) 6006 trm_guard = 0; 6007 pmap_trm_arena = vmem_create("i386trampoline", 0, 0, 1, 0, M_WAITOK); 6008 vmem_set_import(pmap_trm_arena, pmap_trm_import, NULL, NULL, PAGE_SIZE); 6009 pd_m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_WAITOK | 6010 VM_ALLOC_ZERO); 6011 PTD[TRPTDI] = VM_PAGE_TO_PHYS(pd_m) | PG_M | PG_A | PG_RW | PG_V | 6012 pmap_cache_bits(kernel_pmap, VM_MEMATTR_DEFAULT, TRUE); 6013 } 6014 6015 static void * 6016 __CONCAT(PMTYPE, trm_alloc)(size_t size, int flags) 6017 { 6018 vmem_addr_t res; 6019 int error; 6020 6021 MPASS((flags & ~(M_WAITOK | M_NOWAIT | M_ZERO)) == 0); 6022 error = vmem_xalloc(pmap_trm_arena, roundup2(size, 4), sizeof(int), 6023 
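/* phase = 0, nocross = 0: no phase or boundary-crossing constraints */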
0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX, flags | M_FIRSTFIT, &res); 6024 if (error != 0) 6025 return (NULL); 6026 if ((flags & M_ZERO) != 0) 6027 bzero((void *)res, size); 6028 return ((void *)res); 6029 } 6030 6031 static void 6032 __CONCAT(PMTYPE, trm_free)(void *addr, size_t size) 6033 { 6034 6035 vmem_free(pmap_trm_arena, (uintptr_t)addr, roundup2(size, 4)); 6036 } 6037 6038 static void 6039 __CONCAT(PMTYPE, ksetrw)(vm_offset_t va) 6040 { 6041 6042 *vtopte(va) |= PG_RW; 6043 } 6044 6045 static void 6046 __CONCAT(PMTYPE, remap_lowptdi)(bool enable) 6047 { 6048 6049 PTD[KPTDI] = enable ? PTD[LOWPTDI] : 0; 6050 invltlb_glob(); 6051 } 6052 6053 static vm_offset_t 6054 __CONCAT(PMTYPE, get_map_low)(void) 6055 { 6056 6057 return (PMAP_MAP_LOW); 6058 } 6059 6060 static vm_offset_t 6061 __CONCAT(PMTYPE, get_vm_maxuser_address)(void) 6062 { 6063 6064 return (VM_MAXUSER_ADDRESS); 6065 } 6066 6067 static vm_paddr_t 6068 __CONCAT(PMTYPE, pg_frame)(vm_paddr_t pa) 6069 { 6070 6071 return (pa & PG_FRAME); 6072 } 6073 6074 static void 6075 __CONCAT(PMTYPE, sf_buf_map)(struct sf_buf *sf) 6076 { 6077 pt_entry_t opte, *ptep; 6078 6079 /* 6080 * Update the sf_buf's virtual-to-physical mapping, flushing the 6081 * virtual address from the TLB. Since the reference count for 6082 * the sf_buf's old mapping was zero, that mapping is not 6083 * currently in use. Consequently, there is no need to exchange 6084 * the old and new PTEs atomically, even under PAE. 6085 */ 6086 ptep = vtopte(sf->kva); 6087 opte = *ptep; 6088 *ptep = VM_PAGE_TO_PHYS(sf->m) | PG_RW | PG_V | 6089 pmap_cache_bits(kernel_pmap, sf->m->md.pat_mode, 0); 6090 6091 /* 6092 * Avoid unnecessary TLB invalidations: If the sf_buf's old 6093 * virtual-to-physical mapping was not used, then any processor 6094 * that has invalidated the sf_buf's virtual address from its TLB 6095 * since the last used mapping need not invalidate again. 6096 */ 6097 #ifdef SMP 6098 if ((opte & (PG_V | PG_A)) == (PG_V | PG_A)) 6099 CPU_ZERO(&sf->cpumask); 6100 #else 6101 if ((opte & (PG_V | PG_A)) == (PG_V | PG_A)) 6102 pmap_invalidate_page_int(kernel_pmap, sf->kva); 6103 #endif 6104 } 6105 6106 static void 6107 __CONCAT(PMTYPE, cp_slow0_map)(vm_offset_t kaddr, int plen, vm_page_t *ma) 6108 { 6109 pt_entry_t *pte; 6110 int i; 6111 6112 for (i = 0, pte = vtopte(kaddr); i < plen; i++, pte++) { 6113 *pte = PG_V | PG_RW | PG_A | PG_M | VM_PAGE_TO_PHYS(ma[i]) | 6114 pmap_cache_bits(kernel_pmap, pmap_page_get_memattr(ma[i]), 6115 FALSE); 6116 invlpg(kaddr + ptoa(i)); 6117 } 6118 } 6119 6120 static u_int 6121 __CONCAT(PMTYPE, get_kcr3)(void) 6122 { 6123 6124 #ifdef PMAP_PAE_COMP 6125 return ((u_int)IdlePDPT); 6126 #else 6127 return ((u_int)IdlePTD); 6128 #endif 6129 } 6130 6131 static u_int 6132 __CONCAT(PMTYPE, get_cr3)(pmap_t pmap) 6133 { 6134 6135 #ifdef PMAP_PAE_COMP 6136 return ((u_int)vtophys(pmap->pm_pdpt)); 6137 #else 6138 return ((u_int)vtophys(pmap->pm_pdir)); 6139 #endif 6140 } 6141 6142 static caddr_t 6143 __CONCAT(PMTYPE, cmap3)(vm_paddr_t pa, u_int pte_bits) 6144 { 6145 pt_entry_t *pte; 6146 6147 pte = CMAP3; 6148 *pte = pa | pte_bits; 6149 invltlb(); 6150 return (CADDR3); 6151 } 6152 6153 static void 6154 __CONCAT(PMTYPE, basemem_setup)(u_int basemem) 6155 { 6156 pt_entry_t *pte; 6157 int i; 6158 6159 /* 6160 * Map pages between basemem and ISA_HOLE_START, if any, r/w into 6161 * the vm86 page table so that vm86 can scribble on them using 6162 * the vm86 map too. XXX: why 2 ways for this and only 1 way for 6163 * page 0, at least as initialized here? 
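 *
 * basemem is given in kilobytes, so basemem / 4 is the first 4KB page
 * above base memory; page 160 corresponds to ISA_HOLE_START (0xa0000).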
6164 */ 6165 pte = (pt_entry_t *)vm86paddr; 6166 for (i = basemem / 4; i < 160; i++) 6167 pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U; 6168 } 6169 6170 struct bios16_pmap_handle { 6171 pt_entry_t *pte; 6172 pd_entry_t *ptd; 6173 pt_entry_t orig_ptd; 6174 }; 6175 6176 static void * 6177 __CONCAT(PMTYPE, bios16_enter)(void) 6178 { 6179 struct bios16_pmap_handle *h; 6180 6181 /* 6182 * no page table, so create one and install it. 6183 */ 6184 h = malloc(sizeof(struct bios16_pmap_handle), M_TEMP, M_WAITOK); 6185 h->pte = (pt_entry_t *)malloc(PAGE_SIZE, M_TEMP, M_WAITOK); 6186 h->ptd = IdlePTD; 6187 *h->pte = vm86phystk | PG_RW | PG_V; 6188 h->orig_ptd = *h->ptd; 6189 *h->ptd = vtophys(h->pte) | PG_RW | PG_V; 6190 pmap_invalidate_all_int(kernel_pmap); /* XXX insurance for now */ 6191 return (h); 6192 } 6193 6194 static void 6195 __CONCAT(PMTYPE, bios16_leave)(void *arg) 6196 { 6197 struct bios16_pmap_handle *h; 6198 6199 h = arg; 6200 *h->ptd = h->orig_ptd; /* remove page table */ 6201 /* 6202 * XXX only needs to be invlpg(0) but that doesn't work on the 386 6203 */ 6204 pmap_invalidate_all_int(kernel_pmap); 6205 free(h->pte, M_TEMP); /* ... and free it */ 6206 } 6207 6208 struct pmap_kernel_map_range { 6209 vm_offset_t sva; 6210 pt_entry_t attrs; 6211 int ptes; 6212 int pdes; 6213 int pdpes; 6214 }; 6215 6216 static void 6217 sysctl_kmaps_dump(struct sbuf *sb, struct pmap_kernel_map_range *range, 6218 vm_offset_t eva) 6219 { 6220 const char *mode; 6221 int i, pat_idx; 6222 6223 if (eva <= range->sva) 6224 return; 6225 6226 pat_idx = pmap_pat_index(kernel_pmap, range->attrs, true); 6227 for (i = 0; i < PAT_INDEX_SIZE; i++) 6228 if (pat_index[i] == pat_idx) 6229 break; 6230 6231 switch (i) { 6232 case PAT_WRITE_BACK: 6233 mode = "WB"; 6234 break; 6235 case PAT_WRITE_THROUGH: 6236 mode = "WT"; 6237 break; 6238 case PAT_UNCACHEABLE: 6239 mode = "UC"; 6240 break; 6241 case PAT_UNCACHED: 6242 mode = "U-"; 6243 break; 6244 case PAT_WRITE_PROTECTED: 6245 mode = "WP"; 6246 break; 6247 case PAT_WRITE_COMBINING: 6248 mode = "WC"; 6249 break; 6250 default: 6251 printf("%s: unknown PAT mode %#x for range 0x%08x-0x%08x\n", 6252 __func__, pat_idx, range->sva, eva); 6253 mode = "??"; 6254 break; 6255 } 6256 6257 sbuf_printf(sb, "0x%08x-0x%08x r%c%c%c%c %s %d %d %d\n", 6258 range->sva, eva, 6259 (range->attrs & PG_RW) != 0 ? 'w' : '-', 6260 (range->attrs & pg_nx) != 0 ? '-' : 'x', 6261 (range->attrs & PG_U) != 0 ? 'u' : 's', 6262 (range->attrs & PG_G) != 0 ? 'g' : '-', 6263 mode, range->pdpes, range->pdes, range->ptes); 6264 6265 /* Reset to sentinel value. */ 6266 range->sva = 0xffffffff; 6267 } 6268 6269 /* 6270 * Determine whether the attributes specified by a page table entry match those 6271 * being tracked by the current range. This is not quite as simple as a direct 6272 * flag comparison since some PAT modes have multiple representations. 
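 * Two encodings that differ only in the PAT bit may still resolve to the
 * same PAT entry, so that case falls back to comparing the resolved PAT
 * indices.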
6273 */ 6274 static bool 6275 sysctl_kmaps_match(struct pmap_kernel_map_range *range, pt_entry_t attrs) 6276 { 6277 pt_entry_t diff, mask; 6278 6279 mask = pg_nx | PG_G | PG_RW | PG_U | PG_PDE_CACHE; 6280 diff = (range->attrs ^ attrs) & mask; 6281 if (diff == 0) 6282 return (true); 6283 if ((diff & ~PG_PDE_PAT) == 0 && 6284 pmap_pat_index(kernel_pmap, range->attrs, true) == 6285 pmap_pat_index(kernel_pmap, attrs, true)) 6286 return (true); 6287 return (false); 6288 } 6289 6290 static void 6291 sysctl_kmaps_reinit(struct pmap_kernel_map_range *range, vm_offset_t va, 6292 pt_entry_t attrs) 6293 { 6294 6295 memset(range, 0, sizeof(*range)); 6296 range->sva = va; 6297 range->attrs = attrs; 6298 } 6299 6300 /* 6301 * Given a leaf PTE, derive the mapping's attributes. If they do not match 6302 * those of the current run, dump the address range and its attributes, and 6303 * begin a new run. 6304 */ 6305 static void 6306 sysctl_kmaps_check(struct sbuf *sb, struct pmap_kernel_map_range *range, 6307 vm_offset_t va, pd_entry_t pde, pt_entry_t pte) 6308 { 6309 pt_entry_t attrs; 6310 6311 attrs = pde & (PG_RW | PG_U | pg_nx); 6312 6313 if ((pde & PG_PS) != 0) { 6314 attrs |= pde & (PG_G | PG_PDE_CACHE); 6315 } else if (pte != 0) { 6316 attrs |= pte & pg_nx; 6317 attrs &= pg_nx | (pte & (PG_RW | PG_U)); 6318 attrs |= pte & (PG_G | PG_PTE_CACHE); 6319 6320 /* Canonicalize by always using the PDE PAT bit. */ 6321 if ((attrs & PG_PTE_PAT) != 0) 6322 attrs ^= PG_PDE_PAT | PG_PTE_PAT; 6323 } 6324 6325 if (range->sva > va || !sysctl_kmaps_match(range, attrs)) { 6326 sysctl_kmaps_dump(sb, range, va); 6327 sysctl_kmaps_reinit(range, va, attrs); 6328 } 6329 } 6330 6331 static int 6332 __CONCAT(PMTYPE, sysctl_kmaps)(SYSCTL_HANDLER_ARGS) 6333 { 6334 struct pmap_kernel_map_range range; 6335 struct sbuf sbuf, *sb; 6336 pd_entry_t pde; 6337 pt_entry_t *pt, pte; 6338 vm_offset_t sva; 6339 int error; 6340 u_int i, k; 6341 6342 error = sysctl_wire_old_buffer(req, 0); 6343 if (error != 0) 6344 return (error); 6345 sb = &sbuf; 6346 sbuf_new_for_sysctl(sb, NULL, PAGE_SIZE, req); 6347 6348 /* Sentinel value. */ 6349 range.sva = 0xffffffff; 6350 6351 /* 6352 * Iterate over the kernel page tables without holding the 6353 * kernel pmap lock. Kernel page table pages are never freed, 6354 * so at worst we will observe inconsistencies in the output. 
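 *
 * The loop below walks IdlePTD one PDE at a time and emits one output
 * line for each contiguous run of mappings with identical attributes.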
6355 */ 6356 for (sva = 0, i = 0; i < NPTEPG * NPGPTD * NPDEPG ;) { 6357 if (i == 0) 6358 sbuf_printf(sb, "\nLow PDE:\n"); 6359 else if (i == LOWPTDI * NPTEPG) 6360 sbuf_printf(sb, "Low PDE dup:\n"); 6361 else if (i == PTDPTDI * NPTEPG) 6362 sbuf_printf(sb, "Recursive map:\n"); 6363 else if (i == KERNPTDI * NPTEPG) 6364 sbuf_printf(sb, "Kernel base:\n"); 6365 else if (i == TRPTDI * NPTEPG) 6366 sbuf_printf(sb, "Trampoline:\n"); 6367 pde = IdlePTD[sva >> PDRSHIFT]; 6368 if ((pde & PG_V) == 0) { 6369 sva = rounddown2(sva, NBPDR); 6370 sysctl_kmaps_dump(sb, &range, sva); 6371 sva += NBPDR; 6372 i += NPTEPG; 6373 continue; 6374 } 6375 if ((pde & PG_PS) != 0) { 6376 sysctl_kmaps_check(sb, &range, sva, pde, 0); 6377 range.pdes++; 6378 sva += NBPDR; 6379 i += NPTEPG; 6380 continue; 6381 } 6382 for (pt = vtopte(sva), k = 0; k < NPTEPG; i++, k++, pt++, 6383 sva += PAGE_SIZE) { 6384 pte = *pt; 6385 if ((pte & PG_V) == 0) { 6386 sysctl_kmaps_dump(sb, &range, sva); 6387 continue; 6388 } 6389 sysctl_kmaps_check(sb, &range, sva, pde, pte); 6390 range.ptes++; 6391 } 6392 } 6393 6394 error = sbuf_finish(sb); 6395 sbuf_delete(sb); 6396 return (error); 6397 } 6398 6399 #define PMM(a) \ 6400 .pm_##a = __CONCAT(PMTYPE, a), 6401 6402 struct pmap_methods __CONCAT(PMTYPE, methods) = { 6403 PMM(ksetrw) 6404 PMM(remap_lower) 6405 PMM(remap_lowptdi) 6406 PMM(align_superpage) 6407 PMM(quick_enter_page) 6408 PMM(quick_remove_page) 6409 PMM(trm_alloc) 6410 PMM(trm_free) 6411 PMM(get_map_low) 6412 PMM(get_vm_maxuser_address) 6413 PMM(kextract) 6414 PMM(pg_frame) 6415 PMM(sf_buf_map) 6416 PMM(cp_slow0_map) 6417 PMM(get_kcr3) 6418 PMM(get_cr3) 6419 PMM(cmap3) 6420 PMM(basemem_setup) 6421 PMM(set_nx) 6422 PMM(bios16_enter) 6423 PMM(bios16_leave) 6424 PMM(bootstrap) 6425 PMM(is_valid_memattr) 6426 PMM(cache_bits) 6427 PMM(ps_enabled) 6428 PMM(pinit0) 6429 PMM(pinit) 6430 PMM(activate) 6431 PMM(activate_boot) 6432 PMM(advise) 6433 PMM(clear_modify) 6434 PMM(change_attr) 6435 PMM(mincore) 6436 PMM(copy) 6437 PMM(copy_page) 6438 PMM(copy_pages) 6439 PMM(zero_page) 6440 PMM(zero_page_area) 6441 PMM(enter) 6442 PMM(enter_object) 6443 PMM(enter_quick) 6444 PMM(kenter_temporary) 6445 PMM(object_init_pt) 6446 PMM(unwire) 6447 PMM(page_exists_quick) 6448 PMM(page_wired_mappings) 6449 PMM(page_is_mapped) 6450 PMM(remove_pages) 6451 PMM(is_modified) 6452 PMM(is_prefaultable) 6453 PMM(is_referenced) 6454 PMM(remove_write) 6455 PMM(ts_referenced) 6456 PMM(mapdev_attr) 6457 PMM(unmapdev) 6458 PMM(page_set_memattr) 6459 PMM(extract) 6460 PMM(extract_and_hold) 6461 PMM(map) 6462 PMM(qenter) 6463 PMM(qremove) 6464 PMM(release) 6465 PMM(remove) 6466 PMM(protect) 6467 PMM(remove_all) 6468 PMM(init) 6469 PMM(init_pat) 6470 PMM(growkernel) 6471 PMM(invalidate_page) 6472 PMM(invalidate_range) 6473 PMM(invalidate_all) 6474 PMM(invalidate_cache) 6475 PMM(flush_page) 6476 PMM(kenter) 6477 PMM(kremove) 6478 PMM(sysctl_kmaps) 6479 }; 6480
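/*
 * For illustration, assuming a hypothetical PMTYPE prefix of "pmap_pae_"
 * (PMTYPE itself is defined elsewhere; the prefix here is only an example):
 * each PMM(name) entry above expands, via __CONCAT(), into a designated
 * initializer for the corresponding pmap_methods member, e.g.
 *
 *	PMM(mincore)  =>  .pm_mincore = pmap_pae_mincore,
 *
 * so the machine-independent pmap entry points can dispatch through a
 * single methods table to whichever variant (PAE or non-PAE) the kernel
 * selected at boot.
 */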