1 /*- 2 * SPDX-License-Identifier: BSD-4-Clause 3 * 4 * Copyright (c) 1991 Regents of the University of California. 5 * All rights reserved. 6 * Copyright (c) 1994 John S. Dyson 7 * All rights reserved. 8 * Copyright (c) 1994 David Greenman 9 * All rights reserved. 10 * Copyright (c) 2005-2010 Alan L. Cox <alc@cs.rice.edu> 11 * All rights reserved. 12 * 13 * This code is derived from software contributed to Berkeley by 14 * the Systems Programming Group of the University of Utah Computer 15 * Science Department and William Jolitz of UUNET Technologies Inc. 16 * 17 * Redistribution and use in source and binary forms, with or without 18 * modification, are permitted provided that the following conditions 19 * are met: 20 * 1. Redistributions of source code must retain the above copyright 21 * notice, this list of conditions and the following disclaimer. 22 * 2. Redistributions in binary form must reproduce the above copyright 23 * notice, this list of conditions and the following disclaimer in the 24 * documentation and/or other materials provided with the distribution. 25 * 3. All advertising materials mentioning features or use of this software 26 * must display the following acknowledgement: 27 * This product includes software developed by the University of 28 * California, Berkeley and its contributors. 29 * 4. Neither the name of the University nor the names of its contributors 30 * may be used to endorse or promote products derived from this software 31 * without specific prior written permission. 32 * 33 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 34 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 35 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 36 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 37 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 38 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 39 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 40 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 41 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 42 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 43 * SUCH DAMAGE. 44 * 45 * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91 46 */ 47 /*- 48 * Copyright (c) 2003 Networks Associates Technology, Inc. 49 * All rights reserved. 50 * Copyright (c) 2018 The FreeBSD Foundation 51 * All rights reserved. 52 * 53 * This software was developed for the FreeBSD Project by Jake Burkholder, 54 * Safeport Network Services, and Network Associates Laboratories, the 55 * Security Research Division of Network Associates, Inc. under 56 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA 57 * CHATS research program. 58 * 59 * Portions of this software were developed by 60 * Konstantin Belousov <kib@FreeBSD.org> under sponsorship from 61 * the FreeBSD Foundation. 62 * 63 * Redistribution and use in source and binary forms, with or without 64 * modification, are permitted provided that the following conditions 65 * are met: 66 * 1. Redistributions of source code must retain the above copyright 67 * notice, this list of conditions and the following disclaimer. 68 * 2. Redistributions in binary form must reproduce the above copyright 69 * notice, this list of conditions and the following disclaimer in the 70 * documentation and/or other materials provided with the distribution. 71 * 72 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 73 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 74 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 75 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 76 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 77 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 78 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 79 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 80 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 81 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 82 * SUCH DAMAGE. 83 */ 84 85 #include <sys/cdefs.h> 86 __FBSDID("$FreeBSD$"); 87 88 /* 89 * Manages physical address maps. 90 * 91 * Since the information managed by this module is 92 * also stored by the logical address mapping module, 93 * this module may throw away valid virtual-to-physical 94 * mappings at almost any time. However, invalidations 95 * of virtual-to-physical mappings must be done as 96 * requested. 97 * 98 * In order to cope with hardware architectures which 99 * make virtual-to-physical map invalidates expensive, 100 * this module may delay invalidate or reduced protection 101 * operations until such time as they are actually 102 * necessary. This module is given full information as 103 * to which processors are currently using which maps, 104 * and to when physical maps must be made correct. 105 */ 106 107 #include "opt_apic.h" 108 #include "opt_cpu.h" 109 #include "opt_pmap.h" 110 #include "opt_smp.h" 111 #include "opt_vm.h" 112 113 #include <sys/param.h> 114 #include <sys/systm.h> 115 #include <sys/kernel.h> 116 #include <sys/ktr.h> 117 #include <sys/lock.h> 118 #include <sys/malloc.h> 119 #include <sys/mman.h> 120 #include <sys/msgbuf.h> 121 #include <sys/mutex.h> 122 #include <sys/proc.h> 123 #include <sys/rwlock.h> 124 #include <sys/sbuf.h> 125 #include <sys/sf_buf.h> 126 #include <sys/sx.h> 127 #include <sys/vmmeter.h> 128 #include <sys/sched.h> 129 #include <sys/sysctl.h> 130 #include <sys/smp.h> 131 #include <sys/vmem.h> 132 133 #include <vm/vm.h> 134 #include <vm/vm_param.h> 135 #include <vm/vm_kern.h> 136 #include <vm/vm_page.h> 137 #include <vm/vm_map.h> 138 #include <vm/vm_object.h> 139 #include <vm/vm_extern.h> 140 #include <vm/vm_pageout.h> 141 #include <vm/vm_pager.h> 142 #include <vm/vm_phys.h> 143 #include <vm/vm_radix.h> 144 #include <vm/vm_reserv.h> 145 #include <vm/uma.h> 146 147 #ifdef DEV_APIC 148 #include <sys/bus.h> 149 #include <machine/intr_machdep.h> 150 #include <x86/apicvar.h> 151 #endif 152 #include <x86/ifunc.h> 153 #include <machine/bootinfo.h> 154 #include <machine/cpu.h> 155 #include <machine/cputypes.h> 156 #include <machine/md_var.h> 157 #include <machine/pcb.h> 158 #include <machine/specialreg.h> 159 #ifdef SMP 160 #include <machine/smp.h> 161 #endif 162 #include <machine/pmap_base.h> 163 164 #if !defined(DIAGNOSTIC) 165 #ifdef __GNUC_GNU_INLINE__ 166 #define PMAP_INLINE __attribute__((__gnu_inline__)) inline 167 #else 168 #define PMAP_INLINE extern inline 169 #endif 170 #else 171 #define PMAP_INLINE 172 #endif 173 174 #ifdef PV_STATS 175 #define PV_STAT(x) do { x ; } while (0) 176 #else 177 #define PV_STAT(x) do { } while (0) 178 #endif 179 180 #define pa_index(pa) ((pa) >> PDRSHIFT) 181 #define pa_to_pvh(pa) (&pv_table[pa_index(pa)]) 182 183 /* 184 * PTmap is recursive pagemap at top of virtual address space. 185 * Within PTmap, the page directory can be found (third indirection). 186 */ 187 #define PTmap ((pt_entry_t *)(PTDPTDI << PDRSHIFT)) 188 #define PTD ((pd_entry_t *)((PTDPTDI << PDRSHIFT) + (PTDPTDI * PAGE_SIZE))) 189 #define PTDpde ((pd_entry_t *)((PTDPTDI << PDRSHIFT) + (PTDPTDI * PAGE_SIZE) + \ 190 (PTDPTDI * PDESIZE))) 191 192 /* 193 * Translate a virtual address to the kernel virtual address of its page table 194 * entry (PTE). This can be used recursively. If the address of a PTE as 195 * previously returned by this macro is itself given as the argument, then the 196 * address of the page directory entry (PDE) that maps the PTE will be 197 * returned. 198 * 199 * This macro may be used before pmap_bootstrap() is called. 200 */ 201 #define vtopte(va) (PTmap + i386_btop(va)) 202 203 /* 204 * Get PDEs and PTEs for user/kernel address space 205 */ 206 #define pmap_pde(m, v) (&((m)->pm_pdir[(vm_offset_t)(v) >> PDRSHIFT])) 207 #define pdir_pde(m, v) (m[(vm_offset_t)(v) >> PDRSHIFT]) 208 209 #define pmap_pde_v(pte) ((*(int *)pte & PG_V) != 0) 210 #define pmap_pte_w(pte) ((*(int *)pte & PG_W) != 0) 211 #define pmap_pte_m(pte) ((*(int *)pte & PG_M) != 0) 212 #define pmap_pte_u(pte) ((*(int *)pte & PG_A) != 0) 213 #define pmap_pte_v(pte) ((*(int *)pte & PG_V) != 0) 214 215 #define pmap_pte_set_w(pte, v) ((v) ? atomic_set_int((u_int *)(pte), PG_W) : \ 216 atomic_clear_int((u_int *)(pte), PG_W)) 217 #define pmap_pte_set_prot(pte, v) ((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v))) 218 219 static int pgeflag = 0; /* PG_G or-in */ 220 static int pseflag = 0; /* PG_PS or-in */ 221 222 static int nkpt = NKPT; 223 224 #ifdef PMAP_PAE_COMP 225 pt_entry_t pg_nx; 226 static uma_zone_t pdptzone; 227 #else 228 #define pg_nx 0 229 #endif 230 231 _Static_assert(VM_MAXUSER_ADDRESS == VADDR(TRPTDI, 0), "VM_MAXUSER_ADDRESS"); 232 _Static_assert(VM_MAX_KERNEL_ADDRESS <= VADDR(PTDPTDI, 0), 233 "VM_MAX_KERNEL_ADDRESS"); 234 _Static_assert(PMAP_MAP_LOW == VADDR(LOWPTDI, 0), "PMAP_MAP_LOW"); 235 _Static_assert(KERNLOAD == (KERNPTDI << PDRSHIFT), "KERNLOAD"); 236 237 extern int pat_works; 238 extern int pg_ps_enabled; 239 240 extern int elf32_nxstack; 241 242 #define PAT_INDEX_SIZE 8 243 static int pat_index[PAT_INDEX_SIZE]; /* cache mode to PAT index conversion */ 244 245 /* 246 * pmap_mapdev support pre initialization (i.e. console) 247 */ 248 #define PMAP_PREINIT_MAPPING_COUNT 8 249 static struct pmap_preinit_mapping { 250 vm_paddr_t pa; 251 vm_offset_t va; 252 vm_size_t sz; 253 int mode; 254 } pmap_preinit_mapping[PMAP_PREINIT_MAPPING_COUNT]; 255 static int pmap_initialized; 256 257 static struct rwlock_padalign pvh_global_lock; 258 259 /* 260 * Data for the pv entry allocation mechanism 261 */ 262 static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks); 263 extern int pv_entry_max, pv_entry_count; 264 static int pv_entry_high_water = 0; 265 static struct md_page *pv_table; 266 extern int shpgperproc; 267 268 static struct pv_chunk *pv_chunkbase; /* KVA block for pv_chunks */ 269 static int pv_maxchunks; /* How many chunks we have KVA for */ 270 static vm_offset_t pv_vafree; /* freelist stored in the PTE */ 271 272 /* 273 * All those kernel PT submaps that BSD is so fond of 274 */ 275 static pt_entry_t *CMAP3; 276 static pd_entry_t *KPTD; 277 static caddr_t CADDR3; 278 279 /* 280 * Crashdump maps. 281 */ 282 static caddr_t crashdumpmap; 283 284 static pt_entry_t *PMAP1 = NULL, *PMAP2, *PMAP3; 285 static pt_entry_t *PADDR1 = NULL, *PADDR2, *PADDR3; 286 #ifdef SMP 287 static int PMAP1cpu, PMAP3cpu; 288 extern int PMAP1changedcpu; 289 #endif 290 extern int PMAP1changed; 291 extern int PMAP1unchanged; 292 static struct mtx PMAP2mutex; 293 294 /* 295 * Internal flags for pmap_enter()'s helper functions. 296 */ 297 #define PMAP_ENTER_NORECLAIM 0x1000000 /* Don't reclaim PV entries. */ 298 #define PMAP_ENTER_NOREPLACE 0x2000000 /* Don't replace mappings. */ 299 300 static void free_pv_chunk(struct pv_chunk *pc); 301 static void free_pv_entry(pmap_t pmap, pv_entry_t pv); 302 static pv_entry_t get_pv_entry(pmap_t pmap, boolean_t try); 303 static void pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa); 304 static bool pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, pd_entry_t pde, 305 u_int flags); 306 #if VM_NRESERVLEVEL > 0 307 static void pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa); 308 #endif 309 static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va); 310 static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, 311 vm_offset_t va); 312 static int pmap_pvh_wired_mappings(struct md_page *pvh, int count); 313 314 static void pmap_abort_ptp(pmap_t pmap, vm_offset_t va, vm_page_t mpte); 315 static boolean_t pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va); 316 static bool pmap_enter_4mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, 317 vm_prot_t prot); 318 static int pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde, 319 u_int flags, vm_page_t m); 320 static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, 321 vm_page_t m, vm_prot_t prot, vm_page_t mpte); 322 static int pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte, bool promoted); 323 static void pmap_invalidate_pde_page(pmap_t pmap, vm_offset_t va, 324 pd_entry_t pde); 325 static void pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte); 326 static boolean_t pmap_is_modified_pvh(struct md_page *pvh); 327 static boolean_t pmap_is_referenced_pvh(struct md_page *pvh); 328 static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode); 329 static void pmap_kenter_pde(vm_offset_t va, pd_entry_t newpde); 330 static void pmap_pde_attr(pd_entry_t *pde, int cache_bits); 331 #if VM_NRESERVLEVEL > 0 332 static void pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va); 333 #endif 334 static boolean_t pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, 335 vm_prot_t prot); 336 static void pmap_pte_attr(pt_entry_t *pte, int cache_bits); 337 static void pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva, 338 struct spglist *free); 339 static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva, 340 struct spglist *free); 341 static vm_page_t pmap_remove_pt_page(pmap_t pmap, vm_offset_t va); 342 static void pmap_remove_page(pmap_t pmap, vm_offset_t va, struct spglist *free); 343 static bool pmap_remove_ptes(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, 344 struct spglist *free); 345 static void pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va); 346 static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m); 347 static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, 348 vm_page_t m); 349 static void pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, 350 pd_entry_t newpde); 351 static void pmap_update_pde_invalidate(vm_offset_t va, pd_entry_t newpde); 352 353 static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, u_int flags); 354 355 static vm_page_t _pmap_allocpte(pmap_t pmap, u_int ptepindex, u_int flags); 356 static void _pmap_unwire_ptp(pmap_t pmap, vm_page_t m, struct spglist *free); 357 static pt_entry_t *pmap_pte_quick(pmap_t pmap, vm_offset_t va); 358 static void pmap_pte_release(pt_entry_t *pte); 359 static int pmap_unuse_pt(pmap_t, vm_offset_t, struct spglist *); 360 #ifdef PMAP_PAE_COMP 361 static void *pmap_pdpt_allocf(uma_zone_t zone, vm_size_t bytes, int domain, 362 uint8_t *flags, int wait); 363 #endif 364 static void pmap_init_trm(void); 365 static void pmap_invalidate_all_int(pmap_t pmap); 366 367 static __inline void pagezero(void *page); 368 369 CTASSERT(1 << PDESHIFT == sizeof(pd_entry_t)); 370 CTASSERT(1 << PTESHIFT == sizeof(pt_entry_t)); 371 372 extern char _end[]; 373 extern u_long physfree; /* phys addr of next free page */ 374 extern u_long vm86phystk;/* PA of vm86/bios stack */ 375 extern u_long vm86paddr;/* address of vm86 region */ 376 extern int vm86pa; /* phys addr of vm86 region */ 377 extern u_long KERNend; /* phys addr end of kernel (just after bss) */ 378 #ifdef PMAP_PAE_COMP 379 pd_entry_t *IdlePTD_pae; /* phys addr of kernel PTD */ 380 pdpt_entry_t *IdlePDPT; /* phys addr of kernel PDPT */ 381 pt_entry_t *KPTmap_pae; /* address of kernel page tables */ 382 #define IdlePTD IdlePTD_pae 383 #define KPTmap KPTmap_pae 384 #else 385 pd_entry_t *IdlePTD_nopae; 386 pt_entry_t *KPTmap_nopae; 387 #define IdlePTD IdlePTD_nopae 388 #define KPTmap KPTmap_nopae 389 #endif 390 extern u_long KPTphys; /* phys addr of kernel page tables */ 391 extern u_long tramp_idleptd; 392 393 static u_long 394 allocpages(u_int cnt, u_long *physfree) 395 { 396 u_long res; 397 398 res = *physfree; 399 *physfree += PAGE_SIZE * cnt; 400 bzero((void *)res, PAGE_SIZE * cnt); 401 return (res); 402 } 403 404 static void 405 pmap_cold_map(u_long pa, u_long va, u_long cnt) 406 { 407 pt_entry_t *pt; 408 409 for (pt = (pt_entry_t *)KPTphys + atop(va); cnt > 0; 410 cnt--, pt++, va += PAGE_SIZE, pa += PAGE_SIZE) 411 *pt = pa | PG_V | PG_RW | PG_A | PG_M; 412 } 413 414 static void 415 pmap_cold_mapident(u_long pa, u_long cnt) 416 { 417 418 pmap_cold_map(pa, pa, cnt); 419 } 420 421 _Static_assert(LOWPTDI * 2 * NBPDR == KERNBASE, 422 "Broken double-map of zero PTD"); 423 424 static void 425 __CONCAT(PMTYPE, remap_lower)(bool enable) 426 { 427 int i; 428 429 for (i = 0; i < LOWPTDI; i++) 430 IdlePTD[i] = enable ? IdlePTD[LOWPTDI + i] : 0; 431 load_cr3(rcr3()); /* invalidate TLB */ 432 } 433 434 /* 435 * Called from locore.s before paging is enabled. Sets up the first 436 * kernel page table. Since kernel is mapped with PA == VA, this code 437 * does not require relocations. 438 */ 439 void 440 __CONCAT(PMTYPE, cold)(void) 441 { 442 pt_entry_t *pt; 443 u_long a; 444 u_int cr3, ncr4; 445 446 physfree = (u_long)&_end; 447 if (bootinfo.bi_esymtab != 0) 448 physfree = bootinfo.bi_esymtab; 449 if (bootinfo.bi_kernend != 0) 450 physfree = bootinfo.bi_kernend; 451 physfree = roundup2(physfree, NBPDR); 452 KERNend = physfree; 453 454 /* Allocate Kernel Page Tables */ 455 KPTphys = allocpages(NKPT, &physfree); 456 KPTmap = (pt_entry_t *)KPTphys; 457 458 /* Allocate Page Table Directory */ 459 #ifdef PMAP_PAE_COMP 460 /* XXX only need 32 bytes (easier for now) */ 461 IdlePDPT = (pdpt_entry_t *)allocpages(1, &physfree); 462 #endif 463 IdlePTD = (pd_entry_t *)allocpages(NPGPTD, &physfree); 464 465 /* 466 * Allocate KSTACK. Leave a guard page between IdlePTD and 467 * proc0kstack, to control stack overflow for thread0 and 468 * prevent corruption of the page table. We leak the guard 469 * physical memory due to 1:1 mappings. 470 */ 471 allocpages(1, &physfree); 472 proc0kstack = allocpages(TD0_KSTACK_PAGES, &physfree); 473 474 /* vm86/bios stack */ 475 vm86phystk = allocpages(1, &physfree); 476 477 /* pgtable + ext + IOPAGES */ 478 vm86paddr = vm86pa = allocpages(3, &physfree); 479 480 /* Install page tables into PTD. Page table page 1 is wasted. */ 481 for (a = 0; a < NKPT; a++) 482 IdlePTD[a] = (KPTphys + ptoa(a)) | PG_V | PG_RW | PG_A | PG_M; 483 484 #ifdef PMAP_PAE_COMP 485 /* PAE install PTD pointers into PDPT */ 486 for (a = 0; a < NPGPTD; a++) 487 IdlePDPT[a] = ((u_int)IdlePTD + ptoa(a)) | PG_V; 488 #endif 489 490 /* 491 * Install recursive mapping for kernel page tables into 492 * itself. 493 */ 494 for (a = 0; a < NPGPTD; a++) 495 IdlePTD[PTDPTDI + a] = ((u_int)IdlePTD + ptoa(a)) | PG_V | 496 PG_RW; 497 498 /* 499 * Initialize page table pages mapping physical address zero 500 * through the (physical) end of the kernel. Many of these 501 * pages must be reserved, and we reserve them all and map 502 * them linearly for convenience. We do this even if we've 503 * enabled PSE above; we'll just switch the corresponding 504 * kernel PDEs before we turn on paging. 505 * 506 * This and all other page table entries allow read and write 507 * access for various reasons. Kernel mappings never have any 508 * access restrictions. 509 */ 510 pmap_cold_mapident(0, atop(NBPDR) * LOWPTDI); 511 pmap_cold_map(0, NBPDR * LOWPTDI, atop(NBPDR) * LOWPTDI); 512 pmap_cold_mapident(KERNBASE, atop(KERNend - KERNBASE)); 513 514 /* Map page table directory */ 515 #ifdef PMAP_PAE_COMP 516 pmap_cold_mapident((u_long)IdlePDPT, 1); 517 #endif 518 pmap_cold_mapident((u_long)IdlePTD, NPGPTD); 519 520 /* Map early KPTmap. It is really pmap_cold_mapident. */ 521 pmap_cold_map(KPTphys, (u_long)KPTmap, NKPT); 522 523 /* Map proc0kstack */ 524 pmap_cold_mapident(proc0kstack, TD0_KSTACK_PAGES); 525 /* ISA hole already mapped */ 526 527 pmap_cold_mapident(vm86phystk, 1); 528 pmap_cold_mapident(vm86pa, 3); 529 530 /* Map page 0 into the vm86 page table */ 531 *(pt_entry_t *)vm86pa = 0 | PG_RW | PG_U | PG_A | PG_M | PG_V; 532 533 /* ...likewise for the ISA hole for vm86 */ 534 for (pt = (pt_entry_t *)vm86pa + atop(ISA_HOLE_START), a = 0; 535 a < atop(ISA_HOLE_LENGTH); a++, pt++) 536 *pt = (ISA_HOLE_START + ptoa(a)) | PG_RW | PG_U | PG_A | 537 PG_M | PG_V; 538 539 /* Enable PSE, PGE, VME, and PAE if configured. */ 540 ncr4 = 0; 541 if ((cpu_feature & CPUID_PSE) != 0) { 542 ncr4 |= CR4_PSE; 543 pseflag = PG_PS; 544 /* 545 * Superpage mapping of the kernel text. Existing 4k 546 * page table pages are wasted. 547 */ 548 for (a = KERNBASE; a < KERNend; a += NBPDR) 549 IdlePTD[a >> PDRSHIFT] = a | PG_PS | PG_A | PG_M | 550 PG_RW | PG_V; 551 } 552 if ((cpu_feature & CPUID_PGE) != 0) { 553 ncr4 |= CR4_PGE; 554 pgeflag = PG_G; 555 } 556 ncr4 |= (cpu_feature & CPUID_VME) != 0 ? CR4_VME : 0; 557 #ifdef PMAP_PAE_COMP 558 ncr4 |= CR4_PAE; 559 #endif 560 if (ncr4 != 0) 561 load_cr4(rcr4() | ncr4); 562 563 /* Now enable paging */ 564 #ifdef PMAP_PAE_COMP 565 cr3 = (u_int)IdlePDPT; 566 if ((cpu_feature & CPUID_PAT) == 0) 567 wbinvd(); 568 #else 569 cr3 = (u_int)IdlePTD; 570 #endif 571 tramp_idleptd = cr3; 572 load_cr3(cr3); 573 load_cr0(rcr0() | CR0_PG); 574 575 /* 576 * Now running relocated at KERNBASE where the system is 577 * linked to run. 578 */ 579 580 /* 581 * Remove the lowest part of the double mapping of low memory 582 * to get some null pointer checks. 583 */ 584 __CONCAT(PMTYPE, remap_lower)(false); 585 586 kernel_vm_end = /* 0 + */ NKPT * NBPDR; 587 #ifdef PMAP_PAE_COMP 588 i386_pmap_VM_NFREEORDER = VM_NFREEORDER_PAE; 589 i386_pmap_VM_LEVEL_0_ORDER = VM_LEVEL_0_ORDER_PAE; 590 i386_pmap_PDRSHIFT = PDRSHIFT_PAE; 591 #else 592 i386_pmap_VM_NFREEORDER = VM_NFREEORDER_NOPAE; 593 i386_pmap_VM_LEVEL_0_ORDER = VM_LEVEL_0_ORDER_NOPAE; 594 i386_pmap_PDRSHIFT = PDRSHIFT_NOPAE; 595 #endif 596 } 597 598 static void 599 __CONCAT(PMTYPE, set_nx)(void) 600 { 601 602 #ifdef PMAP_PAE_COMP 603 if ((amd_feature & AMDID_NX) == 0) 604 return; 605 pg_nx = PG_NX; 606 elf32_nxstack = 1; 607 /* EFER.EFER_NXE is set in initializecpu(). */ 608 #endif 609 } 610 611 /* 612 * Bootstrap the system enough to run with virtual memory. 613 * 614 * On the i386 this is called after pmap_cold() created initial 615 * kernel page table and enabled paging, and just syncs the pmap 616 * module with what has already been done. 617 */ 618 static void 619 __CONCAT(PMTYPE, bootstrap)(vm_paddr_t firstaddr) 620 { 621 vm_offset_t va; 622 pt_entry_t *pte, *unused __unused; 623 struct pcpu *pc; 624 u_long res; 625 int i; 626 627 res = atop(firstaddr - (vm_paddr_t)KERNLOAD); 628 629 /* 630 * Add a physical memory segment (vm_phys_seg) corresponding to the 631 * preallocated kernel page table pages so that vm_page structures 632 * representing these pages will be created. The vm_page structures 633 * are required for promotion of the corresponding kernel virtual 634 * addresses to superpage mappings. 635 */ 636 vm_phys_early_add_seg(KPTphys, KPTphys + ptoa(nkpt)); 637 638 /* 639 * Initialize the first available kernel virtual address. 640 * However, using "firstaddr" may waste a few pages of the 641 * kernel virtual address space, because pmap_cold() may not 642 * have mapped every physical page that it allocated. 643 * Preferably, pmap_cold() would provide a first unused 644 * virtual address in addition to "firstaddr". 645 */ 646 virtual_avail = (vm_offset_t)firstaddr; 647 virtual_end = VM_MAX_KERNEL_ADDRESS; 648 649 /* 650 * Initialize the kernel pmap (which is statically allocated). 651 * Count bootstrap data as being resident in case any of this data is 652 * later unmapped (using pmap_remove()) and freed. 653 */ 654 PMAP_LOCK_INIT(kernel_pmap); 655 kernel_pmap->pm_pdir = IdlePTD; 656 #ifdef PMAP_PAE_COMP 657 kernel_pmap->pm_pdpt = IdlePDPT; 658 #endif 659 CPU_FILL(&kernel_pmap->pm_active); /* don't allow deactivation */ 660 kernel_pmap->pm_stats.resident_count = res; 661 TAILQ_INIT(&kernel_pmap->pm_pvchunk); 662 663 /* 664 * Initialize the global pv list lock. 665 */ 666 rw_init(&pvh_global_lock, "pmap pv global"); 667 668 /* 669 * Reserve some special page table entries/VA space for temporary 670 * mapping of pages. 671 */ 672 #define SYSMAP(c, p, v, n) \ 673 v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n); 674 675 va = virtual_avail; 676 pte = vtopte(va); 677 678 /* 679 * Initialize temporary map objects on the current CPU for use 680 * during early boot. 681 * CMAP1/CMAP2 are used for zeroing and copying pages. 682 * CMAP3 is used for the boot-time memory test. 683 */ 684 pc = get_pcpu(); 685 mtx_init(&pc->pc_cmap_lock, "SYSMAPS", NULL, MTX_DEF); 686 SYSMAP(caddr_t, pc->pc_cmap_pte1, pc->pc_cmap_addr1, 1) 687 SYSMAP(caddr_t, pc->pc_cmap_pte2, pc->pc_cmap_addr2, 1) 688 SYSMAP(vm_offset_t, pte, pc->pc_qmap_addr, 1) 689 690 SYSMAP(caddr_t, CMAP3, CADDR3, 1); 691 692 /* 693 * Crashdump maps. 694 */ 695 SYSMAP(caddr_t, unused, crashdumpmap, MAXDUMPPGS) 696 697 /* 698 * ptvmmap is used for reading arbitrary physical pages via /dev/mem. 699 */ 700 SYSMAP(caddr_t, unused, ptvmmap, 1) 701 702 /* 703 * msgbufp is used to map the system message buffer. 704 */ 705 SYSMAP(struct msgbuf *, unused, msgbufp, atop(round_page(msgbufsize))) 706 707 /* 708 * KPTmap is used by pmap_kextract(). 709 * 710 * KPTmap is first initialized by pmap_cold(). However, that initial 711 * KPTmap can only support NKPT page table pages. Here, a larger 712 * KPTmap is created that can support KVA_PAGES page table pages. 713 */ 714 SYSMAP(pt_entry_t *, KPTD, KPTmap, KVA_PAGES) 715 716 for (i = 0; i < NKPT; i++) 717 KPTD[i] = (KPTphys + ptoa(i)) | PG_RW | PG_V; 718 719 /* 720 * PADDR1 and PADDR2 are used by pmap_pte_quick() and pmap_pte(), 721 * respectively. 722 */ 723 SYSMAP(pt_entry_t *, PMAP1, PADDR1, 1) 724 SYSMAP(pt_entry_t *, PMAP2, PADDR2, 1) 725 SYSMAP(pt_entry_t *, PMAP3, PADDR3, 1) 726 727 mtx_init(&PMAP2mutex, "PMAP2", NULL, MTX_DEF); 728 729 virtual_avail = va; 730 731 /* 732 * Initialize the PAT MSR if present. 733 * pmap_init_pat() clears and sets CR4_PGE, which, as a 734 * side-effect, invalidates stale PG_G TLB entries that might 735 * have been created in our pre-boot environment. We assume 736 * that PAT support implies PGE and in reverse, PGE presence 737 * comes with PAT. Both features were added for Pentium Pro. 738 */ 739 pmap_init_pat(); 740 } 741 742 static void 743 pmap_init_reserved_pages(void) 744 { 745 struct pcpu *pc; 746 vm_offset_t pages; 747 int i; 748 749 #ifdef PMAP_PAE_COMP 750 if (!pae_mode) 751 return; 752 #else 753 if (pae_mode) 754 return; 755 #endif 756 CPU_FOREACH(i) { 757 pc = pcpu_find(i); 758 mtx_init(&pc->pc_copyout_mlock, "cpmlk", NULL, MTX_DEF | 759 MTX_NEW); 760 pc->pc_copyout_maddr = kva_alloc(ptoa(2)); 761 if (pc->pc_copyout_maddr == 0) 762 panic("unable to allocate non-sleepable copyout KVA"); 763 sx_init(&pc->pc_copyout_slock, "cpslk"); 764 pc->pc_copyout_saddr = kva_alloc(ptoa(2)); 765 if (pc->pc_copyout_saddr == 0) 766 panic("unable to allocate sleepable copyout KVA"); 767 pc->pc_pmap_eh_va = kva_alloc(ptoa(1)); 768 if (pc->pc_pmap_eh_va == 0) 769 panic("unable to allocate pmap_extract_and_hold KVA"); 770 pc->pc_pmap_eh_ptep = (char *)vtopte(pc->pc_pmap_eh_va); 771 772 /* 773 * Skip if the mappings have already been initialized, 774 * i.e. this is the BSP. 775 */ 776 if (pc->pc_cmap_addr1 != 0) 777 continue; 778 779 mtx_init(&pc->pc_cmap_lock, "SYSMAPS", NULL, MTX_DEF); 780 pages = kva_alloc(PAGE_SIZE * 3); 781 if (pages == 0) 782 panic("unable to allocate CMAP KVA"); 783 pc->pc_cmap_pte1 = vtopte(pages); 784 pc->pc_cmap_pte2 = vtopte(pages + PAGE_SIZE); 785 pc->pc_cmap_addr1 = (caddr_t)pages; 786 pc->pc_cmap_addr2 = (caddr_t)(pages + PAGE_SIZE); 787 pc->pc_qmap_addr = pages + ptoa(2); 788 } 789 } 790 791 SYSINIT(rpages_init, SI_SUB_CPU, SI_ORDER_ANY, pmap_init_reserved_pages, NULL); 792 793 /* 794 * Setup the PAT MSR. 795 */ 796 static void 797 __CONCAT(PMTYPE, init_pat)(void) 798 { 799 int pat_table[PAT_INDEX_SIZE]; 800 uint64_t pat_msr; 801 u_long cr0, cr4; 802 int i; 803 804 /* Set default PAT index table. */ 805 for (i = 0; i < PAT_INDEX_SIZE; i++) 806 pat_table[i] = -1; 807 pat_table[PAT_WRITE_BACK] = 0; 808 pat_table[PAT_WRITE_THROUGH] = 1; 809 pat_table[PAT_UNCACHEABLE] = 3; 810 pat_table[PAT_WRITE_COMBINING] = 3; 811 pat_table[PAT_WRITE_PROTECTED] = 3; 812 pat_table[PAT_UNCACHED] = 3; 813 814 /* 815 * Bail if this CPU doesn't implement PAT. 816 * We assume that PAT support implies PGE. 817 */ 818 if ((cpu_feature & CPUID_PAT) == 0) { 819 for (i = 0; i < PAT_INDEX_SIZE; i++) 820 pat_index[i] = pat_table[i]; 821 pat_works = 0; 822 return; 823 } 824 825 /* 826 * Due to some Intel errata, we can only safely use the lower 4 827 * PAT entries. 828 * 829 * Intel Pentium III Processor Specification Update 830 * Errata E.27 (Upper Four PAT Entries Not Usable With Mode B 831 * or Mode C Paging) 832 * 833 * Intel Pentium IV Processor Specification Update 834 * Errata N46 (PAT Index MSB May Be Calculated Incorrectly) 835 */ 836 if (cpu_vendor_id == CPU_VENDOR_INTEL && 837 !(CPUID_TO_FAMILY(cpu_id) == 6 && CPUID_TO_MODEL(cpu_id) >= 0xe)) 838 pat_works = 0; 839 840 /* Initialize default PAT entries. */ 841 pat_msr = PAT_VALUE(0, PAT_WRITE_BACK) | 842 PAT_VALUE(1, PAT_WRITE_THROUGH) | 843 PAT_VALUE(2, PAT_UNCACHED) | 844 PAT_VALUE(3, PAT_UNCACHEABLE) | 845 PAT_VALUE(4, PAT_WRITE_BACK) | 846 PAT_VALUE(5, PAT_WRITE_THROUGH) | 847 PAT_VALUE(6, PAT_UNCACHED) | 848 PAT_VALUE(7, PAT_UNCACHEABLE); 849 850 if (pat_works) { 851 /* 852 * Leave the indices 0-3 at the default of WB, WT, UC-, and UC. 853 * Program 5 and 6 as WP and WC. 854 * Leave 4 and 7 as WB and UC. 855 */ 856 pat_msr &= ~(PAT_MASK(5) | PAT_MASK(6)); 857 pat_msr |= PAT_VALUE(5, PAT_WRITE_PROTECTED) | 858 PAT_VALUE(6, PAT_WRITE_COMBINING); 859 pat_table[PAT_UNCACHED] = 2; 860 pat_table[PAT_WRITE_PROTECTED] = 5; 861 pat_table[PAT_WRITE_COMBINING] = 6; 862 } else { 863 /* 864 * Just replace PAT Index 2 with WC instead of UC-. 865 */ 866 pat_msr &= ~PAT_MASK(2); 867 pat_msr |= PAT_VALUE(2, PAT_WRITE_COMBINING); 868 pat_table[PAT_WRITE_COMBINING] = 2; 869 } 870 871 /* Disable PGE. */ 872 cr4 = rcr4(); 873 load_cr4(cr4 & ~CR4_PGE); 874 875 /* Disable caches (CD = 1, NW = 0). */ 876 cr0 = rcr0(); 877 load_cr0((cr0 & ~CR0_NW) | CR0_CD); 878 879 /* Flushes caches and TLBs. */ 880 wbinvd(); 881 invltlb(); 882 883 /* Update PAT and index table. */ 884 wrmsr(MSR_PAT, pat_msr); 885 for (i = 0; i < PAT_INDEX_SIZE; i++) 886 pat_index[i] = pat_table[i]; 887 888 /* Flush caches and TLBs again. */ 889 wbinvd(); 890 invltlb(); 891 892 /* Restore caches and PGE. */ 893 load_cr0(cr0); 894 load_cr4(cr4); 895 } 896 897 #ifdef PMAP_PAE_COMP 898 static void * 899 pmap_pdpt_allocf(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *flags, 900 int wait) 901 { 902 903 /* Inform UMA that this allocator uses kernel_map/object. */ 904 *flags = UMA_SLAB_KERNEL; 905 return ((void *)kmem_alloc_contig_domainset(DOMAINSET_FIXED(domain), 906 bytes, wait, 0x0ULL, 0xffffffffULL, 1, 0, VM_MEMATTR_DEFAULT)); 907 } 908 #endif 909 910 /* 911 * Abuse the pte nodes for unmapped kva to thread a kva freelist through. 912 * Requirements: 913 * - Must deal with pages in order to ensure that none of the PG_* bits 914 * are ever set, PG_V in particular. 915 * - Assumes we can write to ptes without pte_store() atomic ops, even 916 * on PAE systems. This should be ok. 917 * - Assumes nothing will ever test these addresses for 0 to indicate 918 * no mapping instead of correctly checking PG_V. 919 * - Assumes a vm_offset_t will fit in a pte (true for i386). 920 * Because PG_V is never set, there can be no mappings to invalidate. 921 */ 922 static vm_offset_t 923 pmap_ptelist_alloc(vm_offset_t *head) 924 { 925 pt_entry_t *pte; 926 vm_offset_t va; 927 928 va = *head; 929 if (va == 0) 930 panic("pmap_ptelist_alloc: exhausted ptelist KVA"); 931 pte = vtopte(va); 932 *head = *pte; 933 if (*head & PG_V) 934 panic("pmap_ptelist_alloc: va with PG_V set!"); 935 *pte = 0; 936 return (va); 937 } 938 939 static void 940 pmap_ptelist_free(vm_offset_t *head, vm_offset_t va) 941 { 942 pt_entry_t *pte; 943 944 if (va & PG_V) 945 panic("pmap_ptelist_free: freeing va with PG_V set!"); 946 pte = vtopte(va); 947 *pte = *head; /* virtual! PG_V is 0 though */ 948 *head = va; 949 } 950 951 static void 952 pmap_ptelist_init(vm_offset_t *head, void *base, int npages) 953 { 954 int i; 955 vm_offset_t va; 956 957 *head = 0; 958 for (i = npages - 1; i >= 0; i--) { 959 va = (vm_offset_t)base + i * PAGE_SIZE; 960 pmap_ptelist_free(head, va); 961 } 962 } 963 964 /* 965 * Initialize the pmap module. 966 * Called by vm_init, to initialize any structures that the pmap 967 * system needs to map virtual memory. 968 */ 969 static void 970 __CONCAT(PMTYPE, init)(void) 971 { 972 struct pmap_preinit_mapping *ppim; 973 vm_page_t mpte; 974 vm_size_t s; 975 int i, pv_npg; 976 977 /* 978 * Initialize the vm page array entries for the kernel pmap's 979 * page table pages. 980 */ 981 PMAP_LOCK(kernel_pmap); 982 for (i = 0; i < NKPT; i++) { 983 mpte = PHYS_TO_VM_PAGE(KPTphys + ptoa(i)); 984 KASSERT(mpte >= vm_page_array && 985 mpte < &vm_page_array[vm_page_array_size], 986 ("pmap_init: page table page is out of range")); 987 mpte->pindex = i + KPTDI; 988 mpte->phys_addr = KPTphys + ptoa(i); 989 mpte->ref_count = 1; 990 991 /* 992 * Collect the page table pages that were replaced by a 2/4MB 993 * page. They are filled with equivalent 4KB page mappings. 994 */ 995 if (pseflag != 0 && 996 KERNBASE <= i << PDRSHIFT && i << PDRSHIFT < KERNend && 997 pmap_insert_pt_page(kernel_pmap, mpte, true)) 998 panic("pmap_init: pmap_insert_pt_page failed"); 999 } 1000 PMAP_UNLOCK(kernel_pmap); 1001 vm_wire_add(NKPT); 1002 1003 /* 1004 * Initialize the address space (zone) for the pv entries. Set a 1005 * high water mark so that the system can recover from excessive 1006 * numbers of pv entries. 1007 */ 1008 TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc); 1009 pv_entry_max = shpgperproc * maxproc + vm_cnt.v_page_count; 1010 TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max); 1011 pv_entry_max = roundup(pv_entry_max, _NPCPV); 1012 pv_entry_high_water = 9 * (pv_entry_max / 10); 1013 1014 /* 1015 * If the kernel is running on a virtual machine, then it must assume 1016 * that MCA is enabled by the hypervisor. Moreover, the kernel must 1017 * be prepared for the hypervisor changing the vendor and family that 1018 * are reported by CPUID. Consequently, the workaround for AMD Family 1019 * 10h Erratum 383 is enabled if the processor's feature set does not 1020 * include at least one feature that is only supported by older Intel 1021 * or newer AMD processors. 1022 */ 1023 if (vm_guest != VM_GUEST_NO && (cpu_feature & CPUID_SS) == 0 && 1024 (cpu_feature2 & (CPUID2_SSSE3 | CPUID2_SSE41 | CPUID2_AESNI | 1025 CPUID2_AVX | CPUID2_XSAVE)) == 0 && (amd_feature2 & (AMDID2_XOP | 1026 AMDID2_FMA4)) == 0) 1027 workaround_erratum383 = 1; 1028 1029 /* 1030 * Are large page mappings supported and enabled? 1031 */ 1032 TUNABLE_INT_FETCH("vm.pmap.pg_ps_enabled", &pg_ps_enabled); 1033 if (pseflag == 0) 1034 pg_ps_enabled = 0; 1035 else if (pg_ps_enabled) { 1036 KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0, 1037 ("pmap_init: can't assign to pagesizes[1]")); 1038 pagesizes[1] = NBPDR; 1039 } 1040 1041 /* 1042 * Calculate the size of the pv head table for superpages. 1043 * Handle the possibility that "vm_phys_segs[...].end" is zero. 1044 */ 1045 pv_npg = trunc_4mpage(vm_phys_segs[vm_phys_nsegs - 1].end - 1046 PAGE_SIZE) / NBPDR + 1; 1047 1048 /* 1049 * Allocate memory for the pv head table for superpages. 1050 */ 1051 s = (vm_size_t)(pv_npg * sizeof(struct md_page)); 1052 s = round_page(s); 1053 pv_table = (struct md_page *)kmem_malloc(s, M_WAITOK | M_ZERO); 1054 for (i = 0; i < pv_npg; i++) 1055 TAILQ_INIT(&pv_table[i].pv_list); 1056 1057 pv_maxchunks = MAX(pv_entry_max / _NPCPV, maxproc); 1058 pv_chunkbase = (struct pv_chunk *)kva_alloc(PAGE_SIZE * pv_maxchunks); 1059 if (pv_chunkbase == NULL) 1060 panic("pmap_init: not enough kvm for pv chunks"); 1061 pmap_ptelist_init(&pv_vafree, pv_chunkbase, pv_maxchunks); 1062 #ifdef PMAP_PAE_COMP 1063 pdptzone = uma_zcreate("PDPT", NPGPTD * sizeof(pdpt_entry_t), NULL, 1064 NULL, NULL, NULL, (NPGPTD * sizeof(pdpt_entry_t)) - 1, 1065 UMA_ZONE_CONTIG | UMA_ZONE_VM | UMA_ZONE_NOFREE); 1066 uma_zone_set_allocf(pdptzone, pmap_pdpt_allocf); 1067 #endif 1068 1069 pmap_initialized = 1; 1070 pmap_init_trm(); 1071 1072 if (!bootverbose) 1073 return; 1074 for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) { 1075 ppim = pmap_preinit_mapping + i; 1076 if (ppim->va == 0) 1077 continue; 1078 printf("PPIM %u: PA=%#jx, VA=%#x, size=%#x, mode=%#x\n", i, 1079 (uintmax_t)ppim->pa, ppim->va, ppim->sz, ppim->mode); 1080 } 1081 1082 } 1083 1084 extern u_long pmap_pde_demotions; 1085 extern u_long pmap_pde_mappings; 1086 extern u_long pmap_pde_p_failures; 1087 extern u_long pmap_pde_promotions; 1088 1089 /*************************************************** 1090 * Low level helper routines..... 1091 ***************************************************/ 1092 1093 static boolean_t 1094 __CONCAT(PMTYPE, is_valid_memattr)(pmap_t pmap __unused, vm_memattr_t mode) 1095 { 1096 1097 return (mode >= 0 && mode < PAT_INDEX_SIZE && 1098 pat_index[(int)mode] >= 0); 1099 } 1100 1101 /* 1102 * Determine the appropriate bits to set in a PTE or PDE for a specified 1103 * caching mode. 1104 */ 1105 static int 1106 __CONCAT(PMTYPE, cache_bits)(pmap_t pmap, int mode, boolean_t is_pde) 1107 { 1108 int cache_bits, pat_flag, pat_idx; 1109 1110 if (!pmap_is_valid_memattr(pmap, mode)) 1111 panic("Unknown caching mode %d\n", mode); 1112 1113 /* The PAT bit is different for PTE's and PDE's. */ 1114 pat_flag = is_pde ? PG_PDE_PAT : PG_PTE_PAT; 1115 1116 /* Map the caching mode to a PAT index. */ 1117 pat_idx = pat_index[mode]; 1118 1119 /* Map the 3-bit index value into the PAT, PCD, and PWT bits. */ 1120 cache_bits = 0; 1121 if (pat_idx & 0x4) 1122 cache_bits |= pat_flag; 1123 if (pat_idx & 0x2) 1124 cache_bits |= PG_NC_PCD; 1125 if (pat_idx & 0x1) 1126 cache_bits |= PG_NC_PWT; 1127 return (cache_bits); 1128 } 1129 1130 static int 1131 pmap_pat_index(pmap_t pmap, pt_entry_t pte, bool is_pde) 1132 { 1133 int pat_flag, pat_idx; 1134 1135 if ((cpu_feature & CPUID_PAT) == 0) 1136 return (0); 1137 1138 pat_idx = 0; 1139 /* The PAT bit is different for PTE's and PDE's. */ 1140 pat_flag = is_pde ? PG_PDE_PAT : PG_PTE_PAT; 1141 1142 if ((pte & pat_flag) != 0) 1143 pat_idx |= 0x4; 1144 if ((pte & PG_NC_PCD) != 0) 1145 pat_idx |= 0x2; 1146 if ((pte & PG_NC_PWT) != 0) 1147 pat_idx |= 0x1; 1148 1149 /* See pmap_init_pat(). */ 1150 if (pat_works) { 1151 if (pat_idx == 4) 1152 pat_idx = 0; 1153 if (pat_idx == 7) 1154 pat_idx = 3; 1155 } else { 1156 /* XXXKIB */ 1157 } 1158 1159 return (pat_idx); 1160 } 1161 1162 static bool 1163 __CONCAT(PMTYPE, ps_enabled)(pmap_t pmap __unused) 1164 { 1165 1166 return (pg_ps_enabled); 1167 } 1168 1169 /* 1170 * The caller is responsible for maintaining TLB consistency. 1171 */ 1172 static void 1173 pmap_kenter_pde(vm_offset_t va, pd_entry_t newpde) 1174 { 1175 pd_entry_t *pde; 1176 1177 pde = pmap_pde(kernel_pmap, va); 1178 pde_store(pde, newpde); 1179 } 1180 1181 /* 1182 * After changing the page size for the specified virtual address in the page 1183 * table, flush the corresponding entries from the processor's TLB. Only the 1184 * calling processor's TLB is affected. 1185 * 1186 * The calling thread must be pinned to a processor. 1187 */ 1188 static void 1189 pmap_update_pde_invalidate(vm_offset_t va, pd_entry_t newpde) 1190 { 1191 1192 if ((newpde & PG_PS) == 0) 1193 /* Demotion: flush a specific 2MB page mapping. */ 1194 invlpg(va); 1195 else /* if ((newpde & PG_G) == 0) */ 1196 /* 1197 * Promotion: flush every 4KB page mapping from the TLB 1198 * because there are too many to flush individually. 1199 */ 1200 invltlb(); 1201 } 1202 1203 #ifdef SMP 1204 1205 static void 1206 pmap_curcpu_cb_dummy(pmap_t pmap __unused, vm_offset_t addr1 __unused, 1207 vm_offset_t addr2 __unused) 1208 { 1209 } 1210 1211 /* 1212 * For SMP, these functions have to use the IPI mechanism for coherence. 1213 * 1214 * N.B.: Before calling any of the following TLB invalidation functions, 1215 * the calling processor must ensure that all stores updating a non- 1216 * kernel page table are globally performed. Otherwise, another 1217 * processor could cache an old, pre-update entry without being 1218 * invalidated. This can happen one of two ways: (1) The pmap becomes 1219 * active on another processor after its pm_active field is checked by 1220 * one of the following functions but before a store updating the page 1221 * table is globally performed. (2) The pmap becomes active on another 1222 * processor before its pm_active field is checked but due to 1223 * speculative loads one of the following functions stills reads the 1224 * pmap as inactive on the other processor. 1225 * 1226 * The kernel page table is exempt because its pm_active field is 1227 * immutable. The kernel page table is always active on every 1228 * processor. 1229 */ 1230 static void 1231 pmap_invalidate_page_int(pmap_t pmap, vm_offset_t va) 1232 { 1233 cpuset_t *mask, other_cpus; 1234 u_int cpuid; 1235 1236 sched_pin(); 1237 if (pmap == kernel_pmap) { 1238 invlpg(va); 1239 mask = &all_cpus; 1240 } else if (!CPU_CMP(&pmap->pm_active, &all_cpus)) { 1241 mask = &all_cpus; 1242 } else { 1243 cpuid = PCPU_GET(cpuid); 1244 other_cpus = all_cpus; 1245 CPU_CLR(cpuid, &other_cpus); 1246 CPU_AND(&other_cpus, &other_cpus, &pmap->pm_active); 1247 mask = &other_cpus; 1248 } 1249 smp_masked_invlpg(*mask, va, pmap, pmap_curcpu_cb_dummy); 1250 sched_unpin(); 1251 } 1252 1253 /* 4k PTEs -- Chosen to exceed the total size of Broadwell L2 TLB */ 1254 #define PMAP_INVLPG_THRESHOLD (4 * 1024 * PAGE_SIZE) 1255 1256 static void 1257 pmap_invalidate_range_int(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 1258 { 1259 cpuset_t *mask, other_cpus; 1260 vm_offset_t addr; 1261 u_int cpuid; 1262 1263 if (eva - sva >= PMAP_INVLPG_THRESHOLD) { 1264 pmap_invalidate_all_int(pmap); 1265 return; 1266 } 1267 1268 sched_pin(); 1269 if (pmap == kernel_pmap) { 1270 for (addr = sva; addr < eva; addr += PAGE_SIZE) 1271 invlpg(addr); 1272 mask = &all_cpus; 1273 } else if (!CPU_CMP(&pmap->pm_active, &all_cpus)) { 1274 mask = &all_cpus; 1275 } else { 1276 cpuid = PCPU_GET(cpuid); 1277 other_cpus = all_cpus; 1278 CPU_CLR(cpuid, &other_cpus); 1279 CPU_AND(&other_cpus, &other_cpus, &pmap->pm_active); 1280 mask = &other_cpus; 1281 } 1282 smp_masked_invlpg_range(*mask, sva, eva, pmap, pmap_curcpu_cb_dummy); 1283 sched_unpin(); 1284 } 1285 1286 static void 1287 pmap_invalidate_all_int(pmap_t pmap) 1288 { 1289 cpuset_t *mask, other_cpus; 1290 u_int cpuid; 1291 1292 sched_pin(); 1293 if (pmap == kernel_pmap) { 1294 invltlb(); 1295 mask = &all_cpus; 1296 } else if (!CPU_CMP(&pmap->pm_active, &all_cpus)) { 1297 mask = &all_cpus; 1298 } else { 1299 cpuid = PCPU_GET(cpuid); 1300 other_cpus = all_cpus; 1301 CPU_CLR(cpuid, &other_cpus); 1302 CPU_AND(&other_cpus, &other_cpus, &pmap->pm_active); 1303 mask = &other_cpus; 1304 } 1305 smp_masked_invltlb(*mask, pmap, pmap_curcpu_cb_dummy); 1306 sched_unpin(); 1307 } 1308 1309 static void 1310 pmap_invalidate_cache_curcpu_cb(pmap_t pmap __unused, 1311 vm_offset_t addr1 __unused, vm_offset_t addr2 __unused) 1312 { 1313 wbinvd(); 1314 } 1315 1316 static void 1317 __CONCAT(PMTYPE, invalidate_cache)(void) 1318 { 1319 smp_cache_flush(pmap_invalidate_cache_curcpu_cb); 1320 } 1321 1322 struct pde_action { 1323 cpuset_t invalidate; /* processors that invalidate their TLB */ 1324 vm_offset_t va; 1325 pd_entry_t *pde; 1326 pd_entry_t newpde; 1327 u_int store; /* processor that updates the PDE */ 1328 }; 1329 1330 static void 1331 pmap_update_pde_kernel(void *arg) 1332 { 1333 struct pde_action *act = arg; 1334 pd_entry_t *pde; 1335 1336 if (act->store == PCPU_GET(cpuid)) { 1337 pde = pmap_pde(kernel_pmap, act->va); 1338 pde_store(pde, act->newpde); 1339 } 1340 } 1341 1342 static void 1343 pmap_update_pde_user(void *arg) 1344 { 1345 struct pde_action *act = arg; 1346 1347 if (act->store == PCPU_GET(cpuid)) 1348 pde_store(act->pde, act->newpde); 1349 } 1350 1351 static void 1352 pmap_update_pde_teardown(void *arg) 1353 { 1354 struct pde_action *act = arg; 1355 1356 if (CPU_ISSET(PCPU_GET(cpuid), &act->invalidate)) 1357 pmap_update_pde_invalidate(act->va, act->newpde); 1358 } 1359 1360 /* 1361 * Change the page size for the specified virtual address in a way that 1362 * prevents any possibility of the TLB ever having two entries that map the 1363 * same virtual address using different page sizes. This is the recommended 1364 * workaround for Erratum 383 on AMD Family 10h processors. It prevents a 1365 * machine check exception for a TLB state that is improperly diagnosed as a 1366 * hardware error. 1367 */ 1368 static void 1369 pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde) 1370 { 1371 struct pde_action act; 1372 cpuset_t active, other_cpus; 1373 u_int cpuid; 1374 1375 sched_pin(); 1376 cpuid = PCPU_GET(cpuid); 1377 other_cpus = all_cpus; 1378 CPU_CLR(cpuid, &other_cpus); 1379 if (pmap == kernel_pmap) 1380 active = all_cpus; 1381 else 1382 active = pmap->pm_active; 1383 if (CPU_OVERLAP(&active, &other_cpus)) { 1384 act.store = cpuid; 1385 act.invalidate = active; 1386 act.va = va; 1387 act.pde = pde; 1388 act.newpde = newpde; 1389 CPU_SET(cpuid, &active); 1390 smp_rendezvous_cpus(active, 1391 smp_no_rendezvous_barrier, pmap == kernel_pmap ? 1392 pmap_update_pde_kernel : pmap_update_pde_user, 1393 pmap_update_pde_teardown, &act); 1394 } else { 1395 if (pmap == kernel_pmap) 1396 pmap_kenter_pde(va, newpde); 1397 else 1398 pde_store(pde, newpde); 1399 if (CPU_ISSET(cpuid, &active)) 1400 pmap_update_pde_invalidate(va, newpde); 1401 } 1402 sched_unpin(); 1403 } 1404 #else /* !SMP */ 1405 /* 1406 * Normal, non-SMP, 486+ invalidation functions. 1407 * We inline these within pmap.c for speed. 1408 */ 1409 static void 1410 pmap_invalidate_page_int(pmap_t pmap, vm_offset_t va) 1411 { 1412 1413 if (pmap == kernel_pmap) 1414 invlpg(va); 1415 } 1416 1417 static void 1418 pmap_invalidate_range_int(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 1419 { 1420 vm_offset_t addr; 1421 1422 if (pmap == kernel_pmap) 1423 for (addr = sva; addr < eva; addr += PAGE_SIZE) 1424 invlpg(addr); 1425 } 1426 1427 static void 1428 pmap_invalidate_all_int(pmap_t pmap) 1429 { 1430 1431 if (pmap == kernel_pmap) 1432 invltlb(); 1433 } 1434 1435 static void 1436 __CONCAT(PMTYPE, invalidate_cache)(void) 1437 { 1438 1439 wbinvd(); 1440 } 1441 1442 static void 1443 pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde) 1444 { 1445 1446 if (pmap == kernel_pmap) 1447 pmap_kenter_pde(va, newpde); 1448 else 1449 pde_store(pde, newpde); 1450 if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active)) 1451 pmap_update_pde_invalidate(va, newpde); 1452 } 1453 #endif /* !SMP */ 1454 1455 static void 1456 __CONCAT(PMTYPE, invalidate_page)(pmap_t pmap, vm_offset_t va) 1457 { 1458 1459 pmap_invalidate_page_int(pmap, va); 1460 } 1461 1462 static void 1463 __CONCAT(PMTYPE, invalidate_range)(pmap_t pmap, vm_offset_t sva, 1464 vm_offset_t eva) 1465 { 1466 1467 pmap_invalidate_range_int(pmap, sva, eva); 1468 } 1469 1470 static void 1471 __CONCAT(PMTYPE, invalidate_all)(pmap_t pmap) 1472 { 1473 1474 pmap_invalidate_all_int(pmap); 1475 } 1476 1477 static void 1478 pmap_invalidate_pde_page(pmap_t pmap, vm_offset_t va, pd_entry_t pde) 1479 { 1480 1481 /* 1482 * When the PDE has PG_PROMOTED set, the 2- or 4MB page mapping was 1483 * created by a promotion that did not invalidate the 512 or 1024 4KB 1484 * page mappings that might exist in the TLB. Consequently, at this 1485 * point, the TLB may hold both 4KB and 2- or 4MB page mappings for 1486 * the address range [va, va + NBPDR). Therefore, the entire range 1487 * must be invalidated here. In contrast, when PG_PROMOTED is clear, 1488 * the TLB will not hold any 4KB page mappings for the address range 1489 * [va, va + NBPDR), and so a single INVLPG suffices to invalidate the 1490 * 2- or 4MB page mapping from the TLB. 1491 */ 1492 if ((pde & PG_PROMOTED) != 0) 1493 pmap_invalidate_range_int(pmap, va, va + NBPDR - 1); 1494 else 1495 pmap_invalidate_page_int(pmap, va); 1496 } 1497 1498 /* 1499 * Are we current address space or kernel? 1500 */ 1501 static __inline int 1502 pmap_is_current(pmap_t pmap) 1503 { 1504 1505 return (pmap == kernel_pmap); 1506 } 1507 1508 /* 1509 * If the given pmap is not the current or kernel pmap, the returned pte must 1510 * be released by passing it to pmap_pte_release(). 1511 */ 1512 static pt_entry_t * 1513 __CONCAT(PMTYPE, pte)(pmap_t pmap, vm_offset_t va) 1514 { 1515 pd_entry_t newpf; 1516 pd_entry_t *pde; 1517 1518 pde = pmap_pde(pmap, va); 1519 if (*pde & PG_PS) 1520 return (pde); 1521 if (*pde != 0) { 1522 /* are we current address space or kernel? */ 1523 if (pmap_is_current(pmap)) 1524 return (vtopte(va)); 1525 mtx_lock(&PMAP2mutex); 1526 newpf = *pde & PG_FRAME; 1527 if ((*PMAP2 & PG_FRAME) != newpf) { 1528 *PMAP2 = newpf | PG_RW | PG_V | PG_A | PG_M; 1529 pmap_invalidate_page_int(kernel_pmap, 1530 (vm_offset_t)PADDR2); 1531 } 1532 return (PADDR2 + (i386_btop(va) & (NPTEPG - 1))); 1533 } 1534 return (NULL); 1535 } 1536 1537 /* 1538 * Releases a pte that was obtained from pmap_pte(). Be prepared for the pte 1539 * being NULL. 1540 */ 1541 static __inline void 1542 pmap_pte_release(pt_entry_t *pte) 1543 { 1544 1545 if ((pt_entry_t *)((vm_offset_t)pte & ~PAGE_MASK) == PADDR2) 1546 mtx_unlock(&PMAP2mutex); 1547 } 1548 1549 /* 1550 * NB: The sequence of updating a page table followed by accesses to the 1551 * corresponding pages is subject to the situation described in the "AMD64 1552 * Architecture Programmer's Manual Volume 2: System Programming" rev. 3.23, 1553 * "7.3.1 Special Coherency Considerations". Therefore, issuing the INVLPG 1554 * right after modifying the PTE bits is crucial. 1555 */ 1556 static __inline void 1557 invlcaddr(void *caddr) 1558 { 1559 1560 invlpg((u_int)caddr); 1561 } 1562 1563 /* 1564 * Super fast pmap_pte routine best used when scanning 1565 * the pv lists. This eliminates many coarse-grained 1566 * invltlb calls. Note that many of the pv list 1567 * scans are across different pmaps. It is very wasteful 1568 * to do an entire invltlb for checking a single mapping. 1569 * 1570 * If the given pmap is not the current pmap, pvh_global_lock 1571 * must be held and curthread pinned to a CPU. 1572 */ 1573 static pt_entry_t * 1574 pmap_pte_quick(pmap_t pmap, vm_offset_t va) 1575 { 1576 pd_entry_t newpf; 1577 pd_entry_t *pde; 1578 1579 pde = pmap_pde(pmap, va); 1580 if (*pde & PG_PS) 1581 return (pde); 1582 if (*pde != 0) { 1583 /* are we current address space or kernel? */ 1584 if (pmap_is_current(pmap)) 1585 return (vtopte(va)); 1586 rw_assert(&pvh_global_lock, RA_WLOCKED); 1587 KASSERT(curthread->td_pinned > 0, ("curthread not pinned")); 1588 newpf = *pde & PG_FRAME; 1589 if ((*PMAP1 & PG_FRAME) != newpf) { 1590 *PMAP1 = newpf | PG_RW | PG_V | PG_A | PG_M; 1591 #ifdef SMP 1592 PMAP1cpu = PCPU_GET(cpuid); 1593 #endif 1594 invlcaddr(PADDR1); 1595 PMAP1changed++; 1596 } else 1597 #ifdef SMP 1598 if (PMAP1cpu != PCPU_GET(cpuid)) { 1599 PMAP1cpu = PCPU_GET(cpuid); 1600 invlcaddr(PADDR1); 1601 PMAP1changedcpu++; 1602 } else 1603 #endif 1604 PMAP1unchanged++; 1605 return (PADDR1 + (i386_btop(va) & (NPTEPG - 1))); 1606 } 1607 return (0); 1608 } 1609 1610 static pt_entry_t * 1611 pmap_pte_quick3(pmap_t pmap, vm_offset_t va) 1612 { 1613 pd_entry_t newpf; 1614 pd_entry_t *pde; 1615 1616 pde = pmap_pde(pmap, va); 1617 if (*pde & PG_PS) 1618 return (pde); 1619 if (*pde != 0) { 1620 rw_assert(&pvh_global_lock, RA_WLOCKED); 1621 KASSERT(curthread->td_pinned > 0, ("curthread not pinned")); 1622 newpf = *pde & PG_FRAME; 1623 if ((*PMAP3 & PG_FRAME) != newpf) { 1624 *PMAP3 = newpf | PG_RW | PG_V | PG_A | PG_M; 1625 #ifdef SMP 1626 PMAP3cpu = PCPU_GET(cpuid); 1627 #endif 1628 invlcaddr(PADDR3); 1629 PMAP1changed++; 1630 } else 1631 #ifdef SMP 1632 if (PMAP3cpu != PCPU_GET(cpuid)) { 1633 PMAP3cpu = PCPU_GET(cpuid); 1634 invlcaddr(PADDR3); 1635 PMAP1changedcpu++; 1636 } else 1637 #endif 1638 PMAP1unchanged++; 1639 return (PADDR3 + (i386_btop(va) & (NPTEPG - 1))); 1640 } 1641 return (0); 1642 } 1643 1644 static pt_entry_t 1645 pmap_pte_ufast(pmap_t pmap, vm_offset_t va, pd_entry_t pde) 1646 { 1647 pt_entry_t *eh_ptep, pte, *ptep; 1648 1649 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1650 pde &= PG_FRAME; 1651 critical_enter(); 1652 eh_ptep = (pt_entry_t *)PCPU_GET(pmap_eh_ptep); 1653 if ((*eh_ptep & PG_FRAME) != pde) { 1654 *eh_ptep = pde | PG_RW | PG_V | PG_A | PG_M; 1655 invlcaddr((void *)PCPU_GET(pmap_eh_va)); 1656 } 1657 ptep = (pt_entry_t *)PCPU_GET(pmap_eh_va) + (i386_btop(va) & 1658 (NPTEPG - 1)); 1659 pte = *ptep; 1660 critical_exit(); 1661 return (pte); 1662 } 1663 1664 /* 1665 * Extract from the kernel page table the physical address that is mapped by 1666 * the given virtual address "va". 1667 * 1668 * This function may be used before pmap_bootstrap() is called. 1669 */ 1670 static vm_paddr_t 1671 __CONCAT(PMTYPE, kextract)(vm_offset_t va) 1672 { 1673 vm_paddr_t pa; 1674 1675 if ((pa = pte_load(&PTD[va >> PDRSHIFT])) & PG_PS) { 1676 pa = (pa & PG_PS_FRAME) | (va & PDRMASK); 1677 } else { 1678 /* 1679 * Beware of a concurrent promotion that changes the PDE at 1680 * this point! For example, vtopte() must not be used to 1681 * access the PTE because it would use the new PDE. It is, 1682 * however, safe to use the old PDE because the page table 1683 * page is preserved by the promotion. 1684 */ 1685 pa = KPTmap[i386_btop(va)]; 1686 pa = (pa & PG_FRAME) | (va & PAGE_MASK); 1687 } 1688 return (pa); 1689 } 1690 1691 /* 1692 * Routine: pmap_extract 1693 * Function: 1694 * Extract the physical page address associated 1695 * with the given map/virtual_address pair. 1696 */ 1697 static vm_paddr_t 1698 __CONCAT(PMTYPE, extract)(pmap_t pmap, vm_offset_t va) 1699 { 1700 vm_paddr_t rtval; 1701 pt_entry_t pte; 1702 pd_entry_t pde; 1703 1704 rtval = 0; 1705 PMAP_LOCK(pmap); 1706 pde = pmap->pm_pdir[va >> PDRSHIFT]; 1707 if (pde != 0) { 1708 if ((pde & PG_PS) != 0) 1709 rtval = (pde & PG_PS_FRAME) | (va & PDRMASK); 1710 else { 1711 pte = pmap_pte_ufast(pmap, va, pde); 1712 rtval = (pte & PG_FRAME) | (va & PAGE_MASK); 1713 } 1714 } 1715 PMAP_UNLOCK(pmap); 1716 return (rtval); 1717 } 1718 1719 /* 1720 * Routine: pmap_extract_and_hold 1721 * Function: 1722 * Atomically extract and hold the physical page 1723 * with the given pmap and virtual address pair 1724 * if that mapping permits the given protection. 1725 */ 1726 static vm_page_t 1727 __CONCAT(PMTYPE, extract_and_hold)(pmap_t pmap, vm_offset_t va, vm_prot_t prot) 1728 { 1729 pd_entry_t pde; 1730 pt_entry_t pte; 1731 vm_page_t m; 1732 1733 m = NULL; 1734 PMAP_LOCK(pmap); 1735 pde = *pmap_pde(pmap, va); 1736 if (pde != 0) { 1737 if (pde & PG_PS) { 1738 if ((pde & PG_RW) || (prot & VM_PROT_WRITE) == 0) 1739 m = PHYS_TO_VM_PAGE((pde & PG_PS_FRAME) | 1740 (va & PDRMASK)); 1741 } else { 1742 pte = pmap_pte_ufast(pmap, va, pde); 1743 if (pte != 0 && 1744 ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0)) 1745 m = PHYS_TO_VM_PAGE(pte & PG_FRAME); 1746 } 1747 if (m != NULL && !vm_page_wire_mapped(m)) 1748 m = NULL; 1749 } 1750 PMAP_UNLOCK(pmap); 1751 return (m); 1752 } 1753 1754 /*************************************************** 1755 * Low level mapping routines..... 1756 ***************************************************/ 1757 1758 /* 1759 * Add a wired page to the kva. 1760 * Note: not SMP coherent. 1761 * 1762 * This function may be used before pmap_bootstrap() is called. 1763 */ 1764 static void 1765 __CONCAT(PMTYPE, kenter)(vm_offset_t va, vm_paddr_t pa) 1766 { 1767 pt_entry_t *pte; 1768 1769 pte = vtopte(va); 1770 pte_store(pte, pa | PG_RW | PG_V); 1771 } 1772 1773 static __inline void 1774 pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode) 1775 { 1776 pt_entry_t *pte; 1777 1778 pte = vtopte(va); 1779 pte_store(pte, pa | PG_RW | PG_V | pmap_cache_bits(kernel_pmap, 1780 mode, 0)); 1781 } 1782 1783 /* 1784 * Remove a page from the kernel pagetables. 1785 * Note: not SMP coherent. 1786 * 1787 * This function may be used before pmap_bootstrap() is called. 1788 */ 1789 static void 1790 __CONCAT(PMTYPE, kremove)(vm_offset_t va) 1791 { 1792 pt_entry_t *pte; 1793 1794 pte = vtopte(va); 1795 pte_clear(pte); 1796 } 1797 1798 /* 1799 * Used to map a range of physical addresses into kernel 1800 * virtual address space. 1801 * 1802 * The value passed in '*virt' is a suggested virtual address for 1803 * the mapping. Architectures which can support a direct-mapped 1804 * physical to virtual region can return the appropriate address 1805 * within that region, leaving '*virt' unchanged. Other 1806 * architectures should map the pages starting at '*virt' and 1807 * update '*virt' with the first usable address after the mapped 1808 * region. 1809 */ 1810 static vm_offset_t 1811 __CONCAT(PMTYPE, map)(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, 1812 int prot) 1813 { 1814 vm_offset_t va, sva; 1815 vm_paddr_t superpage_offset; 1816 pd_entry_t newpde; 1817 1818 va = *virt; 1819 /* 1820 * Does the physical address range's size and alignment permit at 1821 * least one superpage mapping to be created? 1822 */ 1823 superpage_offset = start & PDRMASK; 1824 if ((end - start) - ((NBPDR - superpage_offset) & PDRMASK) >= NBPDR) { 1825 /* 1826 * Increase the starting virtual address so that its alignment 1827 * does not preclude the use of superpage mappings. 1828 */ 1829 if ((va & PDRMASK) < superpage_offset) 1830 va = (va & ~PDRMASK) + superpage_offset; 1831 else if ((va & PDRMASK) > superpage_offset) 1832 va = ((va + PDRMASK) & ~PDRMASK) + superpage_offset; 1833 } 1834 sva = va; 1835 while (start < end) { 1836 if ((start & PDRMASK) == 0 && end - start >= NBPDR && 1837 pseflag != 0) { 1838 KASSERT((va & PDRMASK) == 0, 1839 ("pmap_map: misaligned va %#x", va)); 1840 newpde = start | PG_PS | PG_RW | PG_V; 1841 pmap_kenter_pde(va, newpde); 1842 va += NBPDR; 1843 start += NBPDR; 1844 } else { 1845 pmap_kenter(va, start); 1846 va += PAGE_SIZE; 1847 start += PAGE_SIZE; 1848 } 1849 } 1850 pmap_invalidate_range_int(kernel_pmap, sva, va); 1851 *virt = va; 1852 return (sva); 1853 } 1854 1855 /* 1856 * Add a list of wired pages to the kva 1857 * this routine is only used for temporary 1858 * kernel mappings that do not need to have 1859 * page modification or references recorded. 1860 * Note that old mappings are simply written 1861 * over. The page *must* be wired. 1862 * Note: SMP coherent. Uses a ranged shootdown IPI. 1863 */ 1864 static void 1865 __CONCAT(PMTYPE, qenter)(vm_offset_t sva, vm_page_t *ma, int count) 1866 { 1867 pt_entry_t *endpte, oldpte, pa, *pte; 1868 vm_page_t m; 1869 1870 oldpte = 0; 1871 pte = vtopte(sva); 1872 endpte = pte + count; 1873 while (pte < endpte) { 1874 m = *ma++; 1875 pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(kernel_pmap, 1876 m->md.pat_mode, 0); 1877 if ((*pte & (PG_FRAME | PG_PTE_CACHE)) != pa) { 1878 oldpte |= *pte; 1879 pte_store(pte, pa | pg_nx | PG_RW | PG_V); 1880 } 1881 pte++; 1882 } 1883 if (__predict_false((oldpte & PG_V) != 0)) 1884 pmap_invalidate_range_int(kernel_pmap, sva, sva + count * 1885 PAGE_SIZE); 1886 } 1887 1888 /* 1889 * This routine tears out page mappings from the 1890 * kernel -- it is meant only for temporary mappings. 1891 * Note: SMP coherent. Uses a ranged shootdown IPI. 1892 */ 1893 static void 1894 __CONCAT(PMTYPE, qremove)(vm_offset_t sva, int count) 1895 { 1896 vm_offset_t va; 1897 1898 va = sva; 1899 while (count-- > 0) { 1900 pmap_kremove(va); 1901 va += PAGE_SIZE; 1902 } 1903 pmap_invalidate_range_int(kernel_pmap, sva, va); 1904 } 1905 1906 /*************************************************** 1907 * Page table page management routines..... 1908 ***************************************************/ 1909 /* 1910 * Schedule the specified unused page table page to be freed. Specifically, 1911 * add the page to the specified list of pages that will be released to the 1912 * physical memory manager after the TLB has been updated. 1913 */ 1914 static __inline void 1915 pmap_add_delayed_free_list(vm_page_t m, struct spglist *free, 1916 boolean_t set_PG_ZERO) 1917 { 1918 1919 if (set_PG_ZERO) 1920 m->flags |= PG_ZERO; 1921 else 1922 m->flags &= ~PG_ZERO; 1923 SLIST_INSERT_HEAD(free, m, plinks.s.ss); 1924 } 1925 1926 /* 1927 * Inserts the specified page table page into the specified pmap's collection 1928 * of idle page table pages. Each of a pmap's page table pages is responsible 1929 * for mapping a distinct range of virtual addresses. The pmap's collection is 1930 * ordered by this virtual address range. 1931 * 1932 * If "promoted" is false, then the page table page "mpte" must be zero filled. 1933 */ 1934 static __inline int 1935 pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte, bool promoted) 1936 { 1937 1938 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1939 mpte->valid = promoted ? VM_PAGE_BITS_ALL : 0; 1940 return (vm_radix_insert(&pmap->pm_root, mpte)); 1941 } 1942 1943 /* 1944 * Removes the page table page mapping the specified virtual address from the 1945 * specified pmap's collection of idle page table pages, and returns it. 1946 * Otherwise, returns NULL if there is no page table page corresponding to the 1947 * specified virtual address. 1948 */ 1949 static __inline vm_page_t 1950 pmap_remove_pt_page(pmap_t pmap, vm_offset_t va) 1951 { 1952 1953 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1954 return (vm_radix_remove(&pmap->pm_root, va >> PDRSHIFT)); 1955 } 1956 1957 /* 1958 * Decrements a page table page's reference count, which is used to record the 1959 * number of valid page table entries within the page. If the reference count 1960 * drops to zero, then the page table page is unmapped. Returns TRUE if the 1961 * page table page was unmapped and FALSE otherwise. 1962 */ 1963 static inline boolean_t 1964 pmap_unwire_ptp(pmap_t pmap, vm_page_t m, struct spglist *free) 1965 { 1966 1967 --m->ref_count; 1968 if (m->ref_count == 0) { 1969 _pmap_unwire_ptp(pmap, m, free); 1970 return (TRUE); 1971 } else 1972 return (FALSE); 1973 } 1974 1975 static void 1976 _pmap_unwire_ptp(pmap_t pmap, vm_page_t m, struct spglist *free) 1977 { 1978 1979 /* 1980 * unmap the page table page 1981 */ 1982 pmap->pm_pdir[m->pindex] = 0; 1983 --pmap->pm_stats.resident_count; 1984 1985 /* 1986 * There is not need to invalidate the recursive mapping since 1987 * we never instantiate such mapping for the usermode pmaps, 1988 * and never remove page table pages from the kernel pmap. 1989 * Put page on a list so that it is released since all TLB 1990 * shootdown is done. 1991 */ 1992 MPASS(pmap != kernel_pmap); 1993 pmap_add_delayed_free_list(m, free, TRUE); 1994 } 1995 1996 /* 1997 * After removing a page table entry, this routine is used to 1998 * conditionally free the page, and manage the reference count. 1999 */ 2000 static int 2001 pmap_unuse_pt(pmap_t pmap, vm_offset_t va, struct spglist *free) 2002 { 2003 pd_entry_t ptepde; 2004 vm_page_t mpte; 2005 2006 if (pmap == kernel_pmap) 2007 return (0); 2008 ptepde = *pmap_pde(pmap, va); 2009 mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME); 2010 return (pmap_unwire_ptp(pmap, mpte, free)); 2011 } 2012 2013 /* 2014 * Release a page table page reference after a failed attempt to create a 2015 * mapping. 2016 */ 2017 static void 2018 pmap_abort_ptp(pmap_t pmap, vm_offset_t va, vm_page_t mpte) 2019 { 2020 struct spglist free; 2021 2022 SLIST_INIT(&free); 2023 if (pmap_unwire_ptp(pmap, mpte, &free)) { 2024 /* 2025 * Although "va" was never mapped, paging-structure caches 2026 * could nonetheless have entries that refer to the freed 2027 * page table pages. Invalidate those entries. 2028 */ 2029 pmap_invalidate_page_int(pmap, va); 2030 vm_page_free_pages_toq(&free, true); 2031 } 2032 } 2033 2034 /* 2035 * Initialize the pmap for the swapper process. 2036 */ 2037 static void 2038 __CONCAT(PMTYPE, pinit0)(pmap_t pmap) 2039 { 2040 2041 PMAP_LOCK_INIT(pmap); 2042 pmap->pm_pdir = IdlePTD; 2043 #ifdef PMAP_PAE_COMP 2044 pmap->pm_pdpt = IdlePDPT; 2045 #endif 2046 vm_radix_init(&pmap->pm_root); 2047 CPU_ZERO(&pmap->pm_active); 2048 TAILQ_INIT(&pmap->pm_pvchunk); 2049 bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 2050 pmap_activate_boot(pmap); 2051 } 2052 2053 /* 2054 * Initialize a preallocated and zeroed pmap structure, 2055 * such as one in a vmspace structure. 2056 */ 2057 static int 2058 __CONCAT(PMTYPE, pinit)(pmap_t pmap) 2059 { 2060 int i; 2061 2062 /* 2063 * No need to allocate page table space yet but we do need a valid 2064 * page directory table. 2065 */ 2066 if (pmap->pm_pdir == NULL) { 2067 pmap->pm_pdir = (pd_entry_t *)kva_alloc(NBPTD); 2068 if (pmap->pm_pdir == NULL) 2069 return (0); 2070 #ifdef PMAP_PAE_COMP 2071 pmap->pm_pdpt = uma_zalloc(pdptzone, M_WAITOK | M_ZERO); 2072 KASSERT(((vm_offset_t)pmap->pm_pdpt & 2073 ((NPGPTD * sizeof(pdpt_entry_t)) - 1)) == 0, 2074 ("pmap_pinit: pdpt misaligned")); 2075 KASSERT(pmap_kextract((vm_offset_t)pmap->pm_pdpt) < (4ULL<<30), 2076 ("pmap_pinit: pdpt above 4g")); 2077 #endif 2078 vm_radix_init(&pmap->pm_root); 2079 } 2080 KASSERT(vm_radix_is_empty(&pmap->pm_root), 2081 ("pmap_pinit: pmap has reserved page table page(s)")); 2082 2083 /* 2084 * allocate the page directory page(s) 2085 */ 2086 for (i = 0; i < NPGPTD; i++) { 2087 pmap->pm_ptdpg[i] = vm_page_alloc_noobj(VM_ALLOC_WIRED | 2088 VM_ALLOC_ZERO | VM_ALLOC_WAITOK); 2089 #ifdef PMAP_PAE_COMP 2090 pmap->pm_pdpt[i] = VM_PAGE_TO_PHYS(pmap->pm_ptdpg[i]) | PG_V; 2091 #endif 2092 } 2093 2094 pmap_qenter((vm_offset_t)pmap->pm_pdir, pmap->pm_ptdpg, NPGPTD); 2095 #ifdef PMAP_PAE_COMP 2096 if ((cpu_feature & CPUID_PAT) == 0) { 2097 pmap_invalidate_cache_range( 2098 trunc_page((vm_offset_t)pmap->pm_pdpt), 2099 round_page((vm_offset_t)pmap->pm_pdpt + 2100 NPGPTD * sizeof(pdpt_entry_t))); 2101 } 2102 #endif 2103 2104 /* Install the trampoline mapping. */ 2105 pmap->pm_pdir[TRPTDI] = PTD[TRPTDI]; 2106 2107 CPU_ZERO(&pmap->pm_active); 2108 TAILQ_INIT(&pmap->pm_pvchunk); 2109 bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 2110 2111 return (1); 2112 } 2113 2114 /* 2115 * this routine is called if the page table page is not 2116 * mapped correctly. 2117 */ 2118 static vm_page_t 2119 _pmap_allocpte(pmap_t pmap, u_int ptepindex, u_int flags) 2120 { 2121 vm_paddr_t ptepa; 2122 vm_page_t m; 2123 2124 /* 2125 * Allocate a page table page. 2126 */ 2127 if ((m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) { 2128 if ((flags & PMAP_ENTER_NOSLEEP) == 0) { 2129 PMAP_UNLOCK(pmap); 2130 rw_wunlock(&pvh_global_lock); 2131 vm_wait(NULL); 2132 rw_wlock(&pvh_global_lock); 2133 PMAP_LOCK(pmap); 2134 } 2135 2136 /* 2137 * Indicate the need to retry. While waiting, the page table 2138 * page may have been allocated. 2139 */ 2140 return (NULL); 2141 } 2142 m->pindex = ptepindex; 2143 2144 /* 2145 * Map the pagetable page into the process address space, if 2146 * it isn't already there. 2147 */ 2148 2149 pmap->pm_stats.resident_count++; 2150 2151 ptepa = VM_PAGE_TO_PHYS(m); 2152 KASSERT((pmap->pm_pdir[ptepindex] & PG_V) == 0, 2153 ("%s: page directory entry %#jx is valid", 2154 __func__, (uintmax_t)pmap->pm_pdir[ptepindex])); 2155 pmap->pm_pdir[ptepindex] = 2156 (pd_entry_t)(ptepa | PG_U | PG_RW | PG_V | PG_A | PG_M); 2157 2158 return (m); 2159 } 2160 2161 static vm_page_t 2162 pmap_allocpte(pmap_t pmap, vm_offset_t va, u_int flags) 2163 { 2164 u_int ptepindex; 2165 pd_entry_t ptepa; 2166 vm_page_t m; 2167 2168 /* 2169 * Calculate pagetable page index 2170 */ 2171 ptepindex = va >> PDRSHIFT; 2172 retry: 2173 /* 2174 * Get the page directory entry 2175 */ 2176 ptepa = pmap->pm_pdir[ptepindex]; 2177 2178 /* 2179 * This supports switching from a 4MB page to a 2180 * normal 4K page. 2181 */ 2182 if (ptepa & PG_PS) { 2183 (void)pmap_demote_pde(pmap, &pmap->pm_pdir[ptepindex], va); 2184 ptepa = pmap->pm_pdir[ptepindex]; 2185 } 2186 2187 /* 2188 * If the page table page is mapped, we just increment the 2189 * hold count, and activate it. 2190 */ 2191 if (ptepa) { 2192 m = PHYS_TO_VM_PAGE(ptepa & PG_FRAME); 2193 m->ref_count++; 2194 } else { 2195 /* 2196 * Here if the pte page isn't mapped, or if it has 2197 * been deallocated. 2198 */ 2199 m = _pmap_allocpte(pmap, ptepindex, flags); 2200 if (m == NULL && (flags & PMAP_ENTER_NOSLEEP) == 0) 2201 goto retry; 2202 } 2203 return (m); 2204 } 2205 2206 /*************************************************** 2207 * Pmap allocation/deallocation routines. 2208 ***************************************************/ 2209 2210 /* 2211 * Release any resources held by the given physical map. 2212 * Called when a pmap initialized by pmap_pinit is being released. 2213 * Should only be called if the map contains no valid mappings. 2214 */ 2215 static void 2216 __CONCAT(PMTYPE, release)(pmap_t pmap) 2217 { 2218 vm_page_t m; 2219 int i; 2220 2221 KASSERT(pmap->pm_stats.resident_count == 0, 2222 ("pmap_release: pmap resident count %ld != 0", 2223 pmap->pm_stats.resident_count)); 2224 KASSERT(vm_radix_is_empty(&pmap->pm_root), 2225 ("pmap_release: pmap has reserved page table page(s)")); 2226 KASSERT(CPU_EMPTY(&pmap->pm_active), 2227 ("releasing active pmap %p", pmap)); 2228 2229 pmap_qremove((vm_offset_t)pmap->pm_pdir, NPGPTD); 2230 2231 for (i = 0; i < NPGPTD; i++) { 2232 m = pmap->pm_ptdpg[i]; 2233 #ifdef PMAP_PAE_COMP 2234 KASSERT(VM_PAGE_TO_PHYS(m) == (pmap->pm_pdpt[i] & PG_FRAME), 2235 ("pmap_release: got wrong ptd page")); 2236 #endif 2237 vm_page_unwire_noq(m); 2238 vm_page_free(m); 2239 } 2240 } 2241 2242 /* 2243 * grow the number of kernel page table entries, if needed 2244 */ 2245 static void 2246 __CONCAT(PMTYPE, growkernel)(vm_offset_t addr) 2247 { 2248 vm_paddr_t ptppaddr; 2249 vm_page_t nkpg; 2250 pd_entry_t newpdir; 2251 2252 mtx_assert(&kernel_map->system_mtx, MA_OWNED); 2253 addr = roundup2(addr, NBPDR); 2254 if (addr - 1 >= vm_map_max(kernel_map)) 2255 addr = vm_map_max(kernel_map); 2256 while (kernel_vm_end < addr) { 2257 if (pdir_pde(PTD, kernel_vm_end)) { 2258 kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK; 2259 if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) { 2260 kernel_vm_end = vm_map_max(kernel_map); 2261 break; 2262 } 2263 continue; 2264 } 2265 2266 nkpg = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED | 2267 VM_ALLOC_ZERO); 2268 if (nkpg == NULL) 2269 panic("pmap_growkernel: no memory to grow kernel"); 2270 nkpg->pindex = kernel_vm_end >> PDRSHIFT; 2271 nkpt++; 2272 2273 ptppaddr = VM_PAGE_TO_PHYS(nkpg); 2274 newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M); 2275 pdir_pde(KPTD, kernel_vm_end) = newpdir; 2276 2277 pmap_kenter_pde(kernel_vm_end, newpdir); 2278 kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK; 2279 if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) { 2280 kernel_vm_end = vm_map_max(kernel_map); 2281 break; 2282 } 2283 } 2284 } 2285 2286 /*************************************************** 2287 * page management routines. 2288 ***************************************************/ 2289 2290 CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE); 2291 CTASSERT(_NPCM == 11); 2292 CTASSERT(_NPCPV == 336); 2293 2294 static __inline struct pv_chunk * 2295 pv_to_chunk(pv_entry_t pv) 2296 { 2297 2298 return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK)); 2299 } 2300 2301 #define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap) 2302 2303 #define PC_FREE0_9 0xfffffffful /* Free values for index 0 through 9 */ 2304 #define PC_FREE10 0x0000fffful /* Free values for index 10 */ 2305 2306 static const uint32_t pc_freemask[_NPCM] = { 2307 PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, 2308 PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, 2309 PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, 2310 PC_FREE0_9, PC_FREE10 2311 }; 2312 2313 #ifdef PV_STATS 2314 extern int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail; 2315 extern long pv_entry_frees, pv_entry_allocs; 2316 extern int pv_entry_spare; 2317 #endif 2318 2319 /* 2320 * We are in a serious low memory condition. Resort to 2321 * drastic measures to free some pages so we can allocate 2322 * another pv entry chunk. 2323 */ 2324 static vm_page_t 2325 pmap_pv_reclaim(pmap_t locked_pmap) 2326 { 2327 struct pch newtail; 2328 struct pv_chunk *pc; 2329 struct md_page *pvh; 2330 pd_entry_t *pde; 2331 pmap_t pmap; 2332 pt_entry_t *pte, tpte; 2333 pv_entry_t pv; 2334 vm_offset_t va; 2335 vm_page_t m, m_pc; 2336 struct spglist free; 2337 uint32_t inuse; 2338 int bit, field, freed; 2339 2340 PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED); 2341 pmap = NULL; 2342 m_pc = NULL; 2343 SLIST_INIT(&free); 2344 TAILQ_INIT(&newtail); 2345 while ((pc = TAILQ_FIRST(&pv_chunks)) != NULL && (pv_vafree == 0 || 2346 SLIST_EMPTY(&free))) { 2347 TAILQ_REMOVE(&pv_chunks, pc, pc_lru); 2348 if (pmap != pc->pc_pmap) { 2349 if (pmap != NULL) { 2350 pmap_invalidate_all_int(pmap); 2351 if (pmap != locked_pmap) 2352 PMAP_UNLOCK(pmap); 2353 } 2354 pmap = pc->pc_pmap; 2355 /* Avoid deadlock and lock recursion. */ 2356 if (pmap > locked_pmap) 2357 PMAP_LOCK(pmap); 2358 else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap)) { 2359 pmap = NULL; 2360 TAILQ_INSERT_TAIL(&newtail, pc, pc_lru); 2361 continue; 2362 } 2363 } 2364 2365 /* 2366 * Destroy every non-wired, 4 KB page mapping in the chunk. 2367 */ 2368 freed = 0; 2369 for (field = 0; field < _NPCM; field++) { 2370 for (inuse = ~pc->pc_map[field] & pc_freemask[field]; 2371 inuse != 0; inuse &= ~(1UL << bit)) { 2372 bit = bsfl(inuse); 2373 pv = &pc->pc_pventry[field * 32 + bit]; 2374 va = pv->pv_va; 2375 pde = pmap_pde(pmap, va); 2376 if ((*pde & PG_PS) != 0) 2377 continue; 2378 pte = __CONCAT(PMTYPE, pte)(pmap, va); 2379 tpte = *pte; 2380 if ((tpte & PG_W) == 0) 2381 tpte = pte_load_clear(pte); 2382 pmap_pte_release(pte); 2383 if ((tpte & PG_W) != 0) 2384 continue; 2385 KASSERT(tpte != 0, 2386 ("pmap_pv_reclaim: pmap %p va %x zero pte", 2387 pmap, va)); 2388 if ((tpte & PG_G) != 0) 2389 pmap_invalidate_page_int(pmap, va); 2390 m = PHYS_TO_VM_PAGE(tpte & PG_FRAME); 2391 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 2392 vm_page_dirty(m); 2393 if ((tpte & PG_A) != 0) 2394 vm_page_aflag_set(m, PGA_REFERENCED); 2395 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); 2396 if (TAILQ_EMPTY(&m->md.pv_list) && 2397 (m->flags & PG_FICTITIOUS) == 0) { 2398 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 2399 if (TAILQ_EMPTY(&pvh->pv_list)) { 2400 vm_page_aflag_clear(m, 2401 PGA_WRITEABLE); 2402 } 2403 } 2404 pc->pc_map[field] |= 1UL << bit; 2405 pmap_unuse_pt(pmap, va, &free); 2406 freed++; 2407 } 2408 } 2409 if (freed == 0) { 2410 TAILQ_INSERT_TAIL(&newtail, pc, pc_lru); 2411 continue; 2412 } 2413 /* Every freed mapping is for a 4 KB page. */ 2414 pmap->pm_stats.resident_count -= freed; 2415 PV_STAT(pv_entry_frees += freed); 2416 PV_STAT(pv_entry_spare += freed); 2417 pv_entry_count -= freed; 2418 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2419 for (field = 0; field < _NPCM; field++) 2420 if (pc->pc_map[field] != pc_freemask[field]) { 2421 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, 2422 pc_list); 2423 TAILQ_INSERT_TAIL(&newtail, pc, pc_lru); 2424 2425 /* 2426 * One freed pv entry in locked_pmap is 2427 * sufficient. 2428 */ 2429 if (pmap == locked_pmap) 2430 goto out; 2431 break; 2432 } 2433 if (field == _NPCM) { 2434 PV_STAT(pv_entry_spare -= _NPCPV); 2435 PV_STAT(pc_chunk_count--); 2436 PV_STAT(pc_chunk_frees++); 2437 /* Entire chunk is free; return it. */ 2438 m_pc = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc)); 2439 pmap_qremove((vm_offset_t)pc, 1); 2440 pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc); 2441 break; 2442 } 2443 } 2444 out: 2445 TAILQ_CONCAT(&pv_chunks, &newtail, pc_lru); 2446 if (pmap != NULL) { 2447 pmap_invalidate_all_int(pmap); 2448 if (pmap != locked_pmap) 2449 PMAP_UNLOCK(pmap); 2450 } 2451 if (m_pc == NULL && pv_vafree != 0 && SLIST_EMPTY(&free)) { 2452 m_pc = SLIST_FIRST(&free); 2453 SLIST_REMOVE_HEAD(&free, plinks.s.ss); 2454 /* Recycle a freed page table page. */ 2455 m_pc->ref_count = 1; 2456 } 2457 vm_page_free_pages_toq(&free, true); 2458 return (m_pc); 2459 } 2460 2461 /* 2462 * free the pv_entry back to the free list 2463 */ 2464 static void 2465 free_pv_entry(pmap_t pmap, pv_entry_t pv) 2466 { 2467 struct pv_chunk *pc; 2468 int idx, field, bit; 2469 2470 rw_assert(&pvh_global_lock, RA_WLOCKED); 2471 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2472 PV_STAT(pv_entry_frees++); 2473 PV_STAT(pv_entry_spare++); 2474 pv_entry_count--; 2475 pc = pv_to_chunk(pv); 2476 idx = pv - &pc->pc_pventry[0]; 2477 field = idx / 32; 2478 bit = idx % 32; 2479 pc->pc_map[field] |= 1ul << bit; 2480 for (idx = 0; idx < _NPCM; idx++) 2481 if (pc->pc_map[idx] != pc_freemask[idx]) { 2482 /* 2483 * 98% of the time, pc is already at the head of the 2484 * list. If it isn't already, move it to the head. 2485 */ 2486 if (__predict_false(TAILQ_FIRST(&pmap->pm_pvchunk) != 2487 pc)) { 2488 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2489 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, 2490 pc_list); 2491 } 2492 return; 2493 } 2494 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2495 free_pv_chunk(pc); 2496 } 2497 2498 static void 2499 free_pv_chunk(struct pv_chunk *pc) 2500 { 2501 vm_page_t m; 2502 2503 TAILQ_REMOVE(&pv_chunks, pc, pc_lru); 2504 PV_STAT(pv_entry_spare -= _NPCPV); 2505 PV_STAT(pc_chunk_count--); 2506 PV_STAT(pc_chunk_frees++); 2507 /* entire chunk is free, return it */ 2508 m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc)); 2509 pmap_qremove((vm_offset_t)pc, 1); 2510 vm_page_unwire_noq(m); 2511 vm_page_free(m); 2512 pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc); 2513 } 2514 2515 /* 2516 * get a new pv_entry, allocating a block from the system 2517 * when needed. 2518 */ 2519 static pv_entry_t 2520 get_pv_entry(pmap_t pmap, boolean_t try) 2521 { 2522 static const struct timeval printinterval = { 60, 0 }; 2523 static struct timeval lastprint; 2524 int bit, field; 2525 pv_entry_t pv; 2526 struct pv_chunk *pc; 2527 vm_page_t m; 2528 2529 rw_assert(&pvh_global_lock, RA_WLOCKED); 2530 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2531 PV_STAT(pv_entry_allocs++); 2532 pv_entry_count++; 2533 if (pv_entry_count > pv_entry_high_water) 2534 if (ratecheck(&lastprint, &printinterval)) 2535 printf("Approaching the limit on PV entries, consider " 2536 "increasing either the vm.pmap.shpgperproc or the " 2537 "vm.pmap.pv_entries tunable.\n"); 2538 retry: 2539 pc = TAILQ_FIRST(&pmap->pm_pvchunk); 2540 if (pc != NULL) { 2541 for (field = 0; field < _NPCM; field++) { 2542 if (pc->pc_map[field]) { 2543 bit = bsfl(pc->pc_map[field]); 2544 break; 2545 } 2546 } 2547 if (field < _NPCM) { 2548 pv = &pc->pc_pventry[field * 32 + bit]; 2549 pc->pc_map[field] &= ~(1ul << bit); 2550 /* If this was the last item, move it to tail */ 2551 for (field = 0; field < _NPCM; field++) 2552 if (pc->pc_map[field] != 0) { 2553 PV_STAT(pv_entry_spare--); 2554 return (pv); /* not full, return */ 2555 } 2556 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2557 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list); 2558 PV_STAT(pv_entry_spare--); 2559 return (pv); 2560 } 2561 } 2562 /* 2563 * Access to the ptelist "pv_vafree" is synchronized by the pvh 2564 * global lock. If "pv_vafree" is currently non-empty, it will 2565 * remain non-empty until pmap_ptelist_alloc() completes. 2566 */ 2567 if (pv_vafree == 0 || 2568 (m = vm_page_alloc_noobj(VM_ALLOC_WIRED)) == NULL) { 2569 if (try) { 2570 pv_entry_count--; 2571 PV_STAT(pc_chunk_tryfail++); 2572 return (NULL); 2573 } 2574 m = pmap_pv_reclaim(pmap); 2575 if (m == NULL) 2576 goto retry; 2577 } 2578 PV_STAT(pc_chunk_count++); 2579 PV_STAT(pc_chunk_allocs++); 2580 pc = (struct pv_chunk *)pmap_ptelist_alloc(&pv_vafree); 2581 pmap_qenter((vm_offset_t)pc, &m, 1); 2582 pc->pc_pmap = pmap; 2583 pc->pc_map[0] = pc_freemask[0] & ~1ul; /* preallocated bit 0 */ 2584 for (field = 1; field < _NPCM; field++) 2585 pc->pc_map[field] = pc_freemask[field]; 2586 TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru); 2587 pv = &pc->pc_pventry[0]; 2588 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); 2589 PV_STAT(pv_entry_spare += _NPCPV - 1); 2590 return (pv); 2591 } 2592 2593 static __inline pv_entry_t 2594 pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va) 2595 { 2596 pv_entry_t pv; 2597 2598 rw_assert(&pvh_global_lock, RA_WLOCKED); 2599 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { 2600 if (pmap == PV_PMAP(pv) && va == pv->pv_va) { 2601 TAILQ_REMOVE(&pvh->pv_list, pv, pv_next); 2602 break; 2603 } 2604 } 2605 return (pv); 2606 } 2607 2608 static void 2609 pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa) 2610 { 2611 struct md_page *pvh; 2612 pv_entry_t pv; 2613 vm_offset_t va_last; 2614 vm_page_t m; 2615 2616 rw_assert(&pvh_global_lock, RA_WLOCKED); 2617 KASSERT((pa & PDRMASK) == 0, 2618 ("pmap_pv_demote_pde: pa is not 4mpage aligned")); 2619 2620 /* 2621 * Transfer the 4mpage's pv entry for this mapping to the first 2622 * page's pv list. 2623 */ 2624 pvh = pa_to_pvh(pa); 2625 va = trunc_4mpage(va); 2626 pv = pmap_pvh_remove(pvh, pmap, va); 2627 KASSERT(pv != NULL, ("pmap_pv_demote_pde: pv not found")); 2628 m = PHYS_TO_VM_PAGE(pa); 2629 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); 2630 /* Instantiate the remaining NPTEPG - 1 pv entries. */ 2631 va_last = va + NBPDR - PAGE_SIZE; 2632 do { 2633 m++; 2634 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2635 ("pmap_pv_demote_pde: page %p is not managed", m)); 2636 va += PAGE_SIZE; 2637 pmap_insert_entry(pmap, va, m); 2638 } while (va < va_last); 2639 } 2640 2641 #if VM_NRESERVLEVEL > 0 2642 static void 2643 pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa) 2644 { 2645 struct md_page *pvh; 2646 pv_entry_t pv; 2647 vm_offset_t va_last; 2648 vm_page_t m; 2649 2650 rw_assert(&pvh_global_lock, RA_WLOCKED); 2651 KASSERT((pa & PDRMASK) == 0, 2652 ("pmap_pv_promote_pde: pa is not 4mpage aligned")); 2653 2654 /* 2655 * Transfer the first page's pv entry for this mapping to the 2656 * 4mpage's pv list. Aside from avoiding the cost of a call 2657 * to get_pv_entry(), a transfer avoids the possibility that 2658 * get_pv_entry() calls pmap_collect() and that pmap_collect() 2659 * removes one of the mappings that is being promoted. 2660 */ 2661 m = PHYS_TO_VM_PAGE(pa); 2662 va = trunc_4mpage(va); 2663 pv = pmap_pvh_remove(&m->md, pmap, va); 2664 KASSERT(pv != NULL, ("pmap_pv_promote_pde: pv not found")); 2665 pvh = pa_to_pvh(pa); 2666 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next); 2667 /* Free the remaining NPTEPG - 1 pv entries. */ 2668 va_last = va + NBPDR - PAGE_SIZE; 2669 do { 2670 m++; 2671 va += PAGE_SIZE; 2672 pmap_pvh_free(&m->md, pmap, va); 2673 } while (va < va_last); 2674 } 2675 #endif /* VM_NRESERVLEVEL > 0 */ 2676 2677 static void 2678 pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va) 2679 { 2680 pv_entry_t pv; 2681 2682 pv = pmap_pvh_remove(pvh, pmap, va); 2683 KASSERT(pv != NULL, ("pmap_pvh_free: pv not found")); 2684 free_pv_entry(pmap, pv); 2685 } 2686 2687 static void 2688 pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va) 2689 { 2690 struct md_page *pvh; 2691 2692 rw_assert(&pvh_global_lock, RA_WLOCKED); 2693 pmap_pvh_free(&m->md, pmap, va); 2694 if (TAILQ_EMPTY(&m->md.pv_list) && (m->flags & PG_FICTITIOUS) == 0) { 2695 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 2696 if (TAILQ_EMPTY(&pvh->pv_list)) 2697 vm_page_aflag_clear(m, PGA_WRITEABLE); 2698 } 2699 } 2700 2701 /* 2702 * Create a pv entry for page at pa for 2703 * (pmap, va). 2704 */ 2705 static void 2706 pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m) 2707 { 2708 pv_entry_t pv; 2709 2710 rw_assert(&pvh_global_lock, RA_WLOCKED); 2711 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2712 pv = get_pv_entry(pmap, FALSE); 2713 pv->pv_va = va; 2714 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); 2715 } 2716 2717 /* 2718 * Conditionally create a pv entry. 2719 */ 2720 static boolean_t 2721 pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m) 2722 { 2723 pv_entry_t pv; 2724 2725 rw_assert(&pvh_global_lock, RA_WLOCKED); 2726 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2727 if (pv_entry_count < pv_entry_high_water && 2728 (pv = get_pv_entry(pmap, TRUE)) != NULL) { 2729 pv->pv_va = va; 2730 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); 2731 return (TRUE); 2732 } else 2733 return (FALSE); 2734 } 2735 2736 /* 2737 * Create the pv entries for each of the pages within a superpage. 2738 */ 2739 static bool 2740 pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, pd_entry_t pde, u_int flags) 2741 { 2742 struct md_page *pvh; 2743 pv_entry_t pv; 2744 bool noreclaim; 2745 2746 rw_assert(&pvh_global_lock, RA_WLOCKED); 2747 noreclaim = (flags & PMAP_ENTER_NORECLAIM) != 0; 2748 if ((noreclaim && pv_entry_count >= pv_entry_high_water) || 2749 (pv = get_pv_entry(pmap, noreclaim)) == NULL) 2750 return (false); 2751 pv->pv_va = va; 2752 pvh = pa_to_pvh(pde & PG_PS_FRAME); 2753 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next); 2754 return (true); 2755 } 2756 2757 /* 2758 * Fills a page table page with mappings to consecutive physical pages. 2759 */ 2760 static void 2761 pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte) 2762 { 2763 pt_entry_t *pte; 2764 2765 for (pte = firstpte; pte < firstpte + NPTEPG; pte++) { 2766 *pte = newpte; 2767 newpte += PAGE_SIZE; 2768 } 2769 } 2770 2771 /* 2772 * Tries to demote a 2- or 4MB page mapping. If demotion fails, the 2773 * 2- or 4MB page mapping is invalidated. 2774 */ 2775 static boolean_t 2776 pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va) 2777 { 2778 pd_entry_t newpde, oldpde; 2779 pt_entry_t *firstpte, newpte; 2780 vm_paddr_t mptepa; 2781 vm_page_t mpte; 2782 struct spglist free; 2783 vm_offset_t sva; 2784 2785 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2786 oldpde = *pde; 2787 KASSERT((oldpde & (PG_PS | PG_V)) == (PG_PS | PG_V), 2788 ("pmap_demote_pde: oldpde is missing PG_PS and/or PG_V")); 2789 if ((oldpde & PG_A) == 0 || (mpte = pmap_remove_pt_page(pmap, va)) == 2790 NULL) { 2791 KASSERT((oldpde & PG_W) == 0, 2792 ("pmap_demote_pde: page table page for a wired mapping" 2793 " is missing")); 2794 2795 /* 2796 * Invalidate the 2- or 4MB page mapping and return 2797 * "failure" if the mapping was never accessed or the 2798 * allocation of the new page table page fails. 2799 */ 2800 if ((oldpde & PG_A) == 0 || 2801 (mpte = vm_page_alloc_noobj(VM_ALLOC_WIRED)) == NULL) { 2802 SLIST_INIT(&free); 2803 sva = trunc_4mpage(va); 2804 pmap_remove_pde(pmap, pde, sva, &free); 2805 if ((oldpde & PG_G) == 0) 2806 pmap_invalidate_pde_page(pmap, sva, oldpde); 2807 vm_page_free_pages_toq(&free, true); 2808 CTR2(KTR_PMAP, "pmap_demote_pde: failure for va %#x" 2809 " in pmap %p", va, pmap); 2810 return (FALSE); 2811 } 2812 mpte->pindex = va >> PDRSHIFT; 2813 if (pmap != kernel_pmap) { 2814 mpte->ref_count = NPTEPG; 2815 pmap->pm_stats.resident_count++; 2816 } 2817 } 2818 mptepa = VM_PAGE_TO_PHYS(mpte); 2819 2820 /* 2821 * If the page mapping is in the kernel's address space, then the 2822 * KPTmap can provide access to the page table page. Otherwise, 2823 * temporarily map the page table page (mpte) into the kernel's 2824 * address space at either PADDR1 or PADDR2. 2825 */ 2826 if (pmap == kernel_pmap) 2827 firstpte = &KPTmap[i386_btop(trunc_4mpage(va))]; 2828 else if (curthread->td_pinned > 0 && rw_wowned(&pvh_global_lock)) { 2829 if ((*PMAP1 & PG_FRAME) != mptepa) { 2830 *PMAP1 = mptepa | PG_RW | PG_V | PG_A | PG_M; 2831 #ifdef SMP 2832 PMAP1cpu = PCPU_GET(cpuid); 2833 #endif 2834 invlcaddr(PADDR1); 2835 PMAP1changed++; 2836 } else 2837 #ifdef SMP 2838 if (PMAP1cpu != PCPU_GET(cpuid)) { 2839 PMAP1cpu = PCPU_GET(cpuid); 2840 invlcaddr(PADDR1); 2841 PMAP1changedcpu++; 2842 } else 2843 #endif 2844 PMAP1unchanged++; 2845 firstpte = PADDR1; 2846 } else { 2847 mtx_lock(&PMAP2mutex); 2848 if ((*PMAP2 & PG_FRAME) != mptepa) { 2849 *PMAP2 = mptepa | PG_RW | PG_V | PG_A | PG_M; 2850 pmap_invalidate_page_int(kernel_pmap, 2851 (vm_offset_t)PADDR2); 2852 } 2853 firstpte = PADDR2; 2854 } 2855 newpde = mptepa | PG_M | PG_A | (oldpde & PG_U) | PG_RW | PG_V; 2856 KASSERT((oldpde & PG_A) != 0, 2857 ("pmap_demote_pde: oldpde is missing PG_A")); 2858 KASSERT((oldpde & (PG_M | PG_RW)) != PG_RW, 2859 ("pmap_demote_pde: oldpde is missing PG_M")); 2860 newpte = oldpde & ~PG_PS; 2861 if ((newpte & PG_PDE_PAT) != 0) 2862 newpte ^= PG_PDE_PAT | PG_PTE_PAT; 2863 2864 /* 2865 * If the page table page is not leftover from an earlier promotion, 2866 * initialize it. 2867 */ 2868 if (mpte->valid == 0) 2869 pmap_fill_ptp(firstpte, newpte); 2870 2871 KASSERT((*firstpte & PG_FRAME) == (newpte & PG_FRAME), 2872 ("pmap_demote_pde: firstpte and newpte map different physical" 2873 " addresses")); 2874 2875 /* 2876 * If the mapping has changed attributes, update the page table 2877 * entries. 2878 */ 2879 if ((*firstpte & PG_PTE_PROMOTE) != (newpte & PG_PTE_PROMOTE)) 2880 pmap_fill_ptp(firstpte, newpte); 2881 2882 /* 2883 * Demote the mapping. This pmap is locked. The old PDE has 2884 * PG_A set. If the old PDE has PG_RW set, it also has PG_M 2885 * set. Thus, there is no danger of a race with another 2886 * processor changing the setting of PG_A and/or PG_M between 2887 * the read above and the store below. 2888 */ 2889 if (workaround_erratum383) 2890 pmap_update_pde(pmap, va, pde, newpde); 2891 else if (pmap == kernel_pmap) 2892 pmap_kenter_pde(va, newpde); 2893 else 2894 pde_store(pde, newpde); 2895 if (firstpte == PADDR2) 2896 mtx_unlock(&PMAP2mutex); 2897 2898 /* 2899 * Invalidate the recursive mapping of the page table page. 2900 */ 2901 pmap_invalidate_page_int(pmap, (vm_offset_t)vtopte(va)); 2902 2903 /* 2904 * Demote the pv entry. This depends on the earlier demotion 2905 * of the mapping. Specifically, the (re)creation of a per- 2906 * page pv entry might trigger the execution of pmap_collect(), 2907 * which might reclaim a newly (re)created per-page pv entry 2908 * and destroy the associated mapping. In order to destroy 2909 * the mapping, the PDE must have already changed from mapping 2910 * the 2mpage to referencing the page table page. 2911 */ 2912 if ((oldpde & PG_MANAGED) != 0) 2913 pmap_pv_demote_pde(pmap, va, oldpde & PG_PS_FRAME); 2914 2915 pmap_pde_demotions++; 2916 CTR2(KTR_PMAP, "pmap_demote_pde: success for va %#x" 2917 " in pmap %p", va, pmap); 2918 return (TRUE); 2919 } 2920 2921 /* 2922 * Removes a 2- or 4MB page mapping from the kernel pmap. 2923 */ 2924 static void 2925 pmap_remove_kernel_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va) 2926 { 2927 pd_entry_t newpde; 2928 vm_paddr_t mptepa; 2929 vm_page_t mpte; 2930 2931 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2932 mpte = pmap_remove_pt_page(pmap, va); 2933 if (mpte == NULL) 2934 panic("pmap_remove_kernel_pde: Missing pt page."); 2935 2936 mptepa = VM_PAGE_TO_PHYS(mpte); 2937 newpde = mptepa | PG_M | PG_A | PG_RW | PG_V; 2938 2939 /* 2940 * If this page table page was unmapped by a promotion, then it 2941 * contains valid mappings. Zero it to invalidate those mappings. 2942 */ 2943 if (mpte->valid != 0) 2944 pagezero((void *)&KPTmap[i386_btop(trunc_4mpage(va))]); 2945 2946 /* 2947 * Remove the mapping. 2948 */ 2949 if (workaround_erratum383) 2950 pmap_update_pde(pmap, va, pde, newpde); 2951 else 2952 pmap_kenter_pde(va, newpde); 2953 2954 /* 2955 * Invalidate the recursive mapping of the page table page. 2956 */ 2957 pmap_invalidate_page_int(pmap, (vm_offset_t)vtopte(va)); 2958 } 2959 2960 /* 2961 * pmap_remove_pde: do the things to unmap a superpage in a process 2962 */ 2963 static void 2964 pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva, 2965 struct spglist *free) 2966 { 2967 struct md_page *pvh; 2968 pd_entry_t oldpde; 2969 vm_offset_t eva, va; 2970 vm_page_t m, mpte; 2971 2972 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2973 KASSERT((sva & PDRMASK) == 0, 2974 ("pmap_remove_pde: sva is not 4mpage aligned")); 2975 oldpde = pte_load_clear(pdq); 2976 if (oldpde & PG_W) 2977 pmap->pm_stats.wired_count -= NBPDR / PAGE_SIZE; 2978 2979 /* 2980 * Machines that don't support invlpg, also don't support 2981 * PG_G. 2982 */ 2983 if ((oldpde & PG_G) != 0) 2984 pmap_invalidate_pde_page(kernel_pmap, sva, oldpde); 2985 2986 pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE; 2987 if (oldpde & PG_MANAGED) { 2988 pvh = pa_to_pvh(oldpde & PG_PS_FRAME); 2989 pmap_pvh_free(pvh, pmap, sva); 2990 eva = sva + NBPDR; 2991 for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME); 2992 va < eva; va += PAGE_SIZE, m++) { 2993 if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW)) 2994 vm_page_dirty(m); 2995 if (oldpde & PG_A) 2996 vm_page_aflag_set(m, PGA_REFERENCED); 2997 if (TAILQ_EMPTY(&m->md.pv_list) && 2998 TAILQ_EMPTY(&pvh->pv_list)) 2999 vm_page_aflag_clear(m, PGA_WRITEABLE); 3000 } 3001 } 3002 if (pmap == kernel_pmap) { 3003 pmap_remove_kernel_pde(pmap, pdq, sva); 3004 } else { 3005 mpte = pmap_remove_pt_page(pmap, sva); 3006 if (mpte != NULL) { 3007 KASSERT(mpte->valid == VM_PAGE_BITS_ALL, 3008 ("pmap_remove_pde: pte page not promoted")); 3009 pmap->pm_stats.resident_count--; 3010 KASSERT(mpte->ref_count == NPTEPG, 3011 ("pmap_remove_pde: pte page ref count error")); 3012 mpte->ref_count = 0; 3013 pmap_add_delayed_free_list(mpte, free, FALSE); 3014 } 3015 } 3016 } 3017 3018 /* 3019 * pmap_remove_pte: do the things to unmap a page in a process 3020 */ 3021 static int 3022 pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va, 3023 struct spglist *free) 3024 { 3025 pt_entry_t oldpte; 3026 vm_page_t m; 3027 3028 rw_assert(&pvh_global_lock, RA_WLOCKED); 3029 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3030 oldpte = pte_load_clear(ptq); 3031 KASSERT(oldpte != 0, 3032 ("pmap_remove_pte: pmap %p va %x zero pte", pmap, va)); 3033 if (oldpte & PG_W) 3034 pmap->pm_stats.wired_count -= 1; 3035 /* 3036 * Machines that don't support invlpg, also don't support 3037 * PG_G. 3038 */ 3039 if (oldpte & PG_G) 3040 pmap_invalidate_page_int(kernel_pmap, va); 3041 pmap->pm_stats.resident_count -= 1; 3042 if (oldpte & PG_MANAGED) { 3043 m = PHYS_TO_VM_PAGE(oldpte & PG_FRAME); 3044 if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 3045 vm_page_dirty(m); 3046 if (oldpte & PG_A) 3047 vm_page_aflag_set(m, PGA_REFERENCED); 3048 pmap_remove_entry(pmap, m, va); 3049 } 3050 return (pmap_unuse_pt(pmap, va, free)); 3051 } 3052 3053 /* 3054 * Remove a single page from a process address space 3055 */ 3056 static void 3057 pmap_remove_page(pmap_t pmap, vm_offset_t va, struct spglist *free) 3058 { 3059 pt_entry_t *pte; 3060 3061 rw_assert(&pvh_global_lock, RA_WLOCKED); 3062 KASSERT(curthread->td_pinned > 0, ("curthread not pinned")); 3063 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3064 if ((pte = pmap_pte_quick(pmap, va)) == NULL || *pte == 0) 3065 return; 3066 pmap_remove_pte(pmap, pte, va, free); 3067 pmap_invalidate_page_int(pmap, va); 3068 } 3069 3070 /* 3071 * Removes the specified range of addresses from the page table page. 3072 */ 3073 static bool 3074 pmap_remove_ptes(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, 3075 struct spglist *free) 3076 { 3077 pt_entry_t *pte; 3078 bool anyvalid; 3079 3080 rw_assert(&pvh_global_lock, RA_WLOCKED); 3081 KASSERT(curthread->td_pinned > 0, ("curthread not pinned")); 3082 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3083 anyvalid = false; 3084 for (pte = pmap_pte_quick(pmap, sva); sva != eva; pte++, 3085 sva += PAGE_SIZE) { 3086 if (*pte == 0) 3087 continue; 3088 3089 /* 3090 * The TLB entry for a PG_G mapping is invalidated by 3091 * pmap_remove_pte(). 3092 */ 3093 if ((*pte & PG_G) == 0) 3094 anyvalid = true; 3095 3096 if (pmap_remove_pte(pmap, pte, sva, free)) 3097 break; 3098 } 3099 return (anyvalid); 3100 } 3101 3102 /* 3103 * Remove the given range of addresses from the specified map. 3104 * 3105 * It is assumed that the start and end are properly 3106 * rounded to the page size. 3107 */ 3108 static void 3109 __CONCAT(PMTYPE, remove)(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 3110 { 3111 vm_offset_t pdnxt; 3112 pd_entry_t ptpaddr; 3113 struct spglist free; 3114 int anyvalid; 3115 3116 /* 3117 * Perform an unsynchronized read. This is, however, safe. 3118 */ 3119 if (pmap->pm_stats.resident_count == 0) 3120 return; 3121 3122 anyvalid = 0; 3123 SLIST_INIT(&free); 3124 3125 rw_wlock(&pvh_global_lock); 3126 sched_pin(); 3127 PMAP_LOCK(pmap); 3128 3129 /* 3130 * special handling of removing one page. a very 3131 * common operation and easy to short circuit some 3132 * code. 3133 */ 3134 if ((sva + PAGE_SIZE == eva) && 3135 ((pmap->pm_pdir[(sva >> PDRSHIFT)] & PG_PS) == 0)) { 3136 pmap_remove_page(pmap, sva, &free); 3137 goto out; 3138 } 3139 3140 for (; sva < eva; sva = pdnxt) { 3141 u_int pdirindex; 3142 3143 /* 3144 * Calculate index for next page table. 3145 */ 3146 pdnxt = (sva + NBPDR) & ~PDRMASK; 3147 if (pdnxt < sva) 3148 pdnxt = eva; 3149 if (pmap->pm_stats.resident_count == 0) 3150 break; 3151 3152 pdirindex = sva >> PDRSHIFT; 3153 ptpaddr = pmap->pm_pdir[pdirindex]; 3154 3155 /* 3156 * Weed out invalid mappings. Note: we assume that the page 3157 * directory table is always allocated, and in kernel virtual. 3158 */ 3159 if (ptpaddr == 0) 3160 continue; 3161 3162 /* 3163 * Check for large page. 3164 */ 3165 if ((ptpaddr & PG_PS) != 0) { 3166 /* 3167 * Are we removing the entire large page? If not, 3168 * demote the mapping and fall through. 3169 */ 3170 if (sva + NBPDR == pdnxt && eva >= pdnxt) { 3171 /* 3172 * The TLB entry for a PG_G mapping is 3173 * invalidated by pmap_remove_pde(). 3174 */ 3175 if ((ptpaddr & PG_G) == 0) 3176 anyvalid = 1; 3177 pmap_remove_pde(pmap, 3178 &pmap->pm_pdir[pdirindex], sva, &free); 3179 continue; 3180 } else if (!pmap_demote_pde(pmap, 3181 &pmap->pm_pdir[pdirindex], sva)) { 3182 /* The large page mapping was destroyed. */ 3183 continue; 3184 } 3185 } 3186 3187 /* 3188 * Limit our scan to either the end of the va represented 3189 * by the current page table page, or to the end of the 3190 * range being removed. 3191 */ 3192 if (pdnxt > eva) 3193 pdnxt = eva; 3194 3195 if (pmap_remove_ptes(pmap, sva, pdnxt, &free)) 3196 anyvalid = 1; 3197 } 3198 out: 3199 sched_unpin(); 3200 if (anyvalid) 3201 pmap_invalidate_all_int(pmap); 3202 rw_wunlock(&pvh_global_lock); 3203 PMAP_UNLOCK(pmap); 3204 vm_page_free_pages_toq(&free, true); 3205 } 3206 3207 /* 3208 * Routine: pmap_remove_all 3209 * Function: 3210 * Removes this physical page from 3211 * all physical maps in which it resides. 3212 * Reflects back modify bits to the pager. 3213 * 3214 * Notes: 3215 * Original versions of this routine were very 3216 * inefficient because they iteratively called 3217 * pmap_remove (slow...) 3218 */ 3219 3220 static void 3221 __CONCAT(PMTYPE, remove_all)(vm_page_t m) 3222 { 3223 struct md_page *pvh; 3224 pv_entry_t pv; 3225 pmap_t pmap; 3226 pt_entry_t *pte, tpte; 3227 pd_entry_t *pde; 3228 vm_offset_t va; 3229 struct spglist free; 3230 3231 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3232 ("pmap_remove_all: page %p is not managed", m)); 3233 SLIST_INIT(&free); 3234 rw_wlock(&pvh_global_lock); 3235 sched_pin(); 3236 if ((m->flags & PG_FICTITIOUS) != 0) 3237 goto small_mappings; 3238 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 3239 while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) { 3240 va = pv->pv_va; 3241 pmap = PV_PMAP(pv); 3242 PMAP_LOCK(pmap); 3243 pde = pmap_pde(pmap, va); 3244 (void)pmap_demote_pde(pmap, pde, va); 3245 PMAP_UNLOCK(pmap); 3246 } 3247 small_mappings: 3248 while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { 3249 pmap = PV_PMAP(pv); 3250 PMAP_LOCK(pmap); 3251 pmap->pm_stats.resident_count--; 3252 pde = pmap_pde(pmap, pv->pv_va); 3253 KASSERT((*pde & PG_PS) == 0, ("pmap_remove_all: found" 3254 " a 4mpage in page %p's pv list", m)); 3255 pte = pmap_pte_quick(pmap, pv->pv_va); 3256 tpte = pte_load_clear(pte); 3257 KASSERT(tpte != 0, ("pmap_remove_all: pmap %p va %x zero pte", 3258 pmap, pv->pv_va)); 3259 if (tpte & PG_W) 3260 pmap->pm_stats.wired_count--; 3261 if (tpte & PG_A) 3262 vm_page_aflag_set(m, PGA_REFERENCED); 3263 3264 /* 3265 * Update the vm_page_t clean and reference bits. 3266 */ 3267 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 3268 vm_page_dirty(m); 3269 pmap_unuse_pt(pmap, pv->pv_va, &free); 3270 pmap_invalidate_page_int(pmap, pv->pv_va); 3271 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); 3272 free_pv_entry(pmap, pv); 3273 PMAP_UNLOCK(pmap); 3274 } 3275 vm_page_aflag_clear(m, PGA_WRITEABLE); 3276 sched_unpin(); 3277 rw_wunlock(&pvh_global_lock); 3278 vm_page_free_pages_toq(&free, true); 3279 } 3280 3281 /* 3282 * pmap_protect_pde: do the things to protect a 4mpage in a process 3283 */ 3284 static boolean_t 3285 pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot) 3286 { 3287 pd_entry_t newpde, oldpde; 3288 vm_page_t m, mt; 3289 boolean_t anychanged; 3290 3291 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3292 KASSERT((sva & PDRMASK) == 0, 3293 ("pmap_protect_pde: sva is not 4mpage aligned")); 3294 anychanged = FALSE; 3295 retry: 3296 oldpde = newpde = *pde; 3297 if ((prot & VM_PROT_WRITE) == 0) { 3298 if ((oldpde & (PG_MANAGED | PG_M | PG_RW)) == 3299 (PG_MANAGED | PG_M | PG_RW)) { 3300 m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME); 3301 for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++) 3302 vm_page_dirty(mt); 3303 } 3304 newpde &= ~(PG_RW | PG_M); 3305 } 3306 #ifdef PMAP_PAE_COMP 3307 if ((prot & VM_PROT_EXECUTE) == 0 && !i386_read_exec) 3308 newpde |= pg_nx; 3309 #endif 3310 if (newpde != oldpde) { 3311 /* 3312 * As an optimization to future operations on this PDE, clear 3313 * PG_PROMOTED. The impending invalidation will remove any 3314 * lingering 4KB page mappings from the TLB. 3315 */ 3316 if (!pde_cmpset(pde, oldpde, newpde & ~PG_PROMOTED)) 3317 goto retry; 3318 if ((oldpde & PG_G) != 0) 3319 pmap_invalidate_pde_page(kernel_pmap, sva, oldpde); 3320 else 3321 anychanged = TRUE; 3322 } 3323 return (anychanged); 3324 } 3325 3326 /* 3327 * Set the physical protection on the 3328 * specified range of this map as requested. 3329 */ 3330 static void 3331 __CONCAT(PMTYPE, protect)(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, 3332 vm_prot_t prot) 3333 { 3334 vm_offset_t pdnxt; 3335 pd_entry_t ptpaddr; 3336 pt_entry_t *pte; 3337 boolean_t anychanged, pv_lists_locked; 3338 3339 KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot)); 3340 if (prot == VM_PROT_NONE) { 3341 pmap_remove(pmap, sva, eva); 3342 return; 3343 } 3344 3345 #ifdef PMAP_PAE_COMP 3346 if ((prot & (VM_PROT_WRITE | VM_PROT_EXECUTE)) == 3347 (VM_PROT_WRITE | VM_PROT_EXECUTE)) 3348 return; 3349 #else 3350 if (prot & VM_PROT_WRITE) 3351 return; 3352 #endif 3353 3354 if (pmap_is_current(pmap)) 3355 pv_lists_locked = FALSE; 3356 else { 3357 pv_lists_locked = TRUE; 3358 resume: 3359 rw_wlock(&pvh_global_lock); 3360 sched_pin(); 3361 } 3362 anychanged = FALSE; 3363 3364 PMAP_LOCK(pmap); 3365 for (; sva < eva; sva = pdnxt) { 3366 pt_entry_t obits, pbits; 3367 u_int pdirindex; 3368 3369 pdnxt = (sva + NBPDR) & ~PDRMASK; 3370 if (pdnxt < sva) 3371 pdnxt = eva; 3372 3373 pdirindex = sva >> PDRSHIFT; 3374 ptpaddr = pmap->pm_pdir[pdirindex]; 3375 3376 /* 3377 * Weed out invalid mappings. Note: we assume that the page 3378 * directory table is always allocated, and in kernel virtual. 3379 */ 3380 if (ptpaddr == 0) 3381 continue; 3382 3383 /* 3384 * Check for large page. 3385 */ 3386 if ((ptpaddr & PG_PS) != 0) { 3387 /* 3388 * Are we protecting the entire large page? If not, 3389 * demote the mapping and fall through. 3390 */ 3391 if (sva + NBPDR == pdnxt && eva >= pdnxt) { 3392 /* 3393 * The TLB entry for a PG_G mapping is 3394 * invalidated by pmap_protect_pde(). 3395 */ 3396 if (pmap_protect_pde(pmap, 3397 &pmap->pm_pdir[pdirindex], sva, prot)) 3398 anychanged = TRUE; 3399 continue; 3400 } else { 3401 if (!pv_lists_locked) { 3402 pv_lists_locked = TRUE; 3403 if (!rw_try_wlock(&pvh_global_lock)) { 3404 if (anychanged) 3405 pmap_invalidate_all_int( 3406 pmap); 3407 PMAP_UNLOCK(pmap); 3408 goto resume; 3409 } 3410 sched_pin(); 3411 } 3412 if (!pmap_demote_pde(pmap, 3413 &pmap->pm_pdir[pdirindex], sva)) { 3414 /* 3415 * The large page mapping was 3416 * destroyed. 3417 */ 3418 continue; 3419 } 3420 } 3421 } 3422 3423 if (pdnxt > eva) 3424 pdnxt = eva; 3425 3426 for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++, 3427 sva += PAGE_SIZE) { 3428 vm_page_t m; 3429 3430 retry: 3431 /* 3432 * Regardless of whether a pte is 32 or 64 bits in 3433 * size, PG_RW, PG_A, and PG_M are among the least 3434 * significant 32 bits. 3435 */ 3436 obits = pbits = *pte; 3437 if ((pbits & PG_V) == 0) 3438 continue; 3439 3440 if ((prot & VM_PROT_WRITE) == 0) { 3441 if ((pbits & (PG_MANAGED | PG_M | PG_RW)) == 3442 (PG_MANAGED | PG_M | PG_RW)) { 3443 m = PHYS_TO_VM_PAGE(pbits & PG_FRAME); 3444 vm_page_dirty(m); 3445 } 3446 pbits &= ~(PG_RW | PG_M); 3447 } 3448 #ifdef PMAP_PAE_COMP 3449 if ((prot & VM_PROT_EXECUTE) == 0 && !i386_read_exec) 3450 pbits |= pg_nx; 3451 #endif 3452 3453 if (pbits != obits) { 3454 #ifdef PMAP_PAE_COMP 3455 if (!atomic_cmpset_64(pte, obits, pbits)) 3456 goto retry; 3457 #else 3458 if (!atomic_cmpset_int((u_int *)pte, obits, 3459 pbits)) 3460 goto retry; 3461 #endif 3462 if (obits & PG_G) 3463 pmap_invalidate_page_int(pmap, sva); 3464 else 3465 anychanged = TRUE; 3466 } 3467 } 3468 } 3469 if (anychanged) 3470 pmap_invalidate_all_int(pmap); 3471 if (pv_lists_locked) { 3472 sched_unpin(); 3473 rw_wunlock(&pvh_global_lock); 3474 } 3475 PMAP_UNLOCK(pmap); 3476 } 3477 3478 #if VM_NRESERVLEVEL > 0 3479 /* 3480 * Tries to promote the 512 or 1024, contiguous 4KB page mappings that are 3481 * within a single page table page (PTP) to a single 2- or 4MB page mapping. 3482 * For promotion to occur, two conditions must be met: (1) the 4KB page 3483 * mappings must map aligned, contiguous physical memory and (2) the 4KB page 3484 * mappings must have identical characteristics. 3485 * 3486 * Managed (PG_MANAGED) mappings within the kernel address space are not 3487 * promoted. The reason is that kernel PDEs are replicated in each pmap but 3488 * pmap_clear_ptes() and pmap_ts_referenced() only read the PDE from the kernel 3489 * pmap. 3490 */ 3491 static void 3492 pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va) 3493 { 3494 pd_entry_t newpde; 3495 pt_entry_t *firstpte, oldpte, pa, *pte; 3496 #ifdef KTR 3497 vm_offset_t oldpteva; 3498 #endif 3499 vm_page_t mpte; 3500 3501 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3502 3503 /* 3504 * Examine the first PTE in the specified PTP. Abort if this PTE is 3505 * either invalid, unused, or does not map the first 4KB physical page 3506 * within a 2- or 4MB page. 3507 */ 3508 firstpte = pmap_pte_quick(pmap, trunc_4mpage(va)); 3509 setpde: 3510 newpde = *firstpte; 3511 if ((newpde & ((PG_FRAME & PDRMASK) | PG_A | PG_V)) != (PG_A | PG_V)) { 3512 pmap_pde_p_failures++; 3513 CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x" 3514 " in pmap %p", va, pmap); 3515 return; 3516 } 3517 if ((*firstpte & PG_MANAGED) != 0 && pmap == kernel_pmap) { 3518 pmap_pde_p_failures++; 3519 CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x" 3520 " in pmap %p", va, pmap); 3521 return; 3522 } 3523 if ((newpde & (PG_M | PG_RW)) == PG_RW) { 3524 /* 3525 * When PG_M is already clear, PG_RW can be cleared without 3526 * a TLB invalidation. 3527 */ 3528 if (!atomic_cmpset_int((u_int *)firstpte, newpde, newpde & 3529 ~PG_RW)) 3530 goto setpde; 3531 newpde &= ~PG_RW; 3532 } 3533 3534 /* 3535 * Examine each of the other PTEs in the specified PTP. Abort if this 3536 * PTE maps an unexpected 4KB physical page or does not have identical 3537 * characteristics to the first PTE. 3538 */ 3539 pa = (newpde & (PG_PS_FRAME | PG_A | PG_V)) + NBPDR - PAGE_SIZE; 3540 for (pte = firstpte + NPTEPG - 1; pte > firstpte; pte--) { 3541 setpte: 3542 oldpte = *pte; 3543 if ((oldpte & (PG_FRAME | PG_A | PG_V)) != pa) { 3544 pmap_pde_p_failures++; 3545 CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x" 3546 " in pmap %p", va, pmap); 3547 return; 3548 } 3549 if ((oldpte & (PG_M | PG_RW)) == PG_RW) { 3550 /* 3551 * When PG_M is already clear, PG_RW can be cleared 3552 * without a TLB invalidation. 3553 */ 3554 if (!atomic_cmpset_int((u_int *)pte, oldpte, 3555 oldpte & ~PG_RW)) 3556 goto setpte; 3557 oldpte &= ~PG_RW; 3558 #ifdef KTR 3559 oldpteva = (oldpte & PG_FRAME & PDRMASK) | 3560 (va & ~PDRMASK); 3561 #endif 3562 CTR2(KTR_PMAP, "pmap_promote_pde: protect for va %#x" 3563 " in pmap %p", oldpteva, pmap); 3564 } 3565 if ((oldpte & PG_PTE_PROMOTE) != (newpde & PG_PTE_PROMOTE)) { 3566 pmap_pde_p_failures++; 3567 CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x" 3568 " in pmap %p", va, pmap); 3569 return; 3570 } 3571 pa -= PAGE_SIZE; 3572 } 3573 3574 /* 3575 * Save the page table page in its current state until the PDE 3576 * mapping the superpage is demoted by pmap_demote_pde() or 3577 * destroyed by pmap_remove_pde(). 3578 */ 3579 mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME); 3580 KASSERT(mpte >= vm_page_array && 3581 mpte < &vm_page_array[vm_page_array_size], 3582 ("pmap_promote_pde: page table page is out of range")); 3583 KASSERT(mpte->pindex == va >> PDRSHIFT, 3584 ("pmap_promote_pde: page table page's pindex is wrong")); 3585 if (pmap_insert_pt_page(pmap, mpte, true)) { 3586 pmap_pde_p_failures++; 3587 CTR2(KTR_PMAP, 3588 "pmap_promote_pde: failure for va %#x in pmap %p", va, 3589 pmap); 3590 return; 3591 } 3592 3593 /* 3594 * Promote the pv entries. 3595 */ 3596 if ((newpde & PG_MANAGED) != 0) 3597 pmap_pv_promote_pde(pmap, va, newpde & PG_PS_FRAME); 3598 3599 /* 3600 * Propagate the PAT index to its proper position. 3601 */ 3602 if ((newpde & PG_PTE_PAT) != 0) 3603 newpde ^= PG_PDE_PAT | PG_PTE_PAT; 3604 3605 /* 3606 * Map the superpage. 3607 */ 3608 if (workaround_erratum383) 3609 pmap_update_pde(pmap, va, pde, PG_PS | newpde); 3610 else if (pmap == kernel_pmap) 3611 pmap_kenter_pde(va, PG_PROMOTED | PG_PS | newpde); 3612 else 3613 pde_store(pde, PG_PROMOTED | PG_PS | newpde); 3614 3615 pmap_pde_promotions++; 3616 CTR2(KTR_PMAP, "pmap_promote_pde: success for va %#x" 3617 " in pmap %p", va, pmap); 3618 } 3619 #endif /* VM_NRESERVLEVEL > 0 */ 3620 3621 /* 3622 * Insert the given physical page (p) at 3623 * the specified virtual address (v) in the 3624 * target physical map with the protection requested. 3625 * 3626 * If specified, the page will be wired down, meaning 3627 * that the related pte can not be reclaimed. 3628 * 3629 * NB: This is the only routine which MAY NOT lazy-evaluate 3630 * or lose information. That is, this routine must actually 3631 * insert this page into the given map NOW. 3632 */ 3633 static int 3634 __CONCAT(PMTYPE, enter)(pmap_t pmap, vm_offset_t va, vm_page_t m, 3635 vm_prot_t prot, u_int flags, int8_t psind) 3636 { 3637 pd_entry_t *pde; 3638 pt_entry_t *pte; 3639 pt_entry_t newpte, origpte; 3640 pv_entry_t pv; 3641 vm_paddr_t opa, pa; 3642 vm_page_t mpte, om; 3643 int rv; 3644 3645 va = trunc_page(va); 3646 KASSERT((pmap == kernel_pmap && va < VM_MAX_KERNEL_ADDRESS) || 3647 (pmap != kernel_pmap && va < VM_MAXUSER_ADDRESS), 3648 ("pmap_enter: toobig k%d %#x", pmap == kernel_pmap, va)); 3649 KASSERT(va < PMAP_TRM_MIN_ADDRESS, 3650 ("pmap_enter: invalid to pmap_enter into trampoline (va: 0x%x)", 3651 va)); 3652 KASSERT(pmap != kernel_pmap || (m->oflags & VPO_UNMANAGED) != 0 || 3653 !VA_IS_CLEANMAP(va), 3654 ("pmap_enter: managed mapping within the clean submap")); 3655 if ((m->oflags & VPO_UNMANAGED) == 0) 3656 VM_PAGE_OBJECT_BUSY_ASSERT(m); 3657 KASSERT((flags & PMAP_ENTER_RESERVED) == 0, 3658 ("pmap_enter: flags %u has reserved bits set", flags)); 3659 pa = VM_PAGE_TO_PHYS(m); 3660 newpte = (pt_entry_t)(pa | PG_A | PG_V); 3661 if ((flags & VM_PROT_WRITE) != 0) 3662 newpte |= PG_M; 3663 if ((prot & VM_PROT_WRITE) != 0) 3664 newpte |= PG_RW; 3665 KASSERT((newpte & (PG_M | PG_RW)) != PG_M, 3666 ("pmap_enter: flags includes VM_PROT_WRITE but prot doesn't")); 3667 #ifdef PMAP_PAE_COMP 3668 if ((prot & VM_PROT_EXECUTE) == 0 && !i386_read_exec) 3669 newpte |= pg_nx; 3670 #endif 3671 if ((flags & PMAP_ENTER_WIRED) != 0) 3672 newpte |= PG_W; 3673 if (pmap != kernel_pmap) 3674 newpte |= PG_U; 3675 newpte |= pmap_cache_bits(pmap, m->md.pat_mode, psind > 0); 3676 if ((m->oflags & VPO_UNMANAGED) == 0) 3677 newpte |= PG_MANAGED; 3678 3679 rw_wlock(&pvh_global_lock); 3680 PMAP_LOCK(pmap); 3681 sched_pin(); 3682 if (psind == 1) { 3683 /* Assert the required virtual and physical alignment. */ 3684 KASSERT((va & PDRMASK) == 0, ("pmap_enter: va unaligned")); 3685 KASSERT(m->psind > 0, ("pmap_enter: m->psind < psind")); 3686 rv = pmap_enter_pde(pmap, va, newpte | PG_PS, flags, m); 3687 goto out; 3688 } 3689 3690 pde = pmap_pde(pmap, va); 3691 if (pmap != kernel_pmap) { 3692 /* 3693 * va is for UVA. 3694 * In the case that a page table page is not resident, 3695 * we are creating it here. pmap_allocpte() handles 3696 * demotion. 3697 */ 3698 mpte = pmap_allocpte(pmap, va, flags); 3699 if (mpte == NULL) { 3700 KASSERT((flags & PMAP_ENTER_NOSLEEP) != 0, 3701 ("pmap_allocpte failed with sleep allowed")); 3702 rv = KERN_RESOURCE_SHORTAGE; 3703 goto out; 3704 } 3705 } else { 3706 /* 3707 * va is for KVA, so pmap_demote_pde() will never fail 3708 * to install a page table page. PG_V is also 3709 * asserted by pmap_demote_pde(). 3710 */ 3711 mpte = NULL; 3712 KASSERT(pde != NULL && (*pde & PG_V) != 0, 3713 ("KVA %#x invalid pde pdir %#jx", va, 3714 (uintmax_t)pmap->pm_pdir[PTDPTDI])); 3715 if ((*pde & PG_PS) != 0) 3716 pmap_demote_pde(pmap, pde, va); 3717 } 3718 pte = pmap_pte_quick(pmap, va); 3719 3720 /* 3721 * Page Directory table entry is not valid, which should not 3722 * happen. We should have either allocated the page table 3723 * page or demoted the existing mapping above. 3724 */ 3725 if (pte == NULL) { 3726 panic("pmap_enter: invalid page directory pdir=%#jx, va=%#x", 3727 (uintmax_t)pmap->pm_pdir[PTDPTDI], va); 3728 } 3729 3730 origpte = *pte; 3731 pv = NULL; 3732 3733 /* 3734 * Is the specified virtual address already mapped? 3735 */ 3736 if ((origpte & PG_V) != 0) { 3737 /* 3738 * Wiring change, just update stats. We don't worry about 3739 * wiring PT pages as they remain resident as long as there 3740 * are valid mappings in them. Hence, if a user page is wired, 3741 * the PT page will be also. 3742 */ 3743 if ((newpte & PG_W) != 0 && (origpte & PG_W) == 0) 3744 pmap->pm_stats.wired_count++; 3745 else if ((newpte & PG_W) == 0 && (origpte & PG_W) != 0) 3746 pmap->pm_stats.wired_count--; 3747 3748 /* 3749 * Remove the extra PT page reference. 3750 */ 3751 if (mpte != NULL) { 3752 mpte->ref_count--; 3753 KASSERT(mpte->ref_count > 0, 3754 ("pmap_enter: missing reference to page table page," 3755 " va: 0x%x", va)); 3756 } 3757 3758 /* 3759 * Has the physical page changed? 3760 */ 3761 opa = origpte & PG_FRAME; 3762 if (opa == pa) { 3763 /* 3764 * No, might be a protection or wiring change. 3765 */ 3766 if ((origpte & PG_MANAGED) != 0 && 3767 (newpte & PG_RW) != 0) 3768 vm_page_aflag_set(m, PGA_WRITEABLE); 3769 if (((origpte ^ newpte) & ~(PG_M | PG_A)) == 0) 3770 goto unchanged; 3771 goto validate; 3772 } 3773 3774 /* 3775 * The physical page has changed. Temporarily invalidate 3776 * the mapping. This ensures that all threads sharing the 3777 * pmap keep a consistent view of the mapping, which is 3778 * necessary for the correct handling of COW faults. It 3779 * also permits reuse of the old mapping's PV entry, 3780 * avoiding an allocation. 3781 * 3782 * For consistency, handle unmanaged mappings the same way. 3783 */ 3784 origpte = pte_load_clear(pte); 3785 KASSERT((origpte & PG_FRAME) == opa, 3786 ("pmap_enter: unexpected pa update for %#x", va)); 3787 if ((origpte & PG_MANAGED) != 0) { 3788 om = PHYS_TO_VM_PAGE(opa); 3789 3790 /* 3791 * The pmap lock is sufficient to synchronize with 3792 * concurrent calls to pmap_page_test_mappings() and 3793 * pmap_ts_referenced(). 3794 */ 3795 if ((origpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 3796 vm_page_dirty(om); 3797 if ((origpte & PG_A) != 0) { 3798 pmap_invalidate_page_int(pmap, va); 3799 vm_page_aflag_set(om, PGA_REFERENCED); 3800 } 3801 pv = pmap_pvh_remove(&om->md, pmap, va); 3802 KASSERT(pv != NULL, 3803 ("pmap_enter: no PV entry for %#x", va)); 3804 if ((newpte & PG_MANAGED) == 0) 3805 free_pv_entry(pmap, pv); 3806 if ((om->a.flags & PGA_WRITEABLE) != 0 && 3807 TAILQ_EMPTY(&om->md.pv_list) && 3808 ((om->flags & PG_FICTITIOUS) != 0 || 3809 TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list))) 3810 vm_page_aflag_clear(om, PGA_WRITEABLE); 3811 } else { 3812 /* 3813 * Since this mapping is unmanaged, assume that PG_A 3814 * is set. 3815 */ 3816 pmap_invalidate_page_int(pmap, va); 3817 } 3818 origpte = 0; 3819 } else { 3820 /* 3821 * Increment the counters. 3822 */ 3823 if ((newpte & PG_W) != 0) 3824 pmap->pm_stats.wired_count++; 3825 pmap->pm_stats.resident_count++; 3826 } 3827 3828 /* 3829 * Enter on the PV list if part of our managed memory. 3830 */ 3831 if ((newpte & PG_MANAGED) != 0) { 3832 if (pv == NULL) { 3833 pv = get_pv_entry(pmap, FALSE); 3834 pv->pv_va = va; 3835 } 3836 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); 3837 if ((newpte & PG_RW) != 0) 3838 vm_page_aflag_set(m, PGA_WRITEABLE); 3839 } 3840 3841 /* 3842 * Update the PTE. 3843 */ 3844 if ((origpte & PG_V) != 0) { 3845 validate: 3846 origpte = pte_load_store(pte, newpte); 3847 KASSERT((origpte & PG_FRAME) == pa, 3848 ("pmap_enter: unexpected pa update for %#x", va)); 3849 if ((newpte & PG_M) == 0 && (origpte & (PG_M | PG_RW)) == 3850 (PG_M | PG_RW)) { 3851 if ((origpte & PG_MANAGED) != 0) 3852 vm_page_dirty(m); 3853 3854 /* 3855 * Although the PTE may still have PG_RW set, TLB 3856 * invalidation may nonetheless be required because 3857 * the PTE no longer has PG_M set. 3858 */ 3859 } 3860 #ifdef PMAP_PAE_COMP 3861 else if ((origpte & PG_NX) != 0 || (newpte & PG_NX) == 0) { 3862 /* 3863 * This PTE change does not require TLB invalidation. 3864 */ 3865 goto unchanged; 3866 } 3867 #endif 3868 if ((origpte & PG_A) != 0) 3869 pmap_invalidate_page_int(pmap, va); 3870 } else 3871 pte_store_zero(pte, newpte); 3872 3873 unchanged: 3874 3875 #if VM_NRESERVLEVEL > 0 3876 /* 3877 * If both the page table page and the reservation are fully 3878 * populated, then attempt promotion. 3879 */ 3880 if ((mpte == NULL || mpte->ref_count == NPTEPG) && 3881 pg_ps_enabled && (m->flags & PG_FICTITIOUS) == 0 && 3882 vm_reserv_level_iffullpop(m) == 0) 3883 pmap_promote_pde(pmap, pde, va); 3884 #endif 3885 3886 rv = KERN_SUCCESS; 3887 out: 3888 sched_unpin(); 3889 rw_wunlock(&pvh_global_lock); 3890 PMAP_UNLOCK(pmap); 3891 return (rv); 3892 } 3893 3894 /* 3895 * Tries to create a read- and/or execute-only 2 or 4 MB page mapping. Returns 3896 * true if successful. Returns false if (1) a mapping already exists at the 3897 * specified virtual address or (2) a PV entry cannot be allocated without 3898 * reclaiming another PV entry. 3899 */ 3900 static bool 3901 pmap_enter_4mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot) 3902 { 3903 pd_entry_t newpde; 3904 3905 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3906 newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(pmap, m->md.pat_mode, 1) | 3907 PG_PS | PG_V; 3908 if ((m->oflags & VPO_UNMANAGED) == 0) 3909 newpde |= PG_MANAGED; 3910 #ifdef PMAP_PAE_COMP 3911 if ((prot & VM_PROT_EXECUTE) == 0 && !i386_read_exec) 3912 newpde |= pg_nx; 3913 #endif 3914 if (pmap != kernel_pmap) 3915 newpde |= PG_U; 3916 return (pmap_enter_pde(pmap, va, newpde, PMAP_ENTER_NOSLEEP | 3917 PMAP_ENTER_NOREPLACE | PMAP_ENTER_NORECLAIM, NULL) == 3918 KERN_SUCCESS); 3919 } 3920 3921 /* 3922 * Returns true if every page table entry in the page table page that maps 3923 * the specified kernel virtual address is zero. 3924 */ 3925 static bool 3926 pmap_every_pte_zero(vm_offset_t va) 3927 { 3928 pt_entry_t *pt_end, *pte; 3929 3930 KASSERT((va & PDRMASK) == 0, ("va is misaligned")); 3931 pte = vtopte(va); 3932 for (pt_end = pte + NPTEPG; pte < pt_end; pte++) { 3933 if (*pte != 0) 3934 return (false); 3935 } 3936 return (true); 3937 } 3938 3939 /* 3940 * Tries to create the specified 2 or 4 MB page mapping. Returns KERN_SUCCESS 3941 * if the mapping was created, and either KERN_FAILURE or 3942 * KERN_RESOURCE_SHORTAGE otherwise. Returns KERN_FAILURE if 3943 * PMAP_ENTER_NOREPLACE was specified and a mapping already exists at the 3944 * specified virtual address. Returns KERN_RESOURCE_SHORTAGE if 3945 * PMAP_ENTER_NORECLAIM was specified and a PV entry allocation failed. 3946 * 3947 * The parameter "m" is only used when creating a managed, writeable mapping. 3948 */ 3949 static int 3950 pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde, u_int flags, 3951 vm_page_t m) 3952 { 3953 struct spglist free; 3954 pd_entry_t oldpde, *pde; 3955 vm_page_t mt; 3956 3957 rw_assert(&pvh_global_lock, RA_WLOCKED); 3958 KASSERT((newpde & (PG_M | PG_RW)) != PG_RW, 3959 ("pmap_enter_pde: newpde is missing PG_M")); 3960 KASSERT(pmap == kernel_pmap || (newpde & PG_W) == 0, 3961 ("pmap_enter_pde: cannot create wired user mapping")); 3962 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3963 pde = pmap_pde(pmap, va); 3964 oldpde = *pde; 3965 if ((oldpde & PG_V) != 0) { 3966 if ((flags & PMAP_ENTER_NOREPLACE) != 0 && (pmap != 3967 kernel_pmap || (oldpde & PG_PS) != 0 || 3968 !pmap_every_pte_zero(va))) { 3969 CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx" 3970 " in pmap %p", va, pmap); 3971 return (KERN_FAILURE); 3972 } 3973 /* Break the existing mapping(s). */ 3974 SLIST_INIT(&free); 3975 if ((oldpde & PG_PS) != 0) { 3976 /* 3977 * If the PDE resulted from a promotion, then a 3978 * reserved PT page could be freed. 3979 */ 3980 (void)pmap_remove_pde(pmap, pde, va, &free); 3981 if ((oldpde & PG_G) == 0) 3982 pmap_invalidate_pde_page(pmap, va, oldpde); 3983 } else { 3984 if (pmap_remove_ptes(pmap, va, va + NBPDR, &free)) 3985 pmap_invalidate_all_int(pmap); 3986 } 3987 if (pmap != kernel_pmap) { 3988 vm_page_free_pages_toq(&free, true); 3989 KASSERT(*pde == 0, ("pmap_enter_pde: non-zero pde %p", 3990 pde)); 3991 } else { 3992 KASSERT(SLIST_EMPTY(&free), 3993 ("pmap_enter_pde: freed kernel page table page")); 3994 3995 /* 3996 * Both pmap_remove_pde() and pmap_remove_ptes() will 3997 * leave the kernel page table page zero filled. 3998 */ 3999 mt = PHYS_TO_VM_PAGE(*pde & PG_FRAME); 4000 if (pmap_insert_pt_page(pmap, mt, false)) 4001 panic("pmap_enter_pde: trie insert failed"); 4002 } 4003 } 4004 if ((newpde & PG_MANAGED) != 0) { 4005 /* 4006 * Abort this mapping if its PV entry could not be created. 4007 */ 4008 if (!pmap_pv_insert_pde(pmap, va, newpde, flags)) { 4009 CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx" 4010 " in pmap %p", va, pmap); 4011 return (KERN_RESOURCE_SHORTAGE); 4012 } 4013 if ((newpde & PG_RW) != 0) { 4014 for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++) 4015 vm_page_aflag_set(mt, PGA_WRITEABLE); 4016 } 4017 } 4018 4019 /* 4020 * Increment counters. 4021 */ 4022 if ((newpde & PG_W) != 0) 4023 pmap->pm_stats.wired_count += NBPDR / PAGE_SIZE; 4024 pmap->pm_stats.resident_count += NBPDR / PAGE_SIZE; 4025 4026 /* 4027 * Map the superpage. (This is not a promoted mapping; there will not 4028 * be any lingering 4KB page mappings in the TLB.) 4029 */ 4030 pde_store(pde, newpde); 4031 4032 pmap_pde_mappings++; 4033 CTR2(KTR_PMAP, "pmap_enter_pde: success for va %#lx in pmap %p", 4034 va, pmap); 4035 return (KERN_SUCCESS); 4036 } 4037 4038 /* 4039 * Maps a sequence of resident pages belonging to the same object. 4040 * The sequence begins with the given page m_start. This page is 4041 * mapped at the given virtual address start. Each subsequent page is 4042 * mapped at a virtual address that is offset from start by the same 4043 * amount as the page is offset from m_start within the object. The 4044 * last page in the sequence is the page with the largest offset from 4045 * m_start that can be mapped at a virtual address less than the given 4046 * virtual address end. Not every virtual page between start and end 4047 * is mapped; only those for which a resident page exists with the 4048 * corresponding offset from m_start are mapped. 4049 */ 4050 static void 4051 __CONCAT(PMTYPE, enter_object)(pmap_t pmap, vm_offset_t start, vm_offset_t end, 4052 vm_page_t m_start, vm_prot_t prot) 4053 { 4054 vm_offset_t va; 4055 vm_page_t m, mpte; 4056 vm_pindex_t diff, psize; 4057 4058 VM_OBJECT_ASSERT_LOCKED(m_start->object); 4059 4060 psize = atop(end - start); 4061 mpte = NULL; 4062 m = m_start; 4063 rw_wlock(&pvh_global_lock); 4064 PMAP_LOCK(pmap); 4065 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 4066 va = start + ptoa(diff); 4067 if ((va & PDRMASK) == 0 && va + NBPDR <= end && 4068 m->psind == 1 && pg_ps_enabled && 4069 pmap_enter_4mpage(pmap, va, m, prot)) 4070 m = &m[NBPDR / PAGE_SIZE - 1]; 4071 else 4072 mpte = pmap_enter_quick_locked(pmap, va, m, prot, 4073 mpte); 4074 m = TAILQ_NEXT(m, listq); 4075 } 4076 rw_wunlock(&pvh_global_lock); 4077 PMAP_UNLOCK(pmap); 4078 } 4079 4080 /* 4081 * this code makes some *MAJOR* assumptions: 4082 * 1. Current pmap & pmap exists. 4083 * 2. Not wired. 4084 * 3. Read access. 4085 * 4. No page table pages. 4086 * but is *MUCH* faster than pmap_enter... 4087 */ 4088 4089 static void 4090 __CONCAT(PMTYPE, enter_quick)(pmap_t pmap, vm_offset_t va, vm_page_t m, 4091 vm_prot_t prot) 4092 { 4093 4094 rw_wlock(&pvh_global_lock); 4095 PMAP_LOCK(pmap); 4096 (void)pmap_enter_quick_locked(pmap, va, m, prot, NULL); 4097 rw_wunlock(&pvh_global_lock); 4098 PMAP_UNLOCK(pmap); 4099 } 4100 4101 static vm_page_t 4102 pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, 4103 vm_prot_t prot, vm_page_t mpte) 4104 { 4105 pt_entry_t newpte, *pte; 4106 4107 KASSERT(pmap != kernel_pmap || !VA_IS_CLEANMAP(va) || 4108 (m->oflags & VPO_UNMANAGED) != 0, 4109 ("pmap_enter_quick_locked: managed mapping within the clean submap")); 4110 rw_assert(&pvh_global_lock, RA_WLOCKED); 4111 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 4112 4113 /* 4114 * In the case that a page table page is not 4115 * resident, we are creating it here. 4116 */ 4117 if (pmap != kernel_pmap) { 4118 u_int ptepindex; 4119 pd_entry_t ptepa; 4120 4121 /* 4122 * Calculate pagetable page index 4123 */ 4124 ptepindex = va >> PDRSHIFT; 4125 if (mpte && (mpte->pindex == ptepindex)) { 4126 mpte->ref_count++; 4127 } else { 4128 /* 4129 * Get the page directory entry 4130 */ 4131 ptepa = pmap->pm_pdir[ptepindex]; 4132 4133 /* 4134 * If the page table page is mapped, we just increment 4135 * the hold count, and activate it. 4136 */ 4137 if (ptepa) { 4138 if (ptepa & PG_PS) 4139 return (NULL); 4140 mpte = PHYS_TO_VM_PAGE(ptepa & PG_FRAME); 4141 mpte->ref_count++; 4142 } else { 4143 mpte = _pmap_allocpte(pmap, ptepindex, 4144 PMAP_ENTER_NOSLEEP); 4145 if (mpte == NULL) 4146 return (mpte); 4147 } 4148 } 4149 } else { 4150 mpte = NULL; 4151 } 4152 4153 sched_pin(); 4154 pte = pmap_pte_quick(pmap, va); 4155 if (*pte) { 4156 if (mpte != NULL) 4157 mpte->ref_count--; 4158 sched_unpin(); 4159 return (NULL); 4160 } 4161 4162 /* 4163 * Enter on the PV list if part of our managed memory. 4164 */ 4165 if ((m->oflags & VPO_UNMANAGED) == 0 && 4166 !pmap_try_insert_pv_entry(pmap, va, m)) { 4167 if (mpte != NULL) 4168 pmap_abort_ptp(pmap, va, mpte); 4169 sched_unpin(); 4170 return (NULL); 4171 } 4172 4173 /* 4174 * Increment counters 4175 */ 4176 pmap->pm_stats.resident_count++; 4177 4178 newpte = VM_PAGE_TO_PHYS(m) | PG_V | 4179 pmap_cache_bits(pmap, m->md.pat_mode, 0); 4180 if ((m->oflags & VPO_UNMANAGED) == 0) 4181 newpte |= PG_MANAGED; 4182 #ifdef PMAP_PAE_COMP 4183 if ((prot & VM_PROT_EXECUTE) == 0 && !i386_read_exec) 4184 newpte |= pg_nx; 4185 #endif 4186 if (pmap != kernel_pmap) 4187 newpte |= PG_U; 4188 pte_store_zero(pte, newpte); 4189 sched_unpin(); 4190 return (mpte); 4191 } 4192 4193 /* 4194 * Make a temporary mapping for a physical address. This is only intended 4195 * to be used for panic dumps. 4196 */ 4197 static void * 4198 __CONCAT(PMTYPE, kenter_temporary)(vm_paddr_t pa, int i) 4199 { 4200 vm_offset_t va; 4201 4202 va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE); 4203 pmap_kenter(va, pa); 4204 invlpg(va); 4205 return ((void *)crashdumpmap); 4206 } 4207 4208 /* 4209 * This code maps large physical mmap regions into the 4210 * processor address space. Note that some shortcuts 4211 * are taken, but the code works. 4212 */ 4213 static void 4214 __CONCAT(PMTYPE, object_init_pt)(pmap_t pmap, vm_offset_t addr, 4215 vm_object_t object, vm_pindex_t pindex, vm_size_t size) 4216 { 4217 pd_entry_t *pde; 4218 vm_paddr_t pa, ptepa; 4219 vm_page_t p; 4220 int pat_mode; 4221 4222 VM_OBJECT_ASSERT_WLOCKED(object); 4223 KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG, 4224 ("pmap_object_init_pt: non-device object")); 4225 if (pg_ps_enabled && 4226 (addr & (NBPDR - 1)) == 0 && (size & (NBPDR - 1)) == 0) { 4227 if (!vm_object_populate(object, pindex, pindex + atop(size))) 4228 return; 4229 p = vm_page_lookup(object, pindex); 4230 KASSERT(p->valid == VM_PAGE_BITS_ALL, 4231 ("pmap_object_init_pt: invalid page %p", p)); 4232 pat_mode = p->md.pat_mode; 4233 4234 /* 4235 * Abort the mapping if the first page is not physically 4236 * aligned to a 2/4MB page boundary. 4237 */ 4238 ptepa = VM_PAGE_TO_PHYS(p); 4239 if (ptepa & (NBPDR - 1)) 4240 return; 4241 4242 /* 4243 * Skip the first page. Abort the mapping if the rest of 4244 * the pages are not physically contiguous or have differing 4245 * memory attributes. 4246 */ 4247 p = TAILQ_NEXT(p, listq); 4248 for (pa = ptepa + PAGE_SIZE; pa < ptepa + size; 4249 pa += PAGE_SIZE) { 4250 KASSERT(p->valid == VM_PAGE_BITS_ALL, 4251 ("pmap_object_init_pt: invalid page %p", p)); 4252 if (pa != VM_PAGE_TO_PHYS(p) || 4253 pat_mode != p->md.pat_mode) 4254 return; 4255 p = TAILQ_NEXT(p, listq); 4256 } 4257 4258 /* 4259 * Map using 2/4MB pages. Since "ptepa" is 2/4M aligned and 4260 * "size" is a multiple of 2/4M, adding the PAT setting to 4261 * "pa" will not affect the termination of this loop. 4262 */ 4263 PMAP_LOCK(pmap); 4264 for (pa = ptepa | pmap_cache_bits(pmap, pat_mode, 1); 4265 pa < ptepa + size; pa += NBPDR) { 4266 pde = pmap_pde(pmap, addr); 4267 if (*pde == 0) { 4268 pde_store(pde, pa | PG_PS | PG_M | PG_A | 4269 PG_U | PG_RW | PG_V); 4270 pmap->pm_stats.resident_count += NBPDR / 4271 PAGE_SIZE; 4272 pmap_pde_mappings++; 4273 } 4274 /* Else continue on if the PDE is already valid. */ 4275 addr += NBPDR; 4276 } 4277 PMAP_UNLOCK(pmap); 4278 } 4279 } 4280 4281 /* 4282 * Clear the wired attribute from the mappings for the specified range of 4283 * addresses in the given pmap. Every valid mapping within that range 4284 * must have the wired attribute set. In contrast, invalid mappings 4285 * cannot have the wired attribute set, so they are ignored. 4286 * 4287 * The wired attribute of the page table entry is not a hardware feature, 4288 * so there is no need to invalidate any TLB entries. 4289 */ 4290 static void 4291 __CONCAT(PMTYPE, unwire)(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 4292 { 4293 vm_offset_t pdnxt; 4294 pd_entry_t *pde; 4295 pt_entry_t *pte; 4296 boolean_t pv_lists_locked; 4297 4298 if (pmap_is_current(pmap)) 4299 pv_lists_locked = FALSE; 4300 else { 4301 pv_lists_locked = TRUE; 4302 resume: 4303 rw_wlock(&pvh_global_lock); 4304 sched_pin(); 4305 } 4306 PMAP_LOCK(pmap); 4307 for (; sva < eva; sva = pdnxt) { 4308 pdnxt = (sva + NBPDR) & ~PDRMASK; 4309 if (pdnxt < sva) 4310 pdnxt = eva; 4311 pde = pmap_pde(pmap, sva); 4312 if ((*pde & PG_V) == 0) 4313 continue; 4314 if ((*pde & PG_PS) != 0) { 4315 if ((*pde & PG_W) == 0) 4316 panic("pmap_unwire: pde %#jx is missing PG_W", 4317 (uintmax_t)*pde); 4318 4319 /* 4320 * Are we unwiring the entire large page? If not, 4321 * demote the mapping and fall through. 4322 */ 4323 if (sva + NBPDR == pdnxt && eva >= pdnxt) { 4324 /* 4325 * Regardless of whether a pde (or pte) is 32 4326 * or 64 bits in size, PG_W is among the least 4327 * significant 32 bits. 4328 */ 4329 atomic_clear_int((u_int *)pde, PG_W); 4330 pmap->pm_stats.wired_count -= NBPDR / 4331 PAGE_SIZE; 4332 continue; 4333 } else { 4334 if (!pv_lists_locked) { 4335 pv_lists_locked = TRUE; 4336 if (!rw_try_wlock(&pvh_global_lock)) { 4337 PMAP_UNLOCK(pmap); 4338 /* Repeat sva. */ 4339 goto resume; 4340 } 4341 sched_pin(); 4342 } 4343 if (!pmap_demote_pde(pmap, pde, sva)) 4344 panic("pmap_unwire: demotion failed"); 4345 } 4346 } 4347 if (pdnxt > eva) 4348 pdnxt = eva; 4349 for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++, 4350 sva += PAGE_SIZE) { 4351 if ((*pte & PG_V) == 0) 4352 continue; 4353 if ((*pte & PG_W) == 0) 4354 panic("pmap_unwire: pte %#jx is missing PG_W", 4355 (uintmax_t)*pte); 4356 4357 /* 4358 * PG_W must be cleared atomically. Although the pmap 4359 * lock synchronizes access to PG_W, another processor 4360 * could be setting PG_M and/or PG_A concurrently. 4361 * 4362 * PG_W is among the least significant 32 bits. 4363 */ 4364 atomic_clear_int((u_int *)pte, PG_W); 4365 pmap->pm_stats.wired_count--; 4366 } 4367 } 4368 if (pv_lists_locked) { 4369 sched_unpin(); 4370 rw_wunlock(&pvh_global_lock); 4371 } 4372 PMAP_UNLOCK(pmap); 4373 } 4374 4375 /* 4376 * Copy the range specified by src_addr/len 4377 * from the source map to the range dst_addr/len 4378 * in the destination map. 4379 * 4380 * This routine is only advisory and need not do anything. Since 4381 * current pmap is always the kernel pmap when executing in 4382 * kernel, and we do not copy from the kernel pmap to a user 4383 * pmap, this optimization is not usable in 4/4G full split i386 4384 * world. 4385 */ 4386 4387 static void 4388 __CONCAT(PMTYPE, copy)(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, 4389 vm_size_t len, vm_offset_t src_addr) 4390 { 4391 pt_entry_t *src_pte, *dst_pte, ptetemp; 4392 pd_entry_t srcptepaddr; 4393 vm_page_t dstmpte, srcmpte; 4394 vm_offset_t addr, end_addr, pdnxt; 4395 u_int ptepindex; 4396 4397 if (dst_addr != src_addr) 4398 return; 4399 4400 end_addr = src_addr + len; 4401 4402 rw_wlock(&pvh_global_lock); 4403 if (dst_pmap < src_pmap) { 4404 PMAP_LOCK(dst_pmap); 4405 PMAP_LOCK(src_pmap); 4406 } else { 4407 PMAP_LOCK(src_pmap); 4408 PMAP_LOCK(dst_pmap); 4409 } 4410 sched_pin(); 4411 for (addr = src_addr; addr < end_addr; addr = pdnxt) { 4412 KASSERT(addr < PMAP_TRM_MIN_ADDRESS, 4413 ("pmap_copy: invalid to pmap_copy the trampoline")); 4414 4415 pdnxt = (addr + NBPDR) & ~PDRMASK; 4416 if (pdnxt < addr) 4417 pdnxt = end_addr; 4418 ptepindex = addr >> PDRSHIFT; 4419 4420 srcptepaddr = src_pmap->pm_pdir[ptepindex]; 4421 if (srcptepaddr == 0) 4422 continue; 4423 4424 if (srcptepaddr & PG_PS) { 4425 if ((addr & PDRMASK) != 0 || addr + NBPDR > end_addr) 4426 continue; 4427 if (dst_pmap->pm_pdir[ptepindex] == 0 && 4428 ((srcptepaddr & PG_MANAGED) == 0 || 4429 pmap_pv_insert_pde(dst_pmap, addr, srcptepaddr, 4430 PMAP_ENTER_NORECLAIM))) { 4431 dst_pmap->pm_pdir[ptepindex] = srcptepaddr & 4432 ~PG_W; 4433 dst_pmap->pm_stats.resident_count += 4434 NBPDR / PAGE_SIZE; 4435 pmap_pde_mappings++; 4436 } 4437 continue; 4438 } 4439 4440 srcmpte = PHYS_TO_VM_PAGE(srcptepaddr & PG_FRAME); 4441 KASSERT(srcmpte->ref_count > 0, 4442 ("pmap_copy: source page table page is unused")); 4443 4444 if (pdnxt > end_addr) 4445 pdnxt = end_addr; 4446 4447 src_pte = pmap_pte_quick3(src_pmap, addr); 4448 while (addr < pdnxt) { 4449 ptetemp = *src_pte; 4450 /* 4451 * we only virtual copy managed pages 4452 */ 4453 if ((ptetemp & PG_MANAGED) != 0) { 4454 dstmpte = pmap_allocpte(dst_pmap, addr, 4455 PMAP_ENTER_NOSLEEP); 4456 if (dstmpte == NULL) 4457 goto out; 4458 dst_pte = pmap_pte_quick(dst_pmap, addr); 4459 if (*dst_pte == 0 && 4460 pmap_try_insert_pv_entry(dst_pmap, addr, 4461 PHYS_TO_VM_PAGE(ptetemp & PG_FRAME))) { 4462 /* 4463 * Clear the wired, modified, and 4464 * accessed (referenced) bits 4465 * during the copy. 4466 */ 4467 *dst_pte = ptetemp & ~(PG_W | PG_M | 4468 PG_A); 4469 dst_pmap->pm_stats.resident_count++; 4470 } else { 4471 pmap_abort_ptp(dst_pmap, addr, dstmpte); 4472 goto out; 4473 } 4474 if (dstmpte->ref_count >= srcmpte->ref_count) 4475 break; 4476 } 4477 addr += PAGE_SIZE; 4478 src_pte++; 4479 } 4480 } 4481 out: 4482 sched_unpin(); 4483 rw_wunlock(&pvh_global_lock); 4484 PMAP_UNLOCK(src_pmap); 4485 PMAP_UNLOCK(dst_pmap); 4486 } 4487 4488 /* 4489 * Zero 1 page of virtual memory mapped from a hardware page by the caller. 4490 */ 4491 static __inline void 4492 pagezero(void *page) 4493 { 4494 #if defined(I686_CPU) 4495 if (cpu_class == CPUCLASS_686) { 4496 if (cpu_feature & CPUID_SSE2) 4497 sse2_pagezero(page); 4498 else 4499 i686_pagezero(page); 4500 } else 4501 #endif 4502 bzero(page, PAGE_SIZE); 4503 } 4504 4505 /* 4506 * Zero the specified hardware page. 4507 */ 4508 static void 4509 __CONCAT(PMTYPE, zero_page)(vm_page_t m) 4510 { 4511 pt_entry_t *cmap_pte2; 4512 struct pcpu *pc; 4513 4514 sched_pin(); 4515 pc = get_pcpu(); 4516 cmap_pte2 = pc->pc_cmap_pte2; 4517 mtx_lock(&pc->pc_cmap_lock); 4518 if (*cmap_pte2) 4519 panic("pmap_zero_page: CMAP2 busy"); 4520 *cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M | 4521 pmap_cache_bits(kernel_pmap, m->md.pat_mode, 0); 4522 invlcaddr(pc->pc_cmap_addr2); 4523 pagezero(pc->pc_cmap_addr2); 4524 *cmap_pte2 = 0; 4525 4526 /* 4527 * Unpin the thread before releasing the lock. Otherwise the thread 4528 * could be rescheduled while still bound to the current CPU, only 4529 * to unpin itself immediately upon resuming execution. 4530 */ 4531 sched_unpin(); 4532 mtx_unlock(&pc->pc_cmap_lock); 4533 } 4534 4535 /* 4536 * Zero an area within a single hardware page. off and size must not 4537 * cover an area beyond a single hardware page. 4538 */ 4539 static void 4540 __CONCAT(PMTYPE, zero_page_area)(vm_page_t m, int off, int size) 4541 { 4542 pt_entry_t *cmap_pte2; 4543 struct pcpu *pc; 4544 4545 sched_pin(); 4546 pc = get_pcpu(); 4547 cmap_pte2 = pc->pc_cmap_pte2; 4548 mtx_lock(&pc->pc_cmap_lock); 4549 if (*cmap_pte2) 4550 panic("pmap_zero_page_area: CMAP2 busy"); 4551 *cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M | 4552 pmap_cache_bits(kernel_pmap, m->md.pat_mode, 0); 4553 invlcaddr(pc->pc_cmap_addr2); 4554 if (off == 0 && size == PAGE_SIZE) 4555 pagezero(pc->pc_cmap_addr2); 4556 else 4557 bzero(pc->pc_cmap_addr2 + off, size); 4558 *cmap_pte2 = 0; 4559 sched_unpin(); 4560 mtx_unlock(&pc->pc_cmap_lock); 4561 } 4562 4563 /* 4564 * Copy 1 specified hardware page to another. 4565 */ 4566 static void 4567 __CONCAT(PMTYPE, copy_page)(vm_page_t src, vm_page_t dst) 4568 { 4569 pt_entry_t *cmap_pte1, *cmap_pte2; 4570 struct pcpu *pc; 4571 4572 sched_pin(); 4573 pc = get_pcpu(); 4574 cmap_pte1 = pc->pc_cmap_pte1; 4575 cmap_pte2 = pc->pc_cmap_pte2; 4576 mtx_lock(&pc->pc_cmap_lock); 4577 if (*cmap_pte1) 4578 panic("pmap_copy_page: CMAP1 busy"); 4579 if (*cmap_pte2) 4580 panic("pmap_copy_page: CMAP2 busy"); 4581 *cmap_pte1 = PG_V | VM_PAGE_TO_PHYS(src) | PG_A | 4582 pmap_cache_bits(kernel_pmap, src->md.pat_mode, 0); 4583 invlcaddr(pc->pc_cmap_addr1); 4584 *cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(dst) | PG_A | PG_M | 4585 pmap_cache_bits(kernel_pmap, dst->md.pat_mode, 0); 4586 invlcaddr(pc->pc_cmap_addr2); 4587 bcopy(pc->pc_cmap_addr1, pc->pc_cmap_addr2, PAGE_SIZE); 4588 *cmap_pte1 = 0; 4589 *cmap_pte2 = 0; 4590 sched_unpin(); 4591 mtx_unlock(&pc->pc_cmap_lock); 4592 } 4593 4594 static void 4595 __CONCAT(PMTYPE, copy_pages)(vm_page_t ma[], vm_offset_t a_offset, 4596 vm_page_t mb[], vm_offset_t b_offset, int xfersize) 4597 { 4598 vm_page_t a_pg, b_pg; 4599 char *a_cp, *b_cp; 4600 vm_offset_t a_pg_offset, b_pg_offset; 4601 pt_entry_t *cmap_pte1, *cmap_pte2; 4602 struct pcpu *pc; 4603 int cnt; 4604 4605 sched_pin(); 4606 pc = get_pcpu(); 4607 cmap_pte1 = pc->pc_cmap_pte1; 4608 cmap_pte2 = pc->pc_cmap_pte2; 4609 mtx_lock(&pc->pc_cmap_lock); 4610 if (*cmap_pte1 != 0) 4611 panic("pmap_copy_pages: CMAP1 busy"); 4612 if (*cmap_pte2 != 0) 4613 panic("pmap_copy_pages: CMAP2 busy"); 4614 while (xfersize > 0) { 4615 a_pg = ma[a_offset >> PAGE_SHIFT]; 4616 a_pg_offset = a_offset & PAGE_MASK; 4617 cnt = min(xfersize, PAGE_SIZE - a_pg_offset); 4618 b_pg = mb[b_offset >> PAGE_SHIFT]; 4619 b_pg_offset = b_offset & PAGE_MASK; 4620 cnt = min(cnt, PAGE_SIZE - b_pg_offset); 4621 *cmap_pte1 = PG_V | VM_PAGE_TO_PHYS(a_pg) | PG_A | 4622 pmap_cache_bits(kernel_pmap, a_pg->md.pat_mode, 0); 4623 invlcaddr(pc->pc_cmap_addr1); 4624 *cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(b_pg) | PG_A | 4625 PG_M | pmap_cache_bits(kernel_pmap, b_pg->md.pat_mode, 0); 4626 invlcaddr(pc->pc_cmap_addr2); 4627 a_cp = pc->pc_cmap_addr1 + a_pg_offset; 4628 b_cp = pc->pc_cmap_addr2 + b_pg_offset; 4629 bcopy(a_cp, b_cp, cnt); 4630 a_offset += cnt; 4631 b_offset += cnt; 4632 xfersize -= cnt; 4633 } 4634 *cmap_pte1 = 0; 4635 *cmap_pte2 = 0; 4636 sched_unpin(); 4637 mtx_unlock(&pc->pc_cmap_lock); 4638 } 4639 4640 /* 4641 * Returns true if the pmap's pv is one of the first 4642 * 16 pvs linked to from this page. This count may 4643 * be changed upwards or downwards in the future; it 4644 * is only necessary that true be returned for a small 4645 * subset of pmaps for proper page aging. 4646 */ 4647 static boolean_t 4648 __CONCAT(PMTYPE, page_exists_quick)(pmap_t pmap, vm_page_t m) 4649 { 4650 struct md_page *pvh; 4651 pv_entry_t pv; 4652 int loops = 0; 4653 boolean_t rv; 4654 4655 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 4656 ("pmap_page_exists_quick: page %p is not managed", m)); 4657 rv = FALSE; 4658 rw_wlock(&pvh_global_lock); 4659 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { 4660 if (PV_PMAP(pv) == pmap) { 4661 rv = TRUE; 4662 break; 4663 } 4664 loops++; 4665 if (loops >= 16) 4666 break; 4667 } 4668 if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) { 4669 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 4670 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { 4671 if (PV_PMAP(pv) == pmap) { 4672 rv = TRUE; 4673 break; 4674 } 4675 loops++; 4676 if (loops >= 16) 4677 break; 4678 } 4679 } 4680 rw_wunlock(&pvh_global_lock); 4681 return (rv); 4682 } 4683 4684 /* 4685 * pmap_page_wired_mappings: 4686 * 4687 * Return the number of managed mappings to the given physical page 4688 * that are wired. 4689 */ 4690 static int 4691 __CONCAT(PMTYPE, page_wired_mappings)(vm_page_t m) 4692 { 4693 int count; 4694 4695 count = 0; 4696 if ((m->oflags & VPO_UNMANAGED) != 0) 4697 return (count); 4698 rw_wlock(&pvh_global_lock); 4699 count = pmap_pvh_wired_mappings(&m->md, count); 4700 if ((m->flags & PG_FICTITIOUS) == 0) { 4701 count = pmap_pvh_wired_mappings(pa_to_pvh(VM_PAGE_TO_PHYS(m)), 4702 count); 4703 } 4704 rw_wunlock(&pvh_global_lock); 4705 return (count); 4706 } 4707 4708 /* 4709 * pmap_pvh_wired_mappings: 4710 * 4711 * Return the updated number "count" of managed mappings that are wired. 4712 */ 4713 static int 4714 pmap_pvh_wired_mappings(struct md_page *pvh, int count) 4715 { 4716 pmap_t pmap; 4717 pt_entry_t *pte; 4718 pv_entry_t pv; 4719 4720 rw_assert(&pvh_global_lock, RA_WLOCKED); 4721 sched_pin(); 4722 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { 4723 pmap = PV_PMAP(pv); 4724 PMAP_LOCK(pmap); 4725 pte = pmap_pte_quick(pmap, pv->pv_va); 4726 if ((*pte & PG_W) != 0) 4727 count++; 4728 PMAP_UNLOCK(pmap); 4729 } 4730 sched_unpin(); 4731 return (count); 4732 } 4733 4734 /* 4735 * Returns TRUE if the given page is mapped individually or as part of 4736 * a 4mpage. Otherwise, returns FALSE. 4737 */ 4738 static boolean_t 4739 __CONCAT(PMTYPE, page_is_mapped)(vm_page_t m) 4740 { 4741 boolean_t rv; 4742 4743 if ((m->oflags & VPO_UNMANAGED) != 0) 4744 return (FALSE); 4745 rw_wlock(&pvh_global_lock); 4746 rv = !TAILQ_EMPTY(&m->md.pv_list) || 4747 ((m->flags & PG_FICTITIOUS) == 0 && 4748 !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list)); 4749 rw_wunlock(&pvh_global_lock); 4750 return (rv); 4751 } 4752 4753 /* 4754 * Remove all pages from specified address space 4755 * this aids process exit speeds. Also, this code 4756 * is special cased for current process only, but 4757 * can have the more generic (and slightly slower) 4758 * mode enabled. This is much faster than pmap_remove 4759 * in the case of running down an entire address space. 4760 */ 4761 static void 4762 __CONCAT(PMTYPE, remove_pages)(pmap_t pmap) 4763 { 4764 pt_entry_t *pte, tpte; 4765 vm_page_t m, mpte, mt; 4766 pv_entry_t pv; 4767 struct md_page *pvh; 4768 struct pv_chunk *pc, *npc; 4769 struct spglist free; 4770 int field, idx; 4771 int32_t bit; 4772 uint32_t inuse, bitmask; 4773 int allfree; 4774 4775 if (pmap != PCPU_GET(curpmap)) { 4776 printf("warning: pmap_remove_pages called with non-current pmap\n"); 4777 return; 4778 } 4779 SLIST_INIT(&free); 4780 rw_wlock(&pvh_global_lock); 4781 PMAP_LOCK(pmap); 4782 sched_pin(); 4783 TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) { 4784 KASSERT(pc->pc_pmap == pmap, ("Wrong pmap %p %p", pmap, 4785 pc->pc_pmap)); 4786 allfree = 1; 4787 for (field = 0; field < _NPCM; field++) { 4788 inuse = ~pc->pc_map[field] & pc_freemask[field]; 4789 while (inuse != 0) { 4790 bit = bsfl(inuse); 4791 bitmask = 1UL << bit; 4792 idx = field * 32 + bit; 4793 pv = &pc->pc_pventry[idx]; 4794 inuse &= ~bitmask; 4795 4796 pte = pmap_pde(pmap, pv->pv_va); 4797 tpte = *pte; 4798 if ((tpte & PG_PS) == 0) { 4799 pte = pmap_pte_quick(pmap, pv->pv_va); 4800 tpte = *pte & ~PG_PTE_PAT; 4801 } 4802 4803 if (tpte == 0) { 4804 printf( 4805 "TPTE at %p IS ZERO @ VA %08x\n", 4806 pte, pv->pv_va); 4807 panic("bad pte"); 4808 } 4809 4810 /* 4811 * We cannot remove wired pages from a process' mapping at this time 4812 */ 4813 if (tpte & PG_W) { 4814 allfree = 0; 4815 continue; 4816 } 4817 4818 m = PHYS_TO_VM_PAGE(tpte & PG_FRAME); 4819 KASSERT(m->phys_addr == (tpte & PG_FRAME), 4820 ("vm_page_t %p phys_addr mismatch %016jx %016jx", 4821 m, (uintmax_t)m->phys_addr, 4822 (uintmax_t)tpte)); 4823 4824 KASSERT((m->flags & PG_FICTITIOUS) != 0 || 4825 m < &vm_page_array[vm_page_array_size], 4826 ("pmap_remove_pages: bad tpte %#jx", 4827 (uintmax_t)tpte)); 4828 4829 pte_clear(pte); 4830 4831 /* 4832 * Update the vm_page_t clean/reference bits. 4833 */ 4834 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) { 4835 if ((tpte & PG_PS) != 0) { 4836 for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++) 4837 vm_page_dirty(mt); 4838 } else 4839 vm_page_dirty(m); 4840 } 4841 4842 /* Mark free */ 4843 PV_STAT(pv_entry_frees++); 4844 PV_STAT(pv_entry_spare++); 4845 pv_entry_count--; 4846 pc->pc_map[field] |= bitmask; 4847 if ((tpte & PG_PS) != 0) { 4848 pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE; 4849 pvh = pa_to_pvh(tpte & PG_PS_FRAME); 4850 TAILQ_REMOVE(&pvh->pv_list, pv, pv_next); 4851 if (TAILQ_EMPTY(&pvh->pv_list)) { 4852 for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++) 4853 if (TAILQ_EMPTY(&mt->md.pv_list)) 4854 vm_page_aflag_clear(mt, PGA_WRITEABLE); 4855 } 4856 mpte = pmap_remove_pt_page(pmap, pv->pv_va); 4857 if (mpte != NULL) { 4858 KASSERT(mpte->valid == VM_PAGE_BITS_ALL, 4859 ("pmap_remove_pages: pte page not promoted")); 4860 pmap->pm_stats.resident_count--; 4861 KASSERT(mpte->ref_count == NPTEPG, 4862 ("pmap_remove_pages: pte page ref count error")); 4863 mpte->ref_count = 0; 4864 pmap_add_delayed_free_list(mpte, &free, FALSE); 4865 } 4866 } else { 4867 pmap->pm_stats.resident_count--; 4868 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); 4869 if (TAILQ_EMPTY(&m->md.pv_list) && 4870 (m->flags & PG_FICTITIOUS) == 0) { 4871 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 4872 if (TAILQ_EMPTY(&pvh->pv_list)) 4873 vm_page_aflag_clear(m, PGA_WRITEABLE); 4874 } 4875 pmap_unuse_pt(pmap, pv->pv_va, &free); 4876 } 4877 } 4878 } 4879 if (allfree) { 4880 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 4881 free_pv_chunk(pc); 4882 } 4883 } 4884 sched_unpin(); 4885 pmap_invalidate_all_int(pmap); 4886 rw_wunlock(&pvh_global_lock); 4887 PMAP_UNLOCK(pmap); 4888 vm_page_free_pages_toq(&free, true); 4889 } 4890 4891 /* 4892 * pmap_is_modified: 4893 * 4894 * Return whether or not the specified physical page was modified 4895 * in any physical maps. 4896 */ 4897 static boolean_t 4898 __CONCAT(PMTYPE, is_modified)(vm_page_t m) 4899 { 4900 boolean_t rv; 4901 4902 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 4903 ("pmap_is_modified: page %p is not managed", m)); 4904 4905 /* 4906 * If the page is not busied then this check is racy. 4907 */ 4908 if (!pmap_page_is_write_mapped(m)) 4909 return (FALSE); 4910 rw_wlock(&pvh_global_lock); 4911 rv = pmap_is_modified_pvh(&m->md) || 4912 ((m->flags & PG_FICTITIOUS) == 0 && 4913 pmap_is_modified_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m)))); 4914 rw_wunlock(&pvh_global_lock); 4915 return (rv); 4916 } 4917 4918 /* 4919 * Returns TRUE if any of the given mappings were used to modify 4920 * physical memory. Otherwise, returns FALSE. Both page and 2mpage 4921 * mappings are supported. 4922 */ 4923 static boolean_t 4924 pmap_is_modified_pvh(struct md_page *pvh) 4925 { 4926 pv_entry_t pv; 4927 pt_entry_t *pte; 4928 pmap_t pmap; 4929 boolean_t rv; 4930 4931 rw_assert(&pvh_global_lock, RA_WLOCKED); 4932 rv = FALSE; 4933 sched_pin(); 4934 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { 4935 pmap = PV_PMAP(pv); 4936 PMAP_LOCK(pmap); 4937 pte = pmap_pte_quick(pmap, pv->pv_va); 4938 rv = (*pte & (PG_M | PG_RW)) == (PG_M | PG_RW); 4939 PMAP_UNLOCK(pmap); 4940 if (rv) 4941 break; 4942 } 4943 sched_unpin(); 4944 return (rv); 4945 } 4946 4947 /* 4948 * pmap_is_prefaultable: 4949 * 4950 * Return whether or not the specified virtual address is elgible 4951 * for prefault. 4952 */ 4953 static boolean_t 4954 __CONCAT(PMTYPE, is_prefaultable)(pmap_t pmap, vm_offset_t addr) 4955 { 4956 pd_entry_t pde; 4957 boolean_t rv; 4958 4959 rv = FALSE; 4960 PMAP_LOCK(pmap); 4961 pde = *pmap_pde(pmap, addr); 4962 if (pde != 0 && (pde & PG_PS) == 0) 4963 rv = pmap_pte_ufast(pmap, addr, pde) == 0; 4964 PMAP_UNLOCK(pmap); 4965 return (rv); 4966 } 4967 4968 /* 4969 * pmap_is_referenced: 4970 * 4971 * Return whether or not the specified physical page was referenced 4972 * in any physical maps. 4973 */ 4974 static boolean_t 4975 __CONCAT(PMTYPE, is_referenced)(vm_page_t m) 4976 { 4977 boolean_t rv; 4978 4979 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 4980 ("pmap_is_referenced: page %p is not managed", m)); 4981 rw_wlock(&pvh_global_lock); 4982 rv = pmap_is_referenced_pvh(&m->md) || 4983 ((m->flags & PG_FICTITIOUS) == 0 && 4984 pmap_is_referenced_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m)))); 4985 rw_wunlock(&pvh_global_lock); 4986 return (rv); 4987 } 4988 4989 /* 4990 * Returns TRUE if any of the given mappings were referenced and FALSE 4991 * otherwise. Both page and 4mpage mappings are supported. 4992 */ 4993 static boolean_t 4994 pmap_is_referenced_pvh(struct md_page *pvh) 4995 { 4996 pv_entry_t pv; 4997 pt_entry_t *pte; 4998 pmap_t pmap; 4999 boolean_t rv; 5000 5001 rw_assert(&pvh_global_lock, RA_WLOCKED); 5002 rv = FALSE; 5003 sched_pin(); 5004 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { 5005 pmap = PV_PMAP(pv); 5006 PMAP_LOCK(pmap); 5007 pte = pmap_pte_quick(pmap, pv->pv_va); 5008 rv = (*pte & (PG_A | PG_V)) == (PG_A | PG_V); 5009 PMAP_UNLOCK(pmap); 5010 if (rv) 5011 break; 5012 } 5013 sched_unpin(); 5014 return (rv); 5015 } 5016 5017 /* 5018 * Clear the write and modified bits in each of the given page's mappings. 5019 */ 5020 static void 5021 __CONCAT(PMTYPE, remove_write)(vm_page_t m) 5022 { 5023 struct md_page *pvh; 5024 pv_entry_t next_pv, pv; 5025 pmap_t pmap; 5026 pd_entry_t *pde; 5027 pt_entry_t oldpte, *pte; 5028 vm_offset_t va; 5029 5030 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 5031 ("pmap_remove_write: page %p is not managed", m)); 5032 vm_page_assert_busied(m); 5033 5034 if (!pmap_page_is_write_mapped(m)) 5035 return; 5036 rw_wlock(&pvh_global_lock); 5037 sched_pin(); 5038 if ((m->flags & PG_FICTITIOUS) != 0) 5039 goto small_mappings; 5040 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 5041 TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) { 5042 va = pv->pv_va; 5043 pmap = PV_PMAP(pv); 5044 PMAP_LOCK(pmap); 5045 pde = pmap_pde(pmap, va); 5046 if ((*pde & PG_RW) != 0) 5047 (void)pmap_demote_pde(pmap, pde, va); 5048 PMAP_UNLOCK(pmap); 5049 } 5050 small_mappings: 5051 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { 5052 pmap = PV_PMAP(pv); 5053 PMAP_LOCK(pmap); 5054 pde = pmap_pde(pmap, pv->pv_va); 5055 KASSERT((*pde & PG_PS) == 0, ("pmap_clear_write: found" 5056 " a 4mpage in page %p's pv list", m)); 5057 pte = pmap_pte_quick(pmap, pv->pv_va); 5058 retry: 5059 oldpte = *pte; 5060 if ((oldpte & PG_RW) != 0) { 5061 /* 5062 * Regardless of whether a pte is 32 or 64 bits 5063 * in size, PG_RW and PG_M are among the least 5064 * significant 32 bits. 5065 */ 5066 if (!atomic_cmpset_int((u_int *)pte, oldpte, 5067 oldpte & ~(PG_RW | PG_M))) 5068 goto retry; 5069 if ((oldpte & PG_M) != 0) 5070 vm_page_dirty(m); 5071 pmap_invalidate_page_int(pmap, pv->pv_va); 5072 } 5073 PMAP_UNLOCK(pmap); 5074 } 5075 vm_page_aflag_clear(m, PGA_WRITEABLE); 5076 sched_unpin(); 5077 rw_wunlock(&pvh_global_lock); 5078 } 5079 5080 /* 5081 * pmap_ts_referenced: 5082 * 5083 * Return a count of reference bits for a page, clearing those bits. 5084 * It is not necessary for every reference bit to be cleared, but it 5085 * is necessary that 0 only be returned when there are truly no 5086 * reference bits set. 5087 * 5088 * As an optimization, update the page's dirty field if a modified bit is 5089 * found while counting reference bits. This opportunistic update can be 5090 * performed at low cost and can eliminate the need for some future calls 5091 * to pmap_is_modified(). However, since this function stops after 5092 * finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some 5093 * dirty pages. Those dirty pages will only be detected by a future call 5094 * to pmap_is_modified(). 5095 */ 5096 static int 5097 __CONCAT(PMTYPE, ts_referenced)(vm_page_t m) 5098 { 5099 struct md_page *pvh; 5100 pv_entry_t pv, pvf; 5101 pmap_t pmap; 5102 pd_entry_t *pde; 5103 pt_entry_t *pte; 5104 vm_paddr_t pa; 5105 int rtval = 0; 5106 5107 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 5108 ("pmap_ts_referenced: page %p is not managed", m)); 5109 pa = VM_PAGE_TO_PHYS(m); 5110 pvh = pa_to_pvh(pa); 5111 rw_wlock(&pvh_global_lock); 5112 sched_pin(); 5113 if ((m->flags & PG_FICTITIOUS) != 0 || 5114 (pvf = TAILQ_FIRST(&pvh->pv_list)) == NULL) 5115 goto small_mappings; 5116 pv = pvf; 5117 do { 5118 pmap = PV_PMAP(pv); 5119 PMAP_LOCK(pmap); 5120 pde = pmap_pde(pmap, pv->pv_va); 5121 if ((*pde & (PG_M | PG_RW)) == (PG_M | PG_RW)) { 5122 /* 5123 * Although "*pde" is mapping a 2/4MB page, because 5124 * this function is called at a 4KB page granularity, 5125 * we only update the 4KB page under test. 5126 */ 5127 vm_page_dirty(m); 5128 } 5129 if ((*pde & PG_A) != 0) { 5130 /* 5131 * Since this reference bit is shared by either 1024 5132 * or 512 4KB pages, it should not be cleared every 5133 * time it is tested. Apply a simple "hash" function 5134 * on the physical page number, the virtual superpage 5135 * number, and the pmap address to select one 4KB page 5136 * out of the 1024 or 512 on which testing the 5137 * reference bit will result in clearing that bit. 5138 * This function is designed to avoid the selection of 5139 * the same 4KB page for every 2- or 4MB page mapping. 5140 * 5141 * On demotion, a mapping that hasn't been referenced 5142 * is simply destroyed. To avoid the possibility of a 5143 * subsequent page fault on a demoted wired mapping, 5144 * always leave its reference bit set. Moreover, 5145 * since the superpage is wired, the current state of 5146 * its reference bit won't affect page replacement. 5147 */ 5148 if ((((pa >> PAGE_SHIFT) ^ (pv->pv_va >> PDRSHIFT) ^ 5149 (uintptr_t)pmap) & (NPTEPG - 1)) == 0 && 5150 (*pde & PG_W) == 0) { 5151 atomic_clear_int((u_int *)pde, PG_A); 5152 pmap_invalidate_page_int(pmap, pv->pv_va); 5153 } 5154 rtval++; 5155 } 5156 PMAP_UNLOCK(pmap); 5157 /* Rotate the PV list if it has more than one entry. */ 5158 if (TAILQ_NEXT(pv, pv_next) != NULL) { 5159 TAILQ_REMOVE(&pvh->pv_list, pv, pv_next); 5160 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next); 5161 } 5162 if (rtval >= PMAP_TS_REFERENCED_MAX) 5163 goto out; 5164 } while ((pv = TAILQ_FIRST(&pvh->pv_list)) != pvf); 5165 small_mappings: 5166 if ((pvf = TAILQ_FIRST(&m->md.pv_list)) == NULL) 5167 goto out; 5168 pv = pvf; 5169 do { 5170 pmap = PV_PMAP(pv); 5171 PMAP_LOCK(pmap); 5172 pde = pmap_pde(pmap, pv->pv_va); 5173 KASSERT((*pde & PG_PS) == 0, 5174 ("pmap_ts_referenced: found a 4mpage in page %p's pv list", 5175 m)); 5176 pte = pmap_pte_quick(pmap, pv->pv_va); 5177 if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 5178 vm_page_dirty(m); 5179 if ((*pte & PG_A) != 0) { 5180 atomic_clear_int((u_int *)pte, PG_A); 5181 pmap_invalidate_page_int(pmap, pv->pv_va); 5182 rtval++; 5183 } 5184 PMAP_UNLOCK(pmap); 5185 /* Rotate the PV list if it has more than one entry. */ 5186 if (TAILQ_NEXT(pv, pv_next) != NULL) { 5187 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); 5188 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); 5189 } 5190 } while ((pv = TAILQ_FIRST(&m->md.pv_list)) != pvf && rtval < 5191 PMAP_TS_REFERENCED_MAX); 5192 out: 5193 sched_unpin(); 5194 rw_wunlock(&pvh_global_lock); 5195 return (rtval); 5196 } 5197 5198 /* 5199 * Apply the given advice to the specified range of addresses within the 5200 * given pmap. Depending on the advice, clear the referenced and/or 5201 * modified flags in each mapping and set the mapped page's dirty field. 5202 */ 5203 static void 5204 __CONCAT(PMTYPE, advise)(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, 5205 int advice) 5206 { 5207 pd_entry_t oldpde, *pde; 5208 pt_entry_t *pte; 5209 vm_offset_t va, pdnxt; 5210 vm_page_t m; 5211 bool anychanged, pv_lists_locked; 5212 5213 if (advice != MADV_DONTNEED && advice != MADV_FREE) 5214 return; 5215 if (pmap_is_current(pmap)) 5216 pv_lists_locked = false; 5217 else { 5218 pv_lists_locked = true; 5219 resume: 5220 rw_wlock(&pvh_global_lock); 5221 sched_pin(); 5222 } 5223 anychanged = false; 5224 PMAP_LOCK(pmap); 5225 for (; sva < eva; sva = pdnxt) { 5226 pdnxt = (sva + NBPDR) & ~PDRMASK; 5227 if (pdnxt < sva) 5228 pdnxt = eva; 5229 pde = pmap_pde(pmap, sva); 5230 oldpde = *pde; 5231 if ((oldpde & PG_V) == 0) 5232 continue; 5233 else if ((oldpde & PG_PS) != 0) { 5234 if ((oldpde & PG_MANAGED) == 0) 5235 continue; 5236 if (!pv_lists_locked) { 5237 pv_lists_locked = true; 5238 if (!rw_try_wlock(&pvh_global_lock)) { 5239 if (anychanged) 5240 pmap_invalidate_all_int(pmap); 5241 PMAP_UNLOCK(pmap); 5242 goto resume; 5243 } 5244 sched_pin(); 5245 } 5246 if (!pmap_demote_pde(pmap, pde, sva)) { 5247 /* 5248 * The large page mapping was destroyed. 5249 */ 5250 continue; 5251 } 5252 5253 /* 5254 * Unless the page mappings are wired, remove the 5255 * mapping to a single page so that a subsequent 5256 * access may repromote. Choosing the last page 5257 * within the address range [sva, min(pdnxt, eva)) 5258 * generally results in more repromotions. Since the 5259 * underlying page table page is fully populated, this 5260 * removal never frees a page table page. 5261 */ 5262 if ((oldpde & PG_W) == 0) { 5263 va = eva; 5264 if (va > pdnxt) 5265 va = pdnxt; 5266 va -= PAGE_SIZE; 5267 KASSERT(va >= sva, 5268 ("pmap_advise: no address gap")); 5269 pte = pmap_pte_quick(pmap, va); 5270 KASSERT((*pte & PG_V) != 0, 5271 ("pmap_advise: invalid PTE")); 5272 pmap_remove_pte(pmap, pte, va, NULL); 5273 anychanged = true; 5274 } 5275 } 5276 if (pdnxt > eva) 5277 pdnxt = eva; 5278 va = pdnxt; 5279 for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++, 5280 sva += PAGE_SIZE) { 5281 if ((*pte & (PG_MANAGED | PG_V)) != (PG_MANAGED | PG_V)) 5282 goto maybe_invlrng; 5283 else if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) { 5284 if (advice == MADV_DONTNEED) { 5285 /* 5286 * Future calls to pmap_is_modified() 5287 * can be avoided by making the page 5288 * dirty now. 5289 */ 5290 m = PHYS_TO_VM_PAGE(*pte & PG_FRAME); 5291 vm_page_dirty(m); 5292 } 5293 atomic_clear_int((u_int *)pte, PG_M | PG_A); 5294 } else if ((*pte & PG_A) != 0) 5295 atomic_clear_int((u_int *)pte, PG_A); 5296 else 5297 goto maybe_invlrng; 5298 if ((*pte & PG_G) != 0) { 5299 if (va == pdnxt) 5300 va = sva; 5301 } else 5302 anychanged = true; 5303 continue; 5304 maybe_invlrng: 5305 if (va != pdnxt) { 5306 pmap_invalidate_range_int(pmap, va, sva); 5307 va = pdnxt; 5308 } 5309 } 5310 if (va != pdnxt) 5311 pmap_invalidate_range_int(pmap, va, sva); 5312 } 5313 if (anychanged) 5314 pmap_invalidate_all_int(pmap); 5315 if (pv_lists_locked) { 5316 sched_unpin(); 5317 rw_wunlock(&pvh_global_lock); 5318 } 5319 PMAP_UNLOCK(pmap); 5320 } 5321 5322 /* 5323 * Clear the modify bits on the specified physical page. 5324 */ 5325 static void 5326 __CONCAT(PMTYPE, clear_modify)(vm_page_t m) 5327 { 5328 struct md_page *pvh; 5329 pv_entry_t next_pv, pv; 5330 pmap_t pmap; 5331 pd_entry_t oldpde, *pde; 5332 pt_entry_t *pte; 5333 vm_offset_t va; 5334 5335 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 5336 ("pmap_clear_modify: page %p is not managed", m)); 5337 vm_page_assert_busied(m); 5338 5339 if (!pmap_page_is_write_mapped(m)) 5340 return; 5341 rw_wlock(&pvh_global_lock); 5342 sched_pin(); 5343 if ((m->flags & PG_FICTITIOUS) != 0) 5344 goto small_mappings; 5345 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 5346 TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) { 5347 va = pv->pv_va; 5348 pmap = PV_PMAP(pv); 5349 PMAP_LOCK(pmap); 5350 pde = pmap_pde(pmap, va); 5351 oldpde = *pde; 5352 /* If oldpde has PG_RW set, then it also has PG_M set. */ 5353 if ((oldpde & PG_RW) != 0 && 5354 pmap_demote_pde(pmap, pde, va) && 5355 (oldpde & PG_W) == 0) { 5356 /* 5357 * Write protect the mapping to a single page so that 5358 * a subsequent write access may repromote. 5359 */ 5360 va += VM_PAGE_TO_PHYS(m) - (oldpde & PG_PS_FRAME); 5361 pte = pmap_pte_quick(pmap, va); 5362 /* 5363 * Regardless of whether a pte is 32 or 64 bits 5364 * in size, PG_RW and PG_M are among the least 5365 * significant 32 bits. 5366 */ 5367 atomic_clear_int((u_int *)pte, PG_M | PG_RW); 5368 vm_page_dirty(m); 5369 pmap_invalidate_page_int(pmap, va); 5370 } 5371 PMAP_UNLOCK(pmap); 5372 } 5373 small_mappings: 5374 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { 5375 pmap = PV_PMAP(pv); 5376 PMAP_LOCK(pmap); 5377 pde = pmap_pde(pmap, pv->pv_va); 5378 KASSERT((*pde & PG_PS) == 0, ("pmap_clear_modify: found" 5379 " a 4mpage in page %p's pv list", m)); 5380 pte = pmap_pte_quick(pmap, pv->pv_va); 5381 if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) { 5382 /* 5383 * Regardless of whether a pte is 32 or 64 bits 5384 * in size, PG_M is among the least significant 5385 * 32 bits. 5386 */ 5387 atomic_clear_int((u_int *)pte, PG_M); 5388 pmap_invalidate_page_int(pmap, pv->pv_va); 5389 } 5390 PMAP_UNLOCK(pmap); 5391 } 5392 sched_unpin(); 5393 rw_wunlock(&pvh_global_lock); 5394 } 5395 5396 /* 5397 * Miscellaneous support routines follow 5398 */ 5399 5400 /* Adjust the cache mode for a 4KB page mapped via a PTE. */ 5401 static __inline void 5402 pmap_pte_attr(pt_entry_t *pte, int cache_bits) 5403 { 5404 u_int opte, npte; 5405 5406 /* 5407 * The cache mode bits are all in the low 32-bits of the 5408 * PTE, so we can just spin on updating the low 32-bits. 5409 */ 5410 do { 5411 opte = *(u_int *)pte; 5412 npte = opte & ~PG_PTE_CACHE; 5413 npte |= cache_bits; 5414 } while (npte != opte && !atomic_cmpset_int((u_int *)pte, opte, npte)); 5415 } 5416 5417 /* Adjust the cache mode for a 2/4MB page mapped via a PDE. */ 5418 static __inline void 5419 pmap_pde_attr(pd_entry_t *pde, int cache_bits) 5420 { 5421 u_int opde, npde; 5422 5423 /* 5424 * The cache mode bits are all in the low 32-bits of the 5425 * PDE, so we can just spin on updating the low 32-bits. 5426 */ 5427 do { 5428 opde = *(u_int *)pde; 5429 npde = opde & ~PG_PDE_CACHE; 5430 npde |= cache_bits; 5431 } while (npde != opde && !atomic_cmpset_int((u_int *)pde, opde, npde)); 5432 } 5433 5434 /* 5435 * Map a set of physical memory pages into the kernel virtual 5436 * address space. Return a pointer to where it is mapped. This 5437 * routine is intended to be used for mapping device memory, 5438 * NOT real memory. 5439 */ 5440 static void * 5441 __CONCAT(PMTYPE, mapdev_attr)(vm_paddr_t pa, vm_size_t size, int mode, 5442 int flags) 5443 { 5444 struct pmap_preinit_mapping *ppim; 5445 vm_offset_t va, offset; 5446 vm_page_t m; 5447 vm_size_t tmpsize; 5448 int i; 5449 5450 offset = pa & PAGE_MASK; 5451 size = round_page(offset + size); 5452 pa = pa & PG_FRAME; 5453 5454 if (pa < PMAP_MAP_LOW && pa + size <= PMAP_MAP_LOW) { 5455 va = pa + PMAP_MAP_LOW; 5456 if ((flags & MAPDEV_SETATTR) == 0) 5457 return ((void *)(va + offset)); 5458 } else if (!pmap_initialized) { 5459 va = 0; 5460 for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) { 5461 ppim = pmap_preinit_mapping + i; 5462 if (ppim->va == 0) { 5463 ppim->pa = pa; 5464 ppim->sz = size; 5465 ppim->mode = mode; 5466 ppim->va = virtual_avail; 5467 virtual_avail += size; 5468 va = ppim->va; 5469 break; 5470 } 5471 } 5472 if (va == 0) 5473 panic("%s: too many preinit mappings", __func__); 5474 } else { 5475 /* 5476 * If we have a preinit mapping, re-use it. 5477 */ 5478 for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) { 5479 ppim = pmap_preinit_mapping + i; 5480 if (ppim->pa == pa && ppim->sz == size && 5481 (ppim->mode == mode || 5482 (flags & MAPDEV_SETATTR) == 0)) 5483 return ((void *)(ppim->va + offset)); 5484 } 5485 va = kva_alloc(size); 5486 if (va == 0) 5487 panic("%s: Couldn't allocate KVA", __func__); 5488 } 5489 for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE) { 5490 if ((flags & MAPDEV_SETATTR) == 0 && pmap_initialized) { 5491 m = PHYS_TO_VM_PAGE(pa); 5492 if (m != NULL && VM_PAGE_TO_PHYS(m) == pa) { 5493 pmap_kenter_attr(va + tmpsize, pa + tmpsize, 5494 m->md.pat_mode); 5495 continue; 5496 } 5497 } 5498 pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode); 5499 } 5500 pmap_invalidate_range_int(kernel_pmap, va, va + tmpsize); 5501 pmap_invalidate_cache_range(va, va + size); 5502 return ((void *)(va + offset)); 5503 } 5504 5505 static void 5506 __CONCAT(PMTYPE, unmapdev)(vm_offset_t va, vm_size_t size) 5507 { 5508 struct pmap_preinit_mapping *ppim; 5509 vm_offset_t offset; 5510 int i; 5511 5512 if (va >= PMAP_MAP_LOW && va <= KERNBASE && va + size <= KERNBASE) 5513 return; 5514 offset = va & PAGE_MASK; 5515 size = round_page(offset + size); 5516 va = trunc_page(va); 5517 for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) { 5518 ppim = pmap_preinit_mapping + i; 5519 if (ppim->va == va && ppim->sz == size) { 5520 if (pmap_initialized) 5521 return; 5522 ppim->pa = 0; 5523 ppim->va = 0; 5524 ppim->sz = 0; 5525 ppim->mode = 0; 5526 if (va + size == virtual_avail) 5527 virtual_avail = va; 5528 return; 5529 } 5530 } 5531 if (pmap_initialized) { 5532 pmap_qremove(va, atop(size)); 5533 kva_free(va, size); 5534 } 5535 } 5536 5537 /* 5538 * Sets the memory attribute for the specified page. 5539 */ 5540 static void 5541 __CONCAT(PMTYPE, page_set_memattr)(vm_page_t m, vm_memattr_t ma) 5542 { 5543 5544 m->md.pat_mode = ma; 5545 if ((m->flags & PG_FICTITIOUS) != 0) 5546 return; 5547 5548 /* 5549 * If "m" is a normal page, flush it from the cache. 5550 * See pmap_invalidate_cache_range(). 5551 * 5552 * First, try to find an existing mapping of the page by sf 5553 * buffer. sf_buf_invalidate_cache() modifies mapping and 5554 * flushes the cache. 5555 */ 5556 if (sf_buf_invalidate_cache(m)) 5557 return; 5558 5559 /* 5560 * If page is not mapped by sf buffer, but CPU does not 5561 * support self snoop, map the page transient and do 5562 * invalidation. In the worst case, whole cache is flushed by 5563 * pmap_invalidate_cache_range(). 5564 */ 5565 if ((cpu_feature & CPUID_SS) == 0) 5566 pmap_flush_page(m); 5567 } 5568 5569 static void 5570 __CONCAT(PMTYPE, flush_page)(vm_page_t m) 5571 { 5572 pt_entry_t *cmap_pte2; 5573 struct pcpu *pc; 5574 vm_offset_t sva, eva; 5575 bool useclflushopt; 5576 5577 useclflushopt = (cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0; 5578 if (useclflushopt || (cpu_feature & CPUID_CLFSH) != 0) { 5579 sched_pin(); 5580 pc = get_pcpu(); 5581 cmap_pte2 = pc->pc_cmap_pte2; 5582 mtx_lock(&pc->pc_cmap_lock); 5583 if (*cmap_pte2) 5584 panic("pmap_flush_page: CMAP2 busy"); 5585 *cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | 5586 PG_A | PG_M | pmap_cache_bits(kernel_pmap, m->md.pat_mode, 5587 0); 5588 invlcaddr(pc->pc_cmap_addr2); 5589 sva = (vm_offset_t)pc->pc_cmap_addr2; 5590 eva = sva + PAGE_SIZE; 5591 5592 /* 5593 * Use mfence or sfence despite the ordering implied by 5594 * mtx_{un,}lock() because clflush on non-Intel CPUs 5595 * and clflushopt are not guaranteed to be ordered by 5596 * any other instruction. 5597 */ 5598 if (useclflushopt) 5599 sfence(); 5600 else if (cpu_vendor_id != CPU_VENDOR_INTEL) 5601 mfence(); 5602 for (; sva < eva; sva += cpu_clflush_line_size) { 5603 if (useclflushopt) 5604 clflushopt(sva); 5605 else 5606 clflush(sva); 5607 } 5608 if (useclflushopt) 5609 sfence(); 5610 else if (cpu_vendor_id != CPU_VENDOR_INTEL) 5611 mfence(); 5612 *cmap_pte2 = 0; 5613 sched_unpin(); 5614 mtx_unlock(&pc->pc_cmap_lock); 5615 } else 5616 pmap_invalidate_cache(); 5617 } 5618 5619 /* 5620 * Changes the specified virtual address range's memory type to that given by 5621 * the parameter "mode". The specified virtual address range must be 5622 * completely contained within either the kernel map. 5623 * 5624 * Returns zero if the change completed successfully, and either EINVAL or 5625 * ENOMEM if the change failed. Specifically, EINVAL is returned if some part 5626 * of the virtual address range was not mapped, and ENOMEM is returned if 5627 * there was insufficient memory available to complete the change. 5628 */ 5629 static int 5630 __CONCAT(PMTYPE, change_attr)(vm_offset_t va, vm_size_t size, int mode) 5631 { 5632 vm_offset_t base, offset, tmpva; 5633 pd_entry_t *pde; 5634 pt_entry_t *pte; 5635 int cache_bits_pte, cache_bits_pde; 5636 boolean_t changed; 5637 5638 base = trunc_page(va); 5639 offset = va & PAGE_MASK; 5640 size = round_page(offset + size); 5641 5642 /* 5643 * Only supported on kernel virtual addresses above the recursive map. 5644 */ 5645 if (base < VM_MIN_KERNEL_ADDRESS) 5646 return (EINVAL); 5647 5648 cache_bits_pde = pmap_cache_bits(kernel_pmap, mode, 1); 5649 cache_bits_pte = pmap_cache_bits(kernel_pmap, mode, 0); 5650 changed = FALSE; 5651 5652 /* 5653 * Pages that aren't mapped aren't supported. Also break down 5654 * 2/4MB pages into 4KB pages if required. 5655 */ 5656 PMAP_LOCK(kernel_pmap); 5657 for (tmpva = base; tmpva < base + size; ) { 5658 pde = pmap_pde(kernel_pmap, tmpva); 5659 if (*pde == 0) { 5660 PMAP_UNLOCK(kernel_pmap); 5661 return (EINVAL); 5662 } 5663 if (*pde & PG_PS) { 5664 /* 5665 * If the current 2/4MB page already has 5666 * the required memory type, then we need not 5667 * demote this page. Just increment tmpva to 5668 * the next 2/4MB page frame. 5669 */ 5670 if ((*pde & PG_PDE_CACHE) == cache_bits_pde) { 5671 tmpva = trunc_4mpage(tmpva) + NBPDR; 5672 continue; 5673 } 5674 5675 /* 5676 * If the current offset aligns with a 2/4MB 5677 * page frame and there is at least 2/4MB left 5678 * within the range, then we need not break 5679 * down this page into 4KB pages. 5680 */ 5681 if ((tmpva & PDRMASK) == 0 && 5682 tmpva + PDRMASK < base + size) { 5683 tmpva += NBPDR; 5684 continue; 5685 } 5686 if (!pmap_demote_pde(kernel_pmap, pde, tmpva)) { 5687 PMAP_UNLOCK(kernel_pmap); 5688 return (ENOMEM); 5689 } 5690 } 5691 pte = vtopte(tmpva); 5692 if (*pte == 0) { 5693 PMAP_UNLOCK(kernel_pmap); 5694 return (EINVAL); 5695 } 5696 tmpva += PAGE_SIZE; 5697 } 5698 PMAP_UNLOCK(kernel_pmap); 5699 5700 /* 5701 * Ok, all the pages exist, so run through them updating their 5702 * cache mode if required. 5703 */ 5704 for (tmpva = base; tmpva < base + size; ) { 5705 pde = pmap_pde(kernel_pmap, tmpva); 5706 if (*pde & PG_PS) { 5707 if ((*pde & PG_PDE_CACHE) != cache_bits_pde) { 5708 pmap_pde_attr(pde, cache_bits_pde); 5709 changed = TRUE; 5710 } 5711 tmpva = trunc_4mpage(tmpva) + NBPDR; 5712 } else { 5713 pte = vtopte(tmpva); 5714 if ((*pte & PG_PTE_CACHE) != cache_bits_pte) { 5715 pmap_pte_attr(pte, cache_bits_pte); 5716 changed = TRUE; 5717 } 5718 tmpva += PAGE_SIZE; 5719 } 5720 } 5721 5722 /* 5723 * Flush CPU caches to make sure any data isn't cached that 5724 * shouldn't be, etc. 5725 */ 5726 if (changed) { 5727 pmap_invalidate_range_int(kernel_pmap, base, tmpva); 5728 pmap_invalidate_cache_range(base, tmpva); 5729 } 5730 return (0); 5731 } 5732 5733 /* 5734 * Perform the pmap work for mincore(2). If the page is not both referenced and 5735 * modified by this pmap, returns its physical address so that the caller can 5736 * find other mappings. 5737 */ 5738 static int 5739 __CONCAT(PMTYPE, mincore)(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap) 5740 { 5741 pd_entry_t pde; 5742 pt_entry_t pte; 5743 vm_paddr_t pa; 5744 int val; 5745 5746 PMAP_LOCK(pmap); 5747 pde = *pmap_pde(pmap, addr); 5748 if (pde != 0) { 5749 if ((pde & PG_PS) != 0) { 5750 pte = pde; 5751 /* Compute the physical address of the 4KB page. */ 5752 pa = ((pde & PG_PS_FRAME) | (addr & PDRMASK)) & 5753 PG_FRAME; 5754 val = MINCORE_PSIND(1); 5755 } else { 5756 pte = pmap_pte_ufast(pmap, addr, pde); 5757 pa = pte & PG_FRAME; 5758 val = 0; 5759 } 5760 } else { 5761 pte = 0; 5762 pa = 0; 5763 val = 0; 5764 } 5765 if ((pte & PG_V) != 0) { 5766 val |= MINCORE_INCORE; 5767 if ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 5768 val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER; 5769 if ((pte & PG_A) != 0) 5770 val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER; 5771 } 5772 if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) != 5773 (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && 5774 (pte & (PG_MANAGED | PG_V)) == (PG_MANAGED | PG_V)) { 5775 *pap = pa; 5776 } 5777 PMAP_UNLOCK(pmap); 5778 return (val); 5779 } 5780 5781 static void 5782 __CONCAT(PMTYPE, activate)(struct thread *td) 5783 { 5784 pmap_t pmap, oldpmap; 5785 u_int cpuid; 5786 u_int32_t cr3; 5787 5788 critical_enter(); 5789 pmap = vmspace_pmap(td->td_proc->p_vmspace); 5790 oldpmap = PCPU_GET(curpmap); 5791 cpuid = PCPU_GET(cpuid); 5792 #if defined(SMP) 5793 CPU_CLR_ATOMIC(cpuid, &oldpmap->pm_active); 5794 CPU_SET_ATOMIC(cpuid, &pmap->pm_active); 5795 #else 5796 CPU_CLR(cpuid, &oldpmap->pm_active); 5797 CPU_SET(cpuid, &pmap->pm_active); 5798 #endif 5799 #ifdef PMAP_PAE_COMP 5800 cr3 = vtophys(pmap->pm_pdpt); 5801 #else 5802 cr3 = vtophys(pmap->pm_pdir); 5803 #endif 5804 /* 5805 * pmap_activate is for the current thread on the current cpu 5806 */ 5807 td->td_pcb->pcb_cr3 = cr3; 5808 PCPU_SET(curpmap, pmap); 5809 critical_exit(); 5810 } 5811 5812 static void 5813 __CONCAT(PMTYPE, activate_boot)(pmap_t pmap) 5814 { 5815 u_int cpuid; 5816 5817 cpuid = PCPU_GET(cpuid); 5818 #if defined(SMP) 5819 CPU_SET_ATOMIC(cpuid, &pmap->pm_active); 5820 #else 5821 CPU_SET(cpuid, &pmap->pm_active); 5822 #endif 5823 PCPU_SET(curpmap, pmap); 5824 } 5825 5826 /* 5827 * Increase the starting virtual address of the given mapping if a 5828 * different alignment might result in more superpage mappings. 5829 */ 5830 static void 5831 __CONCAT(PMTYPE, align_superpage)(vm_object_t object, vm_ooffset_t offset, 5832 vm_offset_t *addr, vm_size_t size) 5833 { 5834 vm_offset_t superpage_offset; 5835 5836 if (size < NBPDR) 5837 return; 5838 if (object != NULL && (object->flags & OBJ_COLORED) != 0) 5839 offset += ptoa(object->pg_color); 5840 superpage_offset = offset & PDRMASK; 5841 if (size - ((NBPDR - superpage_offset) & PDRMASK) < NBPDR || 5842 (*addr & PDRMASK) == superpage_offset) 5843 return; 5844 if ((*addr & PDRMASK) < superpage_offset) 5845 *addr = (*addr & ~PDRMASK) + superpage_offset; 5846 else 5847 *addr = ((*addr + PDRMASK) & ~PDRMASK) + superpage_offset; 5848 } 5849 5850 static vm_offset_t 5851 __CONCAT(PMTYPE, quick_enter_page)(vm_page_t m) 5852 { 5853 vm_offset_t qaddr; 5854 pt_entry_t *pte; 5855 5856 critical_enter(); 5857 qaddr = PCPU_GET(qmap_addr); 5858 pte = vtopte(qaddr); 5859 5860 KASSERT(*pte == 0, 5861 ("pmap_quick_enter_page: PTE busy %#jx", (uintmax_t)*pte)); 5862 *pte = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M | 5863 pmap_cache_bits(kernel_pmap, pmap_page_get_memattr(m), 0); 5864 invlpg(qaddr); 5865 5866 return (qaddr); 5867 } 5868 5869 static void 5870 __CONCAT(PMTYPE, quick_remove_page)(vm_offset_t addr) 5871 { 5872 vm_offset_t qaddr; 5873 pt_entry_t *pte; 5874 5875 qaddr = PCPU_GET(qmap_addr); 5876 pte = vtopte(qaddr); 5877 5878 KASSERT(*pte != 0, ("pmap_quick_remove_page: PTE not in use")); 5879 KASSERT(addr == qaddr, ("pmap_quick_remove_page: invalid address")); 5880 5881 *pte = 0; 5882 critical_exit(); 5883 } 5884 5885 static vmem_t *pmap_trm_arena; 5886 static vmem_addr_t pmap_trm_arena_last = PMAP_TRM_MIN_ADDRESS; 5887 static int trm_guard = PAGE_SIZE; 5888 5889 static int 5890 pmap_trm_import(void *unused __unused, vmem_size_t size, int flags, 5891 vmem_addr_t *addrp) 5892 { 5893 vm_page_t m; 5894 vmem_addr_t af, addr, prev_addr; 5895 pt_entry_t *trm_pte; 5896 5897 prev_addr = atomic_load_long(&pmap_trm_arena_last); 5898 size = round_page(size) + trm_guard; 5899 for (;;) { 5900 if (prev_addr + size < prev_addr || prev_addr + size < size || 5901 prev_addr + size > PMAP_TRM_MAX_ADDRESS) 5902 return (ENOMEM); 5903 addr = prev_addr + size; 5904 if (atomic_fcmpset_int(&pmap_trm_arena_last, &prev_addr, addr)) 5905 break; 5906 } 5907 prev_addr += trm_guard; 5908 trm_pte = PTmap + atop(prev_addr); 5909 for (af = prev_addr; af < addr; af += PAGE_SIZE) { 5910 m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_WAITOK); 5911 pte_store(&trm_pte[atop(af - prev_addr)], VM_PAGE_TO_PHYS(m) | 5912 PG_M | PG_A | PG_RW | PG_V | pgeflag | 5913 pmap_cache_bits(kernel_pmap, VM_MEMATTR_DEFAULT, FALSE)); 5914 } 5915 *addrp = prev_addr; 5916 return (0); 5917 } 5918 5919 void 5920 pmap_init_trm(void) 5921 { 5922 vm_page_t pd_m; 5923 5924 TUNABLE_INT_FETCH("machdep.trm_guard", &trm_guard); 5925 if ((trm_guard & PAGE_MASK) != 0) 5926 trm_guard = 0; 5927 pmap_trm_arena = vmem_create("i386trampoline", 0, 0, 1, 0, M_WAITOK); 5928 vmem_set_import(pmap_trm_arena, pmap_trm_import, NULL, NULL, PAGE_SIZE); 5929 pd_m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_WAITOK | 5930 VM_ALLOC_ZERO); 5931 PTD[TRPTDI] = VM_PAGE_TO_PHYS(pd_m) | PG_M | PG_A | PG_RW | PG_V | 5932 pmap_cache_bits(kernel_pmap, VM_MEMATTR_DEFAULT, TRUE); 5933 } 5934 5935 static void * 5936 __CONCAT(PMTYPE, trm_alloc)(size_t size, int flags) 5937 { 5938 vmem_addr_t res; 5939 int error; 5940 5941 MPASS((flags & ~(M_WAITOK | M_NOWAIT | M_ZERO)) == 0); 5942 error = vmem_xalloc(pmap_trm_arena, roundup2(size, 4), sizeof(int), 5943 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX, flags | M_FIRSTFIT, &res); 5944 if (error != 0) 5945 return (NULL); 5946 if ((flags & M_ZERO) != 0) 5947 bzero((void *)res, size); 5948 return ((void *)res); 5949 } 5950 5951 static void 5952 __CONCAT(PMTYPE, trm_free)(void *addr, size_t size) 5953 { 5954 5955 vmem_free(pmap_trm_arena, (uintptr_t)addr, roundup2(size, 4)); 5956 } 5957 5958 static void 5959 __CONCAT(PMTYPE, ksetrw)(vm_offset_t va) 5960 { 5961 5962 *vtopte(va) |= PG_RW; 5963 } 5964 5965 static void 5966 __CONCAT(PMTYPE, remap_lowptdi)(bool enable) 5967 { 5968 5969 PTD[KPTDI] = enable ? PTD[LOWPTDI] : 0; 5970 invltlb_glob(); 5971 } 5972 5973 static vm_offset_t 5974 __CONCAT(PMTYPE, get_map_low)(void) 5975 { 5976 5977 return (PMAP_MAP_LOW); 5978 } 5979 5980 static vm_offset_t 5981 __CONCAT(PMTYPE, get_vm_maxuser_address)(void) 5982 { 5983 5984 return (VM_MAXUSER_ADDRESS); 5985 } 5986 5987 static vm_paddr_t 5988 __CONCAT(PMTYPE, pg_frame)(vm_paddr_t pa) 5989 { 5990 5991 return (pa & PG_FRAME); 5992 } 5993 5994 static void 5995 __CONCAT(PMTYPE, sf_buf_map)(struct sf_buf *sf) 5996 { 5997 pt_entry_t opte, *ptep; 5998 5999 /* 6000 * Update the sf_buf's virtual-to-physical mapping, flushing the 6001 * virtual address from the TLB. Since the reference count for 6002 * the sf_buf's old mapping was zero, that mapping is not 6003 * currently in use. Consequently, there is no need to exchange 6004 * the old and new PTEs atomically, even under PAE. 6005 */ 6006 ptep = vtopte(sf->kva); 6007 opte = *ptep; 6008 *ptep = VM_PAGE_TO_PHYS(sf->m) | PG_RW | PG_V | 6009 pmap_cache_bits(kernel_pmap, sf->m->md.pat_mode, 0); 6010 6011 /* 6012 * Avoid unnecessary TLB invalidations: If the sf_buf's old 6013 * virtual-to-physical mapping was not used, then any processor 6014 * that has invalidated the sf_buf's virtual address from its TLB 6015 * since the last used mapping need not invalidate again. 6016 */ 6017 #ifdef SMP 6018 if ((opte & (PG_V | PG_A)) == (PG_V | PG_A)) 6019 CPU_ZERO(&sf->cpumask); 6020 #else 6021 if ((opte & (PG_V | PG_A)) == (PG_V | PG_A)) 6022 pmap_invalidate_page_int(kernel_pmap, sf->kva); 6023 #endif 6024 } 6025 6026 static void 6027 __CONCAT(PMTYPE, cp_slow0_map)(vm_offset_t kaddr, int plen, vm_page_t *ma) 6028 { 6029 pt_entry_t *pte; 6030 int i; 6031 6032 for (i = 0, pte = vtopte(kaddr); i < plen; i++, pte++) { 6033 *pte = PG_V | PG_RW | PG_A | PG_M | VM_PAGE_TO_PHYS(ma[i]) | 6034 pmap_cache_bits(kernel_pmap, pmap_page_get_memattr(ma[i]), 6035 FALSE); 6036 invlpg(kaddr + ptoa(i)); 6037 } 6038 } 6039 6040 static u_int 6041 __CONCAT(PMTYPE, get_kcr3)(void) 6042 { 6043 6044 #ifdef PMAP_PAE_COMP 6045 return ((u_int)IdlePDPT); 6046 #else 6047 return ((u_int)IdlePTD); 6048 #endif 6049 } 6050 6051 static u_int 6052 __CONCAT(PMTYPE, get_cr3)(pmap_t pmap) 6053 { 6054 6055 #ifdef PMAP_PAE_COMP 6056 return ((u_int)vtophys(pmap->pm_pdpt)); 6057 #else 6058 return ((u_int)vtophys(pmap->pm_pdir)); 6059 #endif 6060 } 6061 6062 static caddr_t 6063 __CONCAT(PMTYPE, cmap3)(vm_paddr_t pa, u_int pte_bits) 6064 { 6065 pt_entry_t *pte; 6066 6067 pte = CMAP3; 6068 *pte = pa | pte_bits; 6069 invltlb(); 6070 return (CADDR3); 6071 } 6072 6073 static void 6074 __CONCAT(PMTYPE, basemem_setup)(u_int basemem) 6075 { 6076 pt_entry_t *pte; 6077 int i; 6078 6079 /* 6080 * Map pages between basemem and ISA_HOLE_START, if any, r/w into 6081 * the vm86 page table so that vm86 can scribble on them using 6082 * the vm86 map too. XXX: why 2 ways for this and only 1 way for 6083 * page 0, at least as initialized here? 6084 */ 6085 pte = (pt_entry_t *)vm86paddr; 6086 for (i = basemem / 4; i < 160; i++) 6087 pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U; 6088 } 6089 6090 struct bios16_pmap_handle { 6091 pt_entry_t *pte; 6092 pd_entry_t *ptd; 6093 pt_entry_t orig_ptd; 6094 }; 6095 6096 static void * 6097 __CONCAT(PMTYPE, bios16_enter)(void) 6098 { 6099 struct bios16_pmap_handle *h; 6100 6101 /* 6102 * no page table, so create one and install it. 6103 */ 6104 h = malloc(sizeof(struct bios16_pmap_handle), M_TEMP, M_WAITOK); 6105 h->pte = (pt_entry_t *)malloc(PAGE_SIZE, M_TEMP, M_WAITOK); 6106 h->ptd = IdlePTD; 6107 *h->pte = vm86phystk | PG_RW | PG_V; 6108 h->orig_ptd = *h->ptd; 6109 *h->ptd = vtophys(h->pte) | PG_RW | PG_V; 6110 pmap_invalidate_all_int(kernel_pmap); /* XXX insurance for now */ 6111 return (h); 6112 } 6113 6114 static void 6115 __CONCAT(PMTYPE, bios16_leave)(void *arg) 6116 { 6117 struct bios16_pmap_handle *h; 6118 6119 h = arg; 6120 *h->ptd = h->orig_ptd; /* remove page table */ 6121 /* 6122 * XXX only needs to be invlpg(0) but that doesn't work on the 386 6123 */ 6124 pmap_invalidate_all_int(kernel_pmap); 6125 free(h->pte, M_TEMP); /* ... and free it */ 6126 } 6127 6128 struct pmap_kernel_map_range { 6129 vm_offset_t sva; 6130 pt_entry_t attrs; 6131 int ptes; 6132 int pdes; 6133 int pdpes; 6134 }; 6135 6136 static void 6137 sysctl_kmaps_dump(struct sbuf *sb, struct pmap_kernel_map_range *range, 6138 vm_offset_t eva) 6139 { 6140 const char *mode; 6141 int i, pat_idx; 6142 6143 if (eva <= range->sva) 6144 return; 6145 6146 pat_idx = pmap_pat_index(kernel_pmap, range->attrs, true); 6147 for (i = 0; i < PAT_INDEX_SIZE; i++) 6148 if (pat_index[i] == pat_idx) 6149 break; 6150 6151 switch (i) { 6152 case PAT_WRITE_BACK: 6153 mode = "WB"; 6154 break; 6155 case PAT_WRITE_THROUGH: 6156 mode = "WT"; 6157 break; 6158 case PAT_UNCACHEABLE: 6159 mode = "UC"; 6160 break; 6161 case PAT_UNCACHED: 6162 mode = "U-"; 6163 break; 6164 case PAT_WRITE_PROTECTED: 6165 mode = "WP"; 6166 break; 6167 case PAT_WRITE_COMBINING: 6168 mode = "WC"; 6169 break; 6170 default: 6171 printf("%s: unknown PAT mode %#x for range 0x%08x-0x%08x\n", 6172 __func__, pat_idx, range->sva, eva); 6173 mode = "??"; 6174 break; 6175 } 6176 6177 sbuf_printf(sb, "0x%08x-0x%08x r%c%c%c%c %s %d %d %d\n", 6178 range->sva, eva, 6179 (range->attrs & PG_RW) != 0 ? 'w' : '-', 6180 (range->attrs & pg_nx) != 0 ? '-' : 'x', 6181 (range->attrs & PG_U) != 0 ? 'u' : 's', 6182 (range->attrs & PG_G) != 0 ? 'g' : '-', 6183 mode, range->pdpes, range->pdes, range->ptes); 6184 6185 /* Reset to sentinel value. */ 6186 range->sva = 0xffffffff; 6187 } 6188 6189 /* 6190 * Determine whether the attributes specified by a page table entry match those 6191 * being tracked by the current range. This is not quite as simple as a direct 6192 * flag comparison since some PAT modes have multiple representations. 6193 */ 6194 static bool 6195 sysctl_kmaps_match(struct pmap_kernel_map_range *range, pt_entry_t attrs) 6196 { 6197 pt_entry_t diff, mask; 6198 6199 mask = pg_nx | PG_G | PG_RW | PG_U | PG_PDE_CACHE; 6200 diff = (range->attrs ^ attrs) & mask; 6201 if (diff == 0) 6202 return (true); 6203 if ((diff & ~PG_PDE_PAT) == 0 && 6204 pmap_pat_index(kernel_pmap, range->attrs, true) == 6205 pmap_pat_index(kernel_pmap, attrs, true)) 6206 return (true); 6207 return (false); 6208 } 6209 6210 static void 6211 sysctl_kmaps_reinit(struct pmap_kernel_map_range *range, vm_offset_t va, 6212 pt_entry_t attrs) 6213 { 6214 6215 memset(range, 0, sizeof(*range)); 6216 range->sva = va; 6217 range->attrs = attrs; 6218 } 6219 6220 /* 6221 * Given a leaf PTE, derive the mapping's attributes. If they do not match 6222 * those of the current run, dump the address range and its attributes, and 6223 * begin a new run. 6224 */ 6225 static void 6226 sysctl_kmaps_check(struct sbuf *sb, struct pmap_kernel_map_range *range, 6227 vm_offset_t va, pd_entry_t pde, pt_entry_t pte) 6228 { 6229 pt_entry_t attrs; 6230 6231 attrs = pde & (PG_RW | PG_U | pg_nx); 6232 6233 if ((pde & PG_PS) != 0) { 6234 attrs |= pde & (PG_G | PG_PDE_CACHE); 6235 } else if (pte != 0) { 6236 attrs |= pte & pg_nx; 6237 attrs &= pg_nx | (pte & (PG_RW | PG_U)); 6238 attrs |= pte & (PG_G | PG_PTE_CACHE); 6239 6240 /* Canonicalize by always using the PDE PAT bit. */ 6241 if ((attrs & PG_PTE_PAT) != 0) 6242 attrs ^= PG_PDE_PAT | PG_PTE_PAT; 6243 } 6244 6245 if (range->sva > va || !sysctl_kmaps_match(range, attrs)) { 6246 sysctl_kmaps_dump(sb, range, va); 6247 sysctl_kmaps_reinit(range, va, attrs); 6248 } 6249 } 6250 6251 static int 6252 __CONCAT(PMTYPE, sysctl_kmaps)(SYSCTL_HANDLER_ARGS) 6253 { 6254 struct pmap_kernel_map_range range; 6255 struct sbuf sbuf, *sb; 6256 pd_entry_t pde; 6257 pt_entry_t *pt, pte; 6258 vm_offset_t sva; 6259 int error; 6260 u_int i, k; 6261 6262 error = sysctl_wire_old_buffer(req, 0); 6263 if (error != 0) 6264 return (error); 6265 sb = &sbuf; 6266 sbuf_new_for_sysctl(sb, NULL, PAGE_SIZE, req); 6267 6268 /* Sentinel value. */ 6269 range.sva = 0xffffffff; 6270 6271 /* 6272 * Iterate over the kernel page tables without holding the 6273 * kernel pmap lock. Kernel page table pages are never freed, 6274 * so at worst we will observe inconsistencies in the output. 6275 */ 6276 for (sva = 0, i = 0; i < NPTEPG * NPGPTD * NPDEPG ;) { 6277 if (i == 0) 6278 sbuf_printf(sb, "\nLow PDE:\n"); 6279 else if (i == LOWPTDI * NPTEPG) 6280 sbuf_printf(sb, "Low PDE dup:\n"); 6281 else if (i == PTDPTDI * NPTEPG) 6282 sbuf_printf(sb, "Recursive map:\n"); 6283 else if (i == KERNPTDI * NPTEPG) 6284 sbuf_printf(sb, "Kernel base:\n"); 6285 else if (i == TRPTDI * NPTEPG) 6286 sbuf_printf(sb, "Trampoline:\n"); 6287 pde = IdlePTD[sva >> PDRSHIFT]; 6288 if ((pde & PG_V) == 0) { 6289 sva = rounddown2(sva, NBPDR); 6290 sysctl_kmaps_dump(sb, &range, sva); 6291 sva += NBPDR; 6292 i += NPTEPG; 6293 continue; 6294 } 6295 if ((pde & PG_PS) != 0) { 6296 sysctl_kmaps_check(sb, &range, sva, pde, 0); 6297 range.pdes++; 6298 sva += NBPDR; 6299 i += NPTEPG; 6300 continue; 6301 } 6302 for (pt = vtopte(sva), k = 0; k < NPTEPG; i++, k++, pt++, 6303 sva += PAGE_SIZE) { 6304 pte = *pt; 6305 if ((pte & PG_V) == 0) { 6306 sysctl_kmaps_dump(sb, &range, sva); 6307 continue; 6308 } 6309 sysctl_kmaps_check(sb, &range, sva, pde, pte); 6310 range.ptes++; 6311 } 6312 } 6313 6314 error = sbuf_finish(sb); 6315 sbuf_delete(sb); 6316 return (error); 6317 } 6318 6319 #define PMM(a) \ 6320 .pm_##a = __CONCAT(PMTYPE, a), 6321 6322 struct pmap_methods __CONCAT(PMTYPE, methods) = { 6323 PMM(ksetrw) 6324 PMM(remap_lower) 6325 PMM(remap_lowptdi) 6326 PMM(align_superpage) 6327 PMM(quick_enter_page) 6328 PMM(quick_remove_page) 6329 PMM(trm_alloc) 6330 PMM(trm_free) 6331 PMM(get_map_low) 6332 PMM(get_vm_maxuser_address) 6333 PMM(kextract) 6334 PMM(pg_frame) 6335 PMM(sf_buf_map) 6336 PMM(cp_slow0_map) 6337 PMM(get_kcr3) 6338 PMM(get_cr3) 6339 PMM(cmap3) 6340 PMM(basemem_setup) 6341 PMM(set_nx) 6342 PMM(bios16_enter) 6343 PMM(bios16_leave) 6344 PMM(bootstrap) 6345 PMM(is_valid_memattr) 6346 PMM(cache_bits) 6347 PMM(ps_enabled) 6348 PMM(pinit0) 6349 PMM(pinit) 6350 PMM(activate) 6351 PMM(activate_boot) 6352 PMM(advise) 6353 PMM(clear_modify) 6354 PMM(change_attr) 6355 PMM(mincore) 6356 PMM(copy) 6357 PMM(copy_page) 6358 PMM(copy_pages) 6359 PMM(zero_page) 6360 PMM(zero_page_area) 6361 PMM(enter) 6362 PMM(enter_object) 6363 PMM(enter_quick) 6364 PMM(kenter_temporary) 6365 PMM(object_init_pt) 6366 PMM(unwire) 6367 PMM(page_exists_quick) 6368 PMM(page_wired_mappings) 6369 PMM(page_is_mapped) 6370 PMM(remove_pages) 6371 PMM(is_modified) 6372 PMM(is_prefaultable) 6373 PMM(is_referenced) 6374 PMM(remove_write) 6375 PMM(ts_referenced) 6376 PMM(mapdev_attr) 6377 PMM(unmapdev) 6378 PMM(page_set_memattr) 6379 PMM(extract) 6380 PMM(extract_and_hold) 6381 PMM(map) 6382 PMM(qenter) 6383 PMM(qremove) 6384 PMM(release) 6385 PMM(remove) 6386 PMM(protect) 6387 PMM(remove_all) 6388 PMM(init) 6389 PMM(init_pat) 6390 PMM(growkernel) 6391 PMM(invalidate_page) 6392 PMM(invalidate_range) 6393 PMM(invalidate_all) 6394 PMM(invalidate_cache) 6395 PMM(flush_page) 6396 PMM(kenter) 6397 PMM(kremove) 6398 PMM(sysctl_kmaps) 6399 }; 6400