/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 * Copyright (c) 2005-2010 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
 */
/*-
 * Copyright (c) 2003 Networks Associates Technology, Inc.
 * All rights reserved.
 * Copyright (c) 2018 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Jake Burkholder,
 * Safeport Network Services, and Network Associates Laboratories, the
 * Security Research Division of Network Associates, Inc. under
 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
 * CHATS research program.
 *
 * Portions of this software were developed by
 * Konstantin Belousov <kib@FreeBSD.org> under sponsorship from
 * the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
/*
 * Manages physical address maps.
 *
 * Since the information managed by this module is also stored by the
 * logical address mapping module, this module may throw away valid
 * virtual-to-physical mappings at almost any time.  However,
 * invalidations of virtual-to-physical mappings must be done as
 * requested.
 *
 * In order to cope with hardware architectures which make
 * virtual-to-physical map invalidations expensive, this module may
 * delay invalidation or protection-reduction operations until they are
 * actually necessary.  This module is given full information as to
 * which processors are currently using which maps, and as to when
 * physical maps must be made correct.
 */

#include "opt_apic.h"
#include "opt_cpu.h"
#include "opt_pmap.h"
#include "opt_smp.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sf_buf.h>
#include <sys/sx.h>
#include <sys/vmmeter.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/smp.h>
#include <sys/vmem.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>
#include <vm/uma.h>

#ifdef DEV_APIC
#include <sys/bus.h>
#include <machine/intr_machdep.h>
#include <x86/apicvar.h>
#endif
#include <x86/ifunc.h>
#include <machine/bootinfo.h>
#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/specialreg.h>
#ifdef SMP
#include <machine/smp.h>
#endif
#include <machine/pmap_base.h>

#if !defined(DIAGNOSTIC)
#ifdef __GNUC_GNU_INLINE__
#define	PMAP_INLINE	__attribute__((__gnu_inline__)) inline
#else
#define	PMAP_INLINE	extern inline
#endif
#else
#define	PMAP_INLINE
#endif

#ifdef PV_STATS
#define	PV_STAT(x)	do { x ; } while (0)
#else
#define	PV_STAT(x)	do { } while (0)
#endif

#define	pa_index(pa)	((pa) >> PDRSHIFT)
#define	pa_to_pvh(pa)	(&pv_table[pa_index(pa)])

/*
 * PTmap is the recursive pagemap at the top of the virtual address
 * space.  Within PTmap, the page directory can be found (third
 * indirection).
 */
#define	PTmap	((pt_entry_t *)(PTDPTDI << PDRSHIFT))
#define	PTD	((pd_entry_t *)((PTDPTDI << PDRSHIFT) + (PTDPTDI * PAGE_SIZE)))
#define	PTDpde	((pd_entry_t *)((PTDPTDI << PDRSHIFT) + (PTDPTDI * PAGE_SIZE) + \
    (PTDPTDI * PDESIZE)))

/*
 * Translate a virtual address to the kernel virtual address of its page table
 * entry (PTE).  This can be used recursively.  If the address of a PTE as
 * previously returned by this macro is itself given as the argument, then the
 * address of the page directory entry (PDE) that maps the PTE will be
 * returned.
 *
 * This macro may be used before pmap_bootstrap() is called.
 */
#define	vtopte(va)	(PTmap + i386_btop(va))
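
/*
 * For example (illustrative only; "va", "pte", and "pde" are
 * hypothetical locals):
 *
 *	pt_entry_t *pte = vtopte(va);		       // PTE that maps va
 *	pd_entry_t *pde =
 *	    (pd_entry_t *)vtopte((vm_offset_t)pte);    // PDE that maps the PTE
 */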

/*
 * Get PDEs and PTEs for user/kernel address space
 */
#define	pmap_pde(m, v)	(&((m)->pm_pdir[(vm_offset_t)(v) >> PDRSHIFT]))
#define	pdir_pde(m, v)	(m[(vm_offset_t)(v) >> PDRSHIFT])

#define	pmap_pde_v(pte)	((*(int *)pte & PG_V) != 0)
#define	pmap_pte_w(pte)	((*(int *)pte & PG_W) != 0)
#define	pmap_pte_m(pte)	((*(int *)pte & PG_M) != 0)
#define	pmap_pte_u(pte)	((*(int *)pte & PG_A) != 0)
#define	pmap_pte_v(pte)	((*(int *)pte & PG_V) != 0)

#define	pmap_pte_set_w(pte, v)	((v) ? atomic_set_int((u_int *)(pte), PG_W) : \
    atomic_clear_int((u_int *)(pte), PG_W))
#define	pmap_pte_set_prot(pte, v) ((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v)))

static int pgeflag = 0;		/* PG_G or-in */
static int pseflag = 0;		/* PG_PS or-in */

static int nkpt = NKPT;

#ifdef PMAP_PAE_COMP
pt_entry_t pg_nx;
static uma_zone_t pdptzone;
#else
#define	pg_nx	0
#endif

_Static_assert(VM_MAXUSER_ADDRESS == VADDR(TRPTDI, 0), "VM_MAXUSER_ADDRESS");
_Static_assert(VM_MAX_KERNEL_ADDRESS <= VADDR(PTDPTDI, 0),
    "VM_MAX_KERNEL_ADDRESS");
_Static_assert(PMAP_MAP_LOW == VADDR(LOWPTDI, 0), "PMAP_MAP_LOW");
_Static_assert(KERNLOAD == (KERNPTDI << PDRSHIFT), "KERNLOAD");

extern int pat_works;
extern int pg_ps_enabled;

extern int elf32_nxstack;

#define	PAT_INDEX_SIZE	8
static int pat_index[PAT_INDEX_SIZE];	/* cache mode to PAT index conversion */

/*
 * pmap_mapdev support prior to full initialization (i.e., for the console).
 */
#define	PMAP_PREINIT_MAPPING_COUNT	8
static struct pmap_preinit_mapping {
	vm_paddr_t	pa;
	vm_offset_t	va;
	vm_size_t	sz;
	int		mode;
} pmap_preinit_mapping[PMAP_PREINIT_MAPPING_COUNT];
static int pmap_initialized;

static struct rwlock_padalign pvh_global_lock;

/*
 * Data for the pv entry allocation mechanism
 */
static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
extern int pv_entry_max, pv_entry_count;
static int pv_entry_high_water = 0;
static struct md_page *pv_table;
extern int shpgperproc;

static struct pv_chunk *pv_chunkbase;	/* KVA block for pv_chunks */
static int pv_maxchunks;		/* How many chunks we have KVA for */
static vm_offset_t pv_vafree;		/* freelist stored in the PTE */

/*
 * All those kernel PT submaps that BSD is so fond of
 */
static pt_entry_t *CMAP3;
static pd_entry_t *KPTD;
static caddr_t CADDR3;

/*
 * Crashdump maps.
 */
static caddr_t crashdumpmap;

static pt_entry_t *PMAP1 = NULL, *PMAP2, *PMAP3;
static pt_entry_t *PADDR1 = NULL, *PADDR2, *PADDR3;
#ifdef SMP
static int PMAP1cpu, PMAP3cpu;
extern int PMAP1changedcpu;
#endif
extern int PMAP1changed;
extern int PMAP1unchanged;
static struct mtx PMAP2mutex;

/*
 * Internal flags for pmap_enter()'s helper functions.
 */
#define	PMAP_ENTER_NORECLAIM	0x1000000	/* Don't reclaim PV entries. */
#define	PMAP_ENTER_NOREPLACE	0x2000000	/* Don't replace mappings. */

static void	free_pv_chunk(struct pv_chunk *pc);
static void	free_pv_entry(pmap_t pmap, pv_entry_t pv);
static pv_entry_t get_pv_entry(pmap_t pmap, boolean_t try);
static void	pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa);
static bool	pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, pd_entry_t pde,
		    u_int flags);
#if VM_NRESERVLEVEL > 0
static void	pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa);
#endif
static void	pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
		    vm_offset_t va);
static int	pmap_pvh_wired_mappings(struct md_page *pvh, int count);

static void	pmap_abort_ptp(pmap_t pmap, vm_offset_t va, vm_page_t mpte);
static boolean_t pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va);
static bool	pmap_enter_4mpage(pmap_t pmap, vm_offset_t va, vm_page_t m,
		    vm_prot_t prot);
static int	pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde,
		    u_int flags, vm_page_t m);
static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
		    vm_page_t m, vm_prot_t prot, vm_page_t mpte);
static int	pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte, bool promoted);
static void	pmap_invalidate_pde_page(pmap_t pmap, vm_offset_t va,
		    pd_entry_t pde);
static void	pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte);
static boolean_t pmap_is_modified_pvh(struct md_page *pvh);
static boolean_t pmap_is_referenced_pvh(struct md_page *pvh);
static void	pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);
static void	pmap_kenter_pde(vm_offset_t va, pd_entry_t newpde);
static void	pmap_pde_attr(pd_entry_t *pde, int cache_bits);
#if VM_NRESERVLEVEL > 0
static void	pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va);
#endif
static boolean_t pmap_protect_pde(pmap_t pmap, pd_entry_t *pde,
		    vm_offset_t sva, vm_prot_t prot);
static void	pmap_pte_attr(pt_entry_t *pte, int cache_bits);
static void	pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
		    struct spglist *free);
static int	pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva,
		    struct spglist *free);
static vm_page_t pmap_remove_pt_page(pmap_t pmap, vm_offset_t va);
static void	pmap_remove_page(pmap_t pmap, vm_offset_t va,
		    struct spglist *free);
static bool	pmap_remove_ptes(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
		    struct spglist *free);
static void	pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va);
static void	pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m);
static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
		    vm_page_t m);
static void	pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
		    pd_entry_t newpde);
static void	pmap_update_pde_invalidate(vm_offset_t va, pd_entry_t newpde);

static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, u_int flags);

static vm_page_t _pmap_allocpte(pmap_t pmap, u_int ptepindex, u_int flags);
static void _pmap_unwire_ptp(pmap_t pmap, vm_page_t m, struct spglist *free);
static pt_entry_t *pmap_pte_quick(pmap_t pmap, vm_offset_t va);
static void pmap_pte_release(pt_entry_t *pte);
static int pmap_unuse_pt(pmap_t, vm_offset_t, struct spglist *);
#ifdef PMAP_PAE_COMP
static void *pmap_pdpt_allocf(uma_zone_t zone, vm_size_t bytes, int domain,
    uint8_t *flags, int wait);
#endif
static void pmap_init_trm(void);
static void pmap_invalidate_all_int(pmap_t pmap);

static __inline void pagezero(void *page);

CTASSERT(1 << PDESHIFT == sizeof(pd_entry_t));
CTASSERT(1 << PTESHIFT == sizeof(pt_entry_t));

extern char _end[];
extern u_long physfree;		/* phys addr of next free page */
extern u_long vm86phystk;	/* PA of vm86/bios stack */
extern u_long vm86paddr;	/* address of vm86 region */
extern int vm86pa;		/* phys addr of vm86 region */
extern u_long KERNend;		/* phys addr end of kernel (just after bss) */
#ifdef PMAP_PAE_COMP
pd_entry_t *IdlePTD_pae;	/* phys addr of kernel PTD */
pdpt_entry_t *IdlePDPT;		/* phys addr of kernel PDPT */
pt_entry_t *KPTmap_pae;		/* address of kernel page tables */
#define	IdlePTD	IdlePTD_pae
#define	KPTmap	KPTmap_pae
#else
pd_entry_t *IdlePTD_nopae;
pt_entry_t *KPTmap_nopae;
#define	IdlePTD	IdlePTD_nopae
#define	KPTmap	KPTmap_nopae
#endif
extern u_long KPTphys;		/* phys addr of kernel page tables */
extern u_long tramp_idleptd;

static u_long
allocpages(u_int cnt, u_long *physfree)
{
	u_long res;

	res = *physfree;
	*physfree += PAGE_SIZE * cnt;
	bzero((void *)res, PAGE_SIZE * cnt);
	return (res);
}

static void
pmap_cold_map(u_long pa, u_long va, u_long cnt)
{
	pt_entry_t *pt;

	for (pt = (pt_entry_t *)KPTphys + atop(va); cnt > 0;
	    cnt--, pt++, va += PAGE_SIZE, pa += PAGE_SIZE)
		*pt = pa | PG_V | PG_RW | PG_A | PG_M;
}

static void
pmap_cold_mapident(u_long pa, u_long cnt)
{

	pmap_cold_map(pa, pa, cnt);
}

_Static_assert(LOWPTDI * 2 * NBPDR == KERNBASE,
    "Broken double-map of zero PTD");

static void
__CONCAT(PMTYPE, remap_lower)(bool enable)
{
	int i;

	for (i = 0; i < LOWPTDI; i++)
		IdlePTD[i] = enable ? IdlePTD[LOWPTDI + i] : 0;
	load_cr3(rcr3());		/* invalidate TLB */
}

/*
 * Called from locore.s before paging is enabled.  Sets up the first
 * kernel page table.  Since the kernel is mapped with PA == VA, this
 * code does not require relocations.
 */
void
__CONCAT(PMTYPE, cold)(void)
{
	pt_entry_t *pt;
	u_long a;
	u_int cr3, ncr4;

	physfree = (u_long)&_end;
	if (bootinfo.bi_esymtab != 0)
		physfree = bootinfo.bi_esymtab;
	if (bootinfo.bi_kernend != 0)
		physfree = bootinfo.bi_kernend;
	physfree = roundup2(physfree, NBPDR);
	KERNend = physfree;

	/* Allocate Kernel Page Tables */
	KPTphys = allocpages(NKPT, &physfree);
	KPTmap = (pt_entry_t *)KPTphys;

	/* Allocate Page Table Directory */
#ifdef PMAP_PAE_COMP
	/* XXX only need 32 bytes (easier for now) */
	IdlePDPT = (pdpt_entry_t *)allocpages(1, &physfree);
#endif
	IdlePTD = (pd_entry_t *)allocpages(NPGPTD, &physfree);
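
	/*
	 * Note (summarizing the allocations above): the early physical
	 * carve-out so far is, in order, the NKPT kernel page table
	 * pages at KPTphys, the PDPT page when PAE is compiled in, and
	 * the NPGPTD page directory pages at IdlePTD; the guard page,
	 * proc0's kstack, and the vm86 areas are allocated next.
	 */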

	/*
	 * Allocate KSTACK.  Leave a guard page between IdlePTD and
	 * proc0kstack, to control stack overflow for thread0 and
	 * prevent corruption of the page table.  We leak the guard
	 * physical memory due to 1:1 mappings.
	 */
	allocpages(1, &physfree);
	proc0kstack = allocpages(TD0_KSTACK_PAGES, &physfree);

	/* vm86/bios stack */
	vm86phystk = allocpages(1, &physfree);

	/* pgtable + ext + IOPAGES */
	vm86paddr = vm86pa = allocpages(3, &physfree);

	/* Install page tables into PTD.  Page table page 1 is wasted. */
	for (a = 0; a < NKPT; a++)
		IdlePTD[a] = (KPTphys + ptoa(a)) | PG_V | PG_RW | PG_A | PG_M;

#ifdef PMAP_PAE_COMP
	/* For PAE, install the PTD pointers into the PDPT. */
	for (a = 0; a < NPGPTD; a++)
		IdlePDPT[a] = ((u_int)IdlePTD + ptoa(a)) | PG_V;
#endif

	/*
	 * Install the recursive mapping for the kernel page tables into
	 * the page directory itself.
	 */
	for (a = 0; a < NPGPTD; a++)
		IdlePTD[PTDPTDI + a] = ((u_int)IdlePTD + ptoa(a)) | PG_V |
		    PG_RW;

	/*
	 * Initialize page table pages mapping physical address zero
	 * through the (physical) end of the kernel.  Many of these
	 * pages must be reserved, and we reserve them all and map
	 * them linearly for convenience.  We do this even if we've
	 * enabled PSE above; we'll just switch the corresponding
	 * kernel PDEs before we turn on paging.
	 *
	 * This and all other page table entries allow read and write
	 * access for various reasons.  Kernel mappings never have any
	 * access restrictions.
	 */
	pmap_cold_mapident(0, atop(NBPDR) * LOWPTDI);
	pmap_cold_map(0, NBPDR * LOWPTDI, atop(NBPDR) * LOWPTDI);
	pmap_cold_mapident(KERNBASE, atop(KERNend - KERNBASE));

	/* Map page table directory */
#ifdef PMAP_PAE_COMP
	pmap_cold_mapident((u_long)IdlePDPT, 1);
#endif
	pmap_cold_mapident((u_long)IdlePTD, NPGPTD);

	/* Map early KPTmap.  It is really pmap_cold_mapident. */
	pmap_cold_map(KPTphys, (u_long)KPTmap, NKPT);

	/* Map proc0kstack */
	pmap_cold_mapident(proc0kstack, TD0_KSTACK_PAGES);
	/* ISA hole already mapped */

	pmap_cold_mapident(vm86phystk, 1);
	pmap_cold_mapident(vm86pa, 3);

	/* Map page 0 into the vm86 page table */
	*(pt_entry_t *)vm86pa = 0 | PG_RW | PG_U | PG_A | PG_M | PG_V;

	/* ...likewise for the ISA hole for vm86 */
	for (pt = (pt_entry_t *)vm86pa + atop(ISA_HOLE_START), a = 0;
	    a < atop(ISA_HOLE_LENGTH); a++, pt++)
		*pt = (ISA_HOLE_START + ptoa(a)) | PG_RW | PG_U | PG_A |
		    PG_M | PG_V;

	/* Enable PSE, PGE, VME, and PAE if configured. */
	ncr4 = 0;
	if ((cpu_feature & CPUID_PSE) != 0) {
		ncr4 |= CR4_PSE;
		pseflag = PG_PS;
		/*
		 * Superpage mapping of the kernel text.  Existing 4k
		 * page table pages are wasted.
		 */
		for (a = KERNBASE; a < KERNend; a += NBPDR)
			IdlePTD[a >> PDRSHIFT] = a | PG_PS | PG_A | PG_M |
			    PG_RW | PG_V;
	}
	if ((cpu_feature & CPUID_PGE) != 0) {
		ncr4 |= CR4_PGE;
		pgeflag = PG_G;
	}
	ncr4 |= (cpu_feature & CPUID_VME) != 0 ? CR4_VME : 0;
#ifdef PMAP_PAE_COMP
	ncr4 |= CR4_PAE;
#endif
	if (ncr4 != 0)
		load_cr4(rcr4() | ncr4);

	/* Now enable paging */
#ifdef PMAP_PAE_COMP
	cr3 = (u_int)IdlePDPT;
	if ((cpu_feature & CPUID_PAT) == 0)
		wbinvd();
#else
	cr3 = (u_int)IdlePTD;
#endif
	tramp_idleptd = cr3;
	load_cr3(cr3);
	load_cr0(rcr0() | CR0_PG);

	/*
	 * Now running relocated at KERNBASE where the system is
	 * linked to run.
	 */

	/*
	 * Remove the lowest part of the double mapping of low memory
	 * to get some null pointer checks.
	 */
	__CONCAT(PMTYPE, remap_lower)(false);

	kernel_vm_end = /* 0 + */ NKPT * NBPDR;
#ifdef PMAP_PAE_COMP
	i386_pmap_VM_NFREEORDER = VM_NFREEORDER_PAE;
	i386_pmap_VM_LEVEL_0_ORDER = VM_LEVEL_0_ORDER_PAE;
	i386_pmap_PDRSHIFT = PDRSHIFT_PAE;
#else
	i386_pmap_VM_NFREEORDER = VM_NFREEORDER_NOPAE;
	i386_pmap_VM_LEVEL_0_ORDER = VM_LEVEL_0_ORDER_NOPAE;
	i386_pmap_PDRSHIFT = PDRSHIFT_NOPAE;
#endif
}

static void
__CONCAT(PMTYPE, set_nx)(void)
{

#ifdef PMAP_PAE_COMP
	if ((amd_feature & AMDID_NX) == 0)
		return;
	pg_nx = PG_NX;
	elf32_nxstack = 1;
	/* EFER.EFER_NXE is set in initializecpu(). */
#endif
}

/*
 * Bootstrap the system enough to run with virtual memory.
 *
 * On the i386 this is called after pmap_cold() has created the initial
 * kernel page table and enabled paging; it merely syncs the pmap
 * module with what has already been done.
 */
static void
__CONCAT(PMTYPE, bootstrap)(vm_paddr_t firstaddr)
{
	vm_offset_t va;
	pt_entry_t *pte, *unused __unused;
	struct pcpu *pc;
	u_long res;
	int i;

	res = atop(firstaddr - (vm_paddr_t)KERNLOAD);

	/*
	 * Add a physical memory segment (vm_phys_seg) corresponding to the
	 * preallocated kernel page table pages so that vm_page structures
	 * representing these pages will be created.  The vm_page structures
	 * are required for promotion of the corresponding kernel virtual
	 * addresses to superpage mappings.
	 */
	vm_phys_early_add_seg(KPTphys, KPTphys + ptoa(nkpt));

	/*
	 * Initialize the first available kernel virtual address.
	 * However, using "firstaddr" may waste a few pages of the
	 * kernel virtual address space, because pmap_cold() may not
	 * have mapped every physical page that it allocated.
	 * Preferably, pmap_cold() would provide a first unused
	 * virtual address in addition to "firstaddr".
	 */
	virtual_avail = (vm_offset_t)firstaddr;
	virtual_end = VM_MAX_KERNEL_ADDRESS;

	/*
	 * Initialize the kernel pmap (which is statically allocated).
	 * Count bootstrap data as being resident in case any of this data is
	 * later unmapped (using pmap_remove()) and freed.
	 */
	PMAP_LOCK_INIT(kernel_pmap);
	kernel_pmap->pm_pdir = IdlePTD;
#ifdef PMAP_PAE_COMP
	kernel_pmap->pm_pdpt = IdlePDPT;
#endif
	CPU_FILL(&kernel_pmap->pm_active);	/* don't allow deactivation */
	kernel_pmap->pm_stats.resident_count = res;
	TAILQ_INIT(&kernel_pmap->pm_pvchunk);
	vm_radix_init(&kernel_pmap->pm_root);

	/*
	 * Initialize the global pv list lock.
	 */
	rw_init(&pvh_global_lock, "pmap pv global");

	/*
	 * Reserve some special page table entries/VA space for temporary
	 * mapping of pages.
	 */
#define	SYSMAP(c, p, v, n)	\
	v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);

	va = virtual_avail;
	pte = vtopte(va);
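
	/*
	 * For illustration, the expansion of the SYSMAP() invocations
	 * below follows directly from the macro above; e.g.,
	 * SYSMAP(caddr_t, CMAP3, CADDR3, 1) becomes:
	 *
	 *	CADDR3 = (caddr_t)va; va += 1 * PAGE_SIZE;
	 *	CMAP3 = pte; pte += 1;
	 */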

	/*
	 * Initialize temporary map objects on the current CPU for use
	 * during early boot.
	 * CMAP1/CMAP2 are used for zeroing and copying pages.
	 * CMAP3 is used for the boot-time memory test.
	 */
	pc = get_pcpu();
	mtx_init(&pc->pc_cmap_lock, "SYSMAPS", NULL, MTX_DEF);
	SYSMAP(caddr_t, pc->pc_cmap_pte1, pc->pc_cmap_addr1, 1)
	SYSMAP(caddr_t, pc->pc_cmap_pte2, pc->pc_cmap_addr2, 1)
	SYSMAP(vm_offset_t, pte, pc->pc_qmap_addr, 1)

	SYSMAP(caddr_t, CMAP3, CADDR3, 1);

	/*
	 * Crashdump maps.
	 */
	SYSMAP(caddr_t, unused, crashdumpmap, MAXDUMPPGS)

	/*
	 * ptvmmap is used for reading arbitrary physical pages via /dev/mem.
	 */
	SYSMAP(caddr_t, unused, ptvmmap, 1)

	/*
	 * msgbufp is used to map the system message buffer.
	 */
	SYSMAP(struct msgbuf *, unused, msgbufp, atop(round_page(msgbufsize)))

	/*
	 * KPTmap is used by pmap_kextract().
	 *
	 * KPTmap is first initialized by pmap_cold().  However, that initial
	 * KPTmap can only support NKPT page table pages.  Here, a larger
	 * KPTmap is created that can support KVA_PAGES page table pages.
	 */
	SYSMAP(pt_entry_t *, KPTD, KPTmap, KVA_PAGES)

	for (i = 0; i < NKPT; i++)
		KPTD[i] = (KPTphys + ptoa(i)) | PG_RW | PG_V;

	/*
	 * PADDR1 and PADDR2 are used by pmap_pte_quick() and pmap_pte(),
	 * respectively.
	 */
	SYSMAP(pt_entry_t *, PMAP1, PADDR1, 1)
	SYSMAP(pt_entry_t *, PMAP2, PADDR2, 1)
	SYSMAP(pt_entry_t *, PMAP3, PADDR3, 1)

	mtx_init(&PMAP2mutex, "PMAP2", NULL, MTX_DEF);

	virtual_avail = va;

	/*
	 * Initialize the PAT MSR if present.
	 * pmap_init_pat() clears and sets CR4_PGE, which, as a
	 * side-effect, invalidates stale PG_G TLB entries that might
	 * have been created in our pre-boot environment.  We assume
	 * that PAT support implies PGE and, conversely, that PGE
	 * presence implies PAT.  Both features were introduced with
	 * the Pentium Pro.
	 */
	pmap_init_pat();
}

static void
pmap_init_reserved_pages(void)
{
	struct pcpu *pc;
	vm_offset_t pages;
	int i;

#ifdef PMAP_PAE_COMP
	if (!pae_mode)
		return;
#else
	if (pae_mode)
		return;
#endif
	CPU_FOREACH(i) {
		pc = pcpu_find(i);
		mtx_init(&pc->pc_copyout_mlock, "cpmlk", NULL, MTX_DEF |
		    MTX_NEW);
		pc->pc_copyout_maddr = kva_alloc(ptoa(2));
		if (pc->pc_copyout_maddr == 0)
			panic("unable to allocate non-sleepable copyout KVA");
		sx_init(&pc->pc_copyout_slock, "cpslk");
		pc->pc_copyout_saddr = kva_alloc(ptoa(2));
		if (pc->pc_copyout_saddr == 0)
			panic("unable to allocate sleepable copyout KVA");
		pc->pc_pmap_eh_va = kva_alloc(ptoa(1));
		if (pc->pc_pmap_eh_va == 0)
			panic("unable to allocate pmap_extract_and_hold KVA");
		pc->pc_pmap_eh_ptep = (char *)vtopte(pc->pc_pmap_eh_va);

		/*
		 * Skip if the mappings have already been initialized,
		 * i.e. this is the BSP.
		 */
		if (pc->pc_cmap_addr1 != 0)
			continue;

		mtx_init(&pc->pc_cmap_lock, "SYSMAPS", NULL, MTX_DEF);
		pages = kva_alloc(PAGE_SIZE * 3);
		if (pages == 0)
			panic("unable to allocate CMAP KVA");
		pc->pc_cmap_pte1 = vtopte(pages);
		pc->pc_cmap_pte2 = vtopte(pages + PAGE_SIZE);
		pc->pc_cmap_addr1 = (caddr_t)pages;
		pc->pc_cmap_addr2 = (caddr_t)(pages + PAGE_SIZE);
		pc->pc_qmap_addr = pages + ptoa(2);
	}
}

SYSINIT(rpages_init, SI_SUB_CPU, SI_ORDER_ANY, pmap_init_reserved_pages, NULL);

/*
 * Setup the PAT MSR.
 */
static void
__CONCAT(PMTYPE, init_pat)(void)
{
	int pat_table[PAT_INDEX_SIZE];
	uint64_t pat_msr;
	u_long cr0, cr4;
	int i;

	/* Set default PAT index table. */
	for (i = 0; i < PAT_INDEX_SIZE; i++)
		pat_table[i] = -1;
	pat_table[PAT_WRITE_BACK] = 0;
	pat_table[PAT_WRITE_THROUGH] = 1;
	pat_table[PAT_UNCACHEABLE] = 3;
	pat_table[PAT_WRITE_COMBINING] = 3;
	pat_table[PAT_WRITE_PROTECTED] = 3;
	pat_table[PAT_UNCACHED] = 3;

	/*
	 * Bail if this CPU doesn't implement PAT.
	 * We assume that PAT support implies PGE.
	 */
	if ((cpu_feature & CPUID_PAT) == 0) {
		for (i = 0; i < PAT_INDEX_SIZE; i++)
			pat_index[i] = pat_table[i];
		pat_works = 0;
		return;
	}

	/*
	 * Due to some Intel errata, we can only safely use the lower 4
	 * PAT entries.
	 *
	 *   Intel Pentium III Processor Specification Update
	 * Errata E.27 (Upper Four PAT Entries Not Usable With Mode B
	 * or Mode C Paging)
	 *
	 *   Intel Pentium IV Processor Specification Update
	 * Errata N46 (PAT Index MSB May Be Calculated Incorrectly)
	 */
	if (cpu_vendor_id == CPU_VENDOR_INTEL &&
	    !(CPUID_TO_FAMILY(cpu_id) == 6 && CPUID_TO_MODEL(cpu_id) >= 0xe))
		pat_works = 0;

	/* Initialize default PAT entries. */
	pat_msr = PAT_VALUE(0, PAT_WRITE_BACK) |
	    PAT_VALUE(1, PAT_WRITE_THROUGH) |
	    PAT_VALUE(2, PAT_UNCACHED) |
	    PAT_VALUE(3, PAT_UNCACHEABLE) |
	    PAT_VALUE(4, PAT_WRITE_BACK) |
	    PAT_VALUE(5, PAT_WRITE_THROUGH) |
	    PAT_VALUE(6, PAT_UNCACHED) |
	    PAT_VALUE(7, PAT_UNCACHEABLE);

	if (pat_works) {
		/*
		 * Leave the indices 0-3 at the default of WB, WT, UC-, and UC.
		 * Program 5 and 6 as WP and WC.
		 * Leave 4 and 7 as WB and UC.
		 */
		pat_msr &= ~(PAT_MASK(5) | PAT_MASK(6));
		pat_msr |= PAT_VALUE(5, PAT_WRITE_PROTECTED) |
		    PAT_VALUE(6, PAT_WRITE_COMBINING);
		pat_table[PAT_UNCACHED] = 2;
		pat_table[PAT_WRITE_PROTECTED] = 5;
		pat_table[PAT_WRITE_COMBINING] = 6;
	} else {
		/*
		 * Just replace PAT Index 2 with WC instead of UC-.
		 */
		pat_msr &= ~PAT_MASK(2);
		pat_msr |= PAT_VALUE(2, PAT_WRITE_COMBINING);
		pat_table[PAT_WRITE_COMBINING] = 2;
	}

	/* Disable PGE. */
	cr4 = rcr4();
	load_cr4(cr4 & ~CR4_PGE);

	/* Disable caches (CD = 1, NW = 0). */
	cr0 = rcr0();
	load_cr0((cr0 & ~CR0_NW) | CR0_CD);

	/* Flush caches and TLBs. */
	wbinvd();
	invltlb();

	/* Update PAT and index table. */
	wrmsr(MSR_PAT, pat_msr);
	for (i = 0; i < PAT_INDEX_SIZE; i++)
		pat_index[i] = pat_table[i];

	/* Flush caches and TLBs again. */
	wbinvd();
	invltlb();

	/* Restore caches and PGE. */
	load_cr0(cr0);
	load_cr4(cr4);
}

#ifdef PMAP_PAE_COMP
static void *
pmap_pdpt_allocf(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *flags,
    int wait)
{

	/* Inform UMA that this allocator uses kernel_map/object. */
	*flags = UMA_SLAB_KERNEL;
	return ((void *)kmem_alloc_contig_domainset(DOMAINSET_FIXED(domain),
	    bytes, wait, 0x0ULL, 0xffffffffULL, 1, 0, VM_MEMATTR_DEFAULT));
}
#endif

/*
 * Abuse the pte nodes for unmapped kva to thread a kva freelist through.
 * Requirements:
 *  - Must deal with pages in order to ensure that none of the PG_* bits
 *    are ever set, PG_V in particular.
 *  - Assumes we can write to ptes without pte_store() atomic ops, even
 *    on PAE systems.  This should be ok.
 *  - Assumes nothing will ever test these addresses for 0 to indicate
 *    no mapping instead of correctly checking PG_V.
 *  - Assumes a vm_offset_t will fit in a pte (true for i386).
 * Because PG_V is never set, there can be no mappings to invalidate.
 */
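
/*
 * A minimal usage sketch (the KVA values are hypothetical).  The
 * freelist is a stack of unmapped pages whose link fields live in the
 * otherwise unused PTEs:
 *
 *	pmap_ptelist_init(&head, base, 2);  // head = base
 *	va = pmap_ptelist_alloc(&head);     // va = base; head = base + PAGE_SIZE
 *	pmap_ptelist_free(&head, va);       // head = base again
 */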
static vm_offset_t
pmap_ptelist_alloc(vm_offset_t *head)
{
	pt_entry_t *pte;
	vm_offset_t va;

	va = *head;
	if (va == 0)
		panic("pmap_ptelist_alloc: exhausted ptelist KVA");
	pte = vtopte(va);
	*head = *pte;
	if (*head & PG_V)
		panic("pmap_ptelist_alloc: va with PG_V set!");
	*pte = 0;
	return (va);
}

static void
pmap_ptelist_free(vm_offset_t *head, vm_offset_t va)
{
	pt_entry_t *pte;

	if (va & PG_V)
		panic("pmap_ptelist_free: freeing va with PG_V set!");
	pte = vtopte(va);
	*pte = *head;		/* virtual! PG_V is 0 though */
	*head = va;
}

static void
pmap_ptelist_init(vm_offset_t *head, void *base, int npages)
{
	int i;
	vm_offset_t va;

	*head = 0;
	for (i = npages - 1; i >= 0; i--) {
		va = (vm_offset_t)base + i * PAGE_SIZE;
		pmap_ptelist_free(head, va);
	}
}

/*
 * Initialize the pmap module.
 * Called by vm_init to initialize any structures that the pmap system
 * needs in order to map virtual memory.
 */
static void
__CONCAT(PMTYPE, init)(void)
{
	struct pmap_preinit_mapping *ppim;
	vm_page_t mpte;
	vm_size_t s;
	int i, pv_npg;

	/*
	 * Initialize the vm page array entries for the kernel pmap's
	 * page table pages.
	 */
	PMAP_LOCK(kernel_pmap);
	for (i = 0; i < NKPT; i++) {
		mpte = PHYS_TO_VM_PAGE(KPTphys + ptoa(i));
		KASSERT(mpte >= vm_page_array &&
		    mpte < &vm_page_array[vm_page_array_size],
		    ("pmap_init: page table page is out of range"));
		mpte->pindex = i + KPTDI;
		mpte->phys_addr = KPTphys + ptoa(i);
		mpte->ref_count = 1;

		/*
		 * Collect the page table pages that were replaced by a 2/4MB
		 * page.  They are filled with equivalent 4KB page mappings.
		 */
		if (pseflag != 0 &&
		    KERNBASE <= i << PDRSHIFT && i << PDRSHIFT < KERNend &&
		    pmap_insert_pt_page(kernel_pmap, mpte, true))
			panic("pmap_init: pmap_insert_pt_page failed");
	}
	PMAP_UNLOCK(kernel_pmap);
	vm_wire_add(NKPT);

	/*
	 * Initialize the address space (zone) for the pv entries.  Set a
	 * high water mark so that the system can recover from excessive
	 * numbers of pv entries.
	 */
	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
	pv_entry_max = shpgperproc * maxproc + vm_cnt.v_page_count;
	TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
	pv_entry_max = roundup(pv_entry_max, _NPCPV);
	pv_entry_high_water = 9 * (pv_entry_max / 10);

	/*
	 * If the kernel is running on a virtual machine, then it must assume
	 * that MCA is enabled by the hypervisor.  Moreover, the kernel must
	 * be prepared for the hypervisor changing the vendor and family that
	 * are reported by CPUID.  Consequently, the workaround for AMD Family
	 * 10h Erratum 383 is enabled if the processor's feature set does not
	 * include at least one feature that is only supported by older Intel
	 * or newer AMD processors.
	 */
	if (vm_guest != VM_GUEST_NO && (cpu_feature & CPUID_SS) == 0 &&
	    (cpu_feature2 & (CPUID2_SSSE3 | CPUID2_SSE41 | CPUID2_AESNI |
	    CPUID2_AVX | CPUID2_XSAVE)) == 0 && (amd_feature2 & (AMDID2_XOP |
	    AMDID2_FMA4)) == 0)
		workaround_erratum383 = 1;

	/*
	 * Are large page mappings supported and enabled?
	 */
	TUNABLE_INT_FETCH("vm.pmap.pg_ps_enabled", &pg_ps_enabled);
	if (pseflag == 0)
		pg_ps_enabled = 0;
	else if (pg_ps_enabled) {
		KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0,
		    ("pmap_init: can't assign to pagesizes[1]"));
		pagesizes[1] = NBPDR;
	}

	/*
	 * Calculate the size of the pv head table for superpages.
	 * Handle the possibility that "vm_phys_segs[...].end" is zero.
	 */
	pv_npg = trunc_4mpage(vm_phys_segs[vm_phys_nsegs - 1].end -
	    PAGE_SIZE) / NBPDR + 1;

	/*
	 * Allocate memory for the pv head table for superpages.
	 */
	s = (vm_size_t)(pv_npg * sizeof(struct md_page));
	s = round_page(s);
	pv_table = kmem_malloc(s, M_WAITOK | M_ZERO);
	for (i = 0; i < pv_npg; i++)
		TAILQ_INIT(&pv_table[i].pv_list);

	pv_maxchunks = MAX(pv_entry_max / _NPCPV, maxproc);
	pv_chunkbase = (struct pv_chunk *)kva_alloc(PAGE_SIZE * pv_maxchunks);
	if (pv_chunkbase == NULL)
		panic("pmap_init: not enough kvm for pv chunks");
	pmap_ptelist_init(&pv_vafree, pv_chunkbase, pv_maxchunks);
#ifdef PMAP_PAE_COMP
	pdptzone = uma_zcreate("PDPT", NPGPTD * sizeof(pdpt_entry_t), NULL,
	    NULL, NULL, NULL, (NPGPTD * sizeof(pdpt_entry_t)) - 1,
	    UMA_ZONE_CONTIG | UMA_ZONE_VM | UMA_ZONE_NOFREE);
	uma_zone_set_allocf(pdptzone, pmap_pdpt_allocf);
#endif

	pmap_initialized = 1;
	pmap_init_trm();

	if (!bootverbose)
		return;
	for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
		ppim = pmap_preinit_mapping + i;
		if (ppim->va == 0)
			continue;
		printf("PPIM %u: PA=%#jx, VA=%#x, size=%#x, mode=%#x\n", i,
		    (uintmax_t)ppim->pa, ppim->va, ppim->sz, ppim->mode);
	}
}

extern u_long pmap_pde_demotions;
extern u_long pmap_pde_mappings;
extern u_long pmap_pde_p_failures;
extern u_long pmap_pde_promotions;

/***************************************************
 * Low level helper routines.....
 ***************************************************/

static boolean_t
__CONCAT(PMTYPE, is_valid_memattr)(pmap_t pmap __unused, vm_memattr_t mode)
{

	return (mode >= 0 && mode < PAT_INDEX_SIZE &&
	    pat_index[(int)mode] >= 0);
}

/*
 * Determine the appropriate bits to set in a PTE or PDE for a specified
 * caching mode.
 */
static int
__CONCAT(PMTYPE, cache_bits)(pmap_t pmap, int mode, boolean_t is_pde)
{
	int cache_bits, pat_flag, pat_idx;

	if (!pmap_is_valid_memattr(pmap, mode))
		panic("Unknown caching mode %d\n", mode);

	/* The PAT bit is different for PTEs and PDEs. */
	pat_flag = is_pde ? PG_PDE_PAT : PG_PTE_PAT;

	/* Map the caching mode to a PAT index. */
	pat_idx = pat_index[mode];

	/* Map the 3-bit index value into the PAT, PCD, and PWT bits. */
	cache_bits = 0;
	if (pat_idx & 0x4)
		cache_bits |= pat_flag;
	if (pat_idx & 0x2)
		cache_bits |= PG_NC_PCD;
	if (pat_idx & 0x1)
		cache_bits |= PG_NC_PWT;
	return (cache_bits);
}
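
/*
 * Worked example (assuming pmap_init_pat() found a working PAT and
 * therefore set pat_table[PAT_WRITE_COMBINING] to 6): for mode
 * PAT_WRITE_COMBINING, pat_idx is 6 (0b110), so pmap_cache_bits()
 * returns PG_NC_PCD together with PG_PTE_PAT for a PTE, or with
 * PG_PDE_PAT for a PDE.
 */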

static int
pmap_pat_index(pmap_t pmap, pt_entry_t pte, bool is_pde)
{
	int pat_flag, pat_idx;

	if ((cpu_feature & CPUID_PAT) == 0)
		return (0);

	pat_idx = 0;
	/* The PAT bit is different for PTEs and PDEs. */
	pat_flag = is_pde ? PG_PDE_PAT : PG_PTE_PAT;

	if ((pte & pat_flag) != 0)
		pat_idx |= 0x4;
	if ((pte & PG_NC_PCD) != 0)
		pat_idx |= 0x2;
	if ((pte & PG_NC_PWT) != 0)
		pat_idx |= 0x1;

	/* See pmap_init_pat(). */
	if (pat_works) {
		if (pat_idx == 4)
			pat_idx = 0;
		if (pat_idx == 7)
			pat_idx = 3;
	} else {
		/* XXXKIB */
	}

	return (pat_idx);
}

static bool
__CONCAT(PMTYPE, ps_enabled)(pmap_t pmap __unused)
{

	return (pg_ps_enabled);
}

/*
 * The caller is responsible for maintaining TLB consistency.
 */
static void
pmap_kenter_pde(vm_offset_t va, pd_entry_t newpde)
{
	pd_entry_t *pde;

	pde = pmap_pde(kernel_pmap, va);
	pde_store(pde, newpde);
}

/*
 * After changing the page size for the specified virtual address in the page
 * table, flush the corresponding entries from the processor's TLB.  Only the
 * calling processor's TLB is affected.
 *
 * The calling thread must be pinned to a processor.
 */
static void
pmap_update_pde_invalidate(vm_offset_t va, pd_entry_t newpde)
{

	if ((newpde & PG_PS) == 0)
		/* Demotion: flush a specific 2- or 4MB page mapping. */
		invlpg(va);
	else /* if ((newpde & PG_G) == 0) */
		/*
		 * Promotion: flush every 4KB page mapping from the TLB
		 * because there are too many to flush individually.
		 */
		invltlb();
}

#ifdef SMP

static void
pmap_curcpu_cb_dummy(pmap_t pmap __unused, vm_offset_t addr1 __unused,
    vm_offset_t addr2 __unused)
{
}

/*
 * For SMP, these functions have to use the IPI mechanism for coherence.
 *
 * N.B.: Before calling any of the following TLB invalidation functions,
 * the calling processor must ensure that all stores updating a non-
 * kernel page table are globally performed.  Otherwise, another
 * processor could cache an old, pre-update entry without being
 * invalidated.  This can happen one of two ways: (1) The pmap becomes
 * active on another processor after its pm_active field is checked by
 * one of the following functions but before a store updating the page
 * table is globally performed.  (2) The pmap becomes active on another
 * processor before its pm_active field is checked but due to
 * speculative loads one of the following functions still reads the
 * pmap as inactive on the other processor.
 *
 * The kernel page table is exempt because its pm_active field is
 * immutable.  The kernel page table is always active on every
 * processor.
 */
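
/*
 * The invalidation functions below all choose their IPI target set the
 * same way: the kernel pmap, or a pmap whose pm_active set equals
 * all_cpus, targets all_cpus; otherwise the set is narrowed to the
 * other CPUs on which the pmap is currently active.
 */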
static void
pmap_invalidate_page_int(pmap_t pmap, vm_offset_t va)
{
	cpuset_t *mask, other_cpus;
	u_int cpuid;

	sched_pin();
	if (pmap == kernel_pmap) {
		invlpg(va);
		mask = &all_cpus;
	} else if (!CPU_CMP(&pmap->pm_active, &all_cpus)) {
		mask = &all_cpus;
	} else {
		cpuid = PCPU_GET(cpuid);
		other_cpus = all_cpus;
		CPU_CLR(cpuid, &other_cpus);
		CPU_AND(&other_cpus, &other_cpus, &pmap->pm_active);
		mask = &other_cpus;
	}
	smp_masked_invlpg(*mask, va, pmap, pmap_curcpu_cb_dummy);
	sched_unpin();
}

/* 4k PTEs -- Chosen to exceed the total size of Broadwell L2 TLB */
#define	PMAP_INVLPG_THRESHOLD	(4 * 1024 * PAGE_SIZE)

static void
pmap_invalidate_range_int(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
	cpuset_t *mask, other_cpus;
	vm_offset_t addr;
	u_int cpuid;

	if (eva - sva >= PMAP_INVLPG_THRESHOLD) {
		pmap_invalidate_all_int(pmap);
		return;
	}

	sched_pin();
	if (pmap == kernel_pmap) {
		for (addr = sva; addr < eva; addr += PAGE_SIZE)
			invlpg(addr);
		mask = &all_cpus;
	} else if (!CPU_CMP(&pmap->pm_active, &all_cpus)) {
		mask = &all_cpus;
	} else {
		cpuid = PCPU_GET(cpuid);
		other_cpus = all_cpus;
		CPU_CLR(cpuid, &other_cpus);
		CPU_AND(&other_cpus, &other_cpus, &pmap->pm_active);
		mask = &other_cpus;
	}
	smp_masked_invlpg_range(*mask, sva, eva, pmap, pmap_curcpu_cb_dummy);
	sched_unpin();
}

static void
pmap_invalidate_all_int(pmap_t pmap)
{
	cpuset_t *mask, other_cpus;
	u_int cpuid;

	sched_pin();
	if (pmap == kernel_pmap) {
		invltlb();
		mask = &all_cpus;
	} else if (!CPU_CMP(&pmap->pm_active, &all_cpus)) {
		mask = &all_cpus;
	} else {
		cpuid = PCPU_GET(cpuid);
		other_cpus = all_cpus;
		CPU_CLR(cpuid, &other_cpus);
		CPU_AND(&other_cpus, &other_cpus, &pmap->pm_active);
		mask = &other_cpus;
	}
	smp_masked_invltlb(*mask, pmap, pmap_curcpu_cb_dummy);
	sched_unpin();
}

static void
pmap_invalidate_cache_curcpu_cb(pmap_t pmap __unused,
    vm_offset_t addr1 __unused, vm_offset_t addr2 __unused)
{
	wbinvd();
}

static void
__CONCAT(PMTYPE, invalidate_cache)(void)
{
	smp_cache_flush(pmap_invalidate_cache_curcpu_cb);
}

struct pde_action {
	cpuset_t invalidate;	/* processors that invalidate their TLB */
	vm_offset_t va;
	pd_entry_t *pde;
	pd_entry_t newpde;
	u_int store;		/* processor that updates the PDE */
};

static void
pmap_update_pde_kernel(void *arg)
{
	struct pde_action *act = arg;
	pd_entry_t *pde;

	if (act->store == PCPU_GET(cpuid)) {
		pde = pmap_pde(kernel_pmap, act->va);
		pde_store(pde, act->newpde);
	}
}

static void
pmap_update_pde_user(void *arg)
{
	struct pde_action *act = arg;

	if (act->store == PCPU_GET(cpuid))
		pde_store(act->pde, act->newpde);
}

static void
pmap_update_pde_teardown(void *arg)
{
	struct pde_action *act = arg;

	if (CPU_ISSET(PCPU_GET(cpuid), &act->invalidate))
		pmap_update_pde_invalidate(act->va, act->newpde);
}

/*
 * Change the page size for the specified virtual address in a way that
 * prevents any possibility of the TLB ever having two entries that map the
 * same virtual address using different page sizes.  This is the recommended
 * workaround for Erratum 383 on AMD Family 10h processors.  It prevents a
 * machine check exception for a TLB state that is improperly diagnosed as a
 * hardware error.
 */
static void
pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
{
	struct pde_action act;
	cpuset_t active, other_cpus;
	u_int cpuid;

	sched_pin();
	cpuid = PCPU_GET(cpuid);
	other_cpus = all_cpus;
	CPU_CLR(cpuid, &other_cpus);
	if (pmap == kernel_pmap)
		active = all_cpus;
	else
		active = pmap->pm_active;
	if (CPU_OVERLAP(&active, &other_cpus)) {
		act.store = cpuid;
		act.invalidate = active;
		act.va = va;
		act.pde = pde;
		act.newpde = newpde;
		CPU_SET(cpuid, &active);
		smp_rendezvous_cpus(active,
		    smp_no_rendezvous_barrier, pmap == kernel_pmap ?
		    pmap_update_pde_kernel : pmap_update_pde_user,
		    pmap_update_pde_teardown, &act);
	} else {
		if (pmap == kernel_pmap)
			pmap_kenter_pde(va, newpde);
		else
			pde_store(pde, newpde);
		if (CPU_ISSET(cpuid, &active))
			pmap_update_pde_invalidate(va, newpde);
	}
	sched_unpin();
}
#else /* !SMP */
/*
 * Normal, non-SMP, 486+ invalidation functions.
 * We inline these within pmap.c for speed.
 */
static void
pmap_invalidate_page_int(pmap_t pmap, vm_offset_t va)
{

	if (pmap == kernel_pmap)
		invlpg(va);
}

static void
pmap_invalidate_range_int(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
	vm_offset_t addr;

	if (pmap == kernel_pmap)
		for (addr = sva; addr < eva; addr += PAGE_SIZE)
			invlpg(addr);
}

static void
pmap_invalidate_all_int(pmap_t pmap)
{

	if (pmap == kernel_pmap)
		invltlb();
}

static void
__CONCAT(PMTYPE, invalidate_cache)(void)
{

	wbinvd();
}

static void
pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
{

	if (pmap == kernel_pmap)
		pmap_kenter_pde(va, newpde);
	else
		pde_store(pde, newpde);
	if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
		pmap_update_pde_invalidate(va, newpde);
}
#endif /* !SMP */

static void
__CONCAT(PMTYPE, invalidate_page)(pmap_t pmap, vm_offset_t va)
{

	pmap_invalidate_page_int(pmap, va);
}

static void
__CONCAT(PMTYPE, invalidate_range)(pmap_t pmap, vm_offset_t sva,
    vm_offset_t eva)
{

	pmap_invalidate_range_int(pmap, sva, eva);
}

static void
__CONCAT(PMTYPE, invalidate_all)(pmap_t pmap)
{

	pmap_invalidate_all_int(pmap);
}

static void
pmap_invalidate_pde_page(pmap_t pmap, vm_offset_t va, pd_entry_t pde)
{

	/*
	 * When the PDE has PG_PROMOTED set, the 2- or 4MB page mapping was
	 * created by a promotion that did not invalidate the 512 or 1024 4KB
	 * page mappings that might exist in the TLB.  Consequently, at this
	 * point, the TLB may hold both 4KB and 2- or 4MB page mappings for
	 * the address range [va, va + NBPDR).  Therefore, the entire range
	 * must be invalidated here.  In contrast, when PG_PROMOTED is clear,
	 * the TLB will not hold any 4KB page mappings for the address range
	 * [va, va + NBPDR), and so a single INVLPG suffices to invalidate the
	 * 2- or 4MB page mapping from the TLB.
	 */
	if ((pde & PG_PROMOTED) != 0)
		pmap_invalidate_range_int(pmap, va, va + NBPDR - 1);
	else
		pmap_invalidate_page_int(pmap, va);
}

/*
 * Are we current address space or kernel?
 */
static __inline int
pmap_is_current(pmap_t pmap)
{

	return (pmap == kernel_pmap);
}

/*
 * If the given pmap is not the current or kernel pmap, the returned pte must
 * be released by passing it to pmap_pte_release().
 */
static pt_entry_t *
__CONCAT(PMTYPE, pte)(pmap_t pmap, vm_offset_t va)
{
	pd_entry_t newpf;
	pd_entry_t *pde;

	pde = pmap_pde(pmap, va);
	if (*pde & PG_PS)
		return (pde);
	if (*pde != 0) {
		/* are we current address space or kernel? */
		if (pmap_is_current(pmap))
			return (vtopte(va));
		mtx_lock(&PMAP2mutex);
		newpf = *pde & PG_FRAME;
		if ((*PMAP2 & PG_FRAME) != newpf) {
			*PMAP2 = newpf | PG_RW | PG_V | PG_A | PG_M;
			pmap_invalidate_page_int(kernel_pmap,
			    (vm_offset_t)PADDR2);
		}
		return (PADDR2 + (i386_btop(va) & (NPTEPG - 1)));
	}
	return (NULL);
}

/*
 * Releases a pte that was obtained from pmap_pte().  Be prepared for the pte
 * being NULL.
 */
static __inline void
pmap_pte_release(pt_entry_t *pte)
{

	if ((pt_entry_t *)((vm_offset_t)pte & ~PAGE_MASK) == PADDR2)
		mtx_unlock(&PMAP2mutex);
}

/*
 * NB: The sequence of updating a page table followed by accesses to the
 * corresponding pages is subject to the situation described in the "AMD64
 * Architecture Programmer's Manual Volume 2: System Programming" rev. 3.23,
 * "7.3.1 Special Coherency Considerations".  Therefore, issuing the INVLPG
 * right after modifying the PTE bits is crucial.
 */
static __inline void
invlcaddr(void *caddr)
{

	invlpg((u_int)caddr);
}
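
/*
 * Summary of the temporary PTE windows used by the lookup helpers in
 * this file: PMAP2/PADDR2 back pmap_pte() and are serialized by
 * PMAP2mutex; PMAP1/PADDR1 back pmap_pte_quick(), which instead relies
 * on the pv global lock and a pinned thread; PMAP3/PADDR3 back
 * pmap_pte_quick3() under the same rules.
 */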

/*
 * Super fast pmap_pte routine best used when scanning
 * the pv lists.  This eliminates many coarse-grained
 * invltlb calls.  Note that many of the pv list
 * scans are across different pmaps.  It is very wasteful
 * to do an entire invltlb for checking a single mapping.
 *
 * If the given pmap is not the current pmap, pvh_global_lock
 * must be held and curthread pinned to a CPU.
 */
static pt_entry_t *
pmap_pte_quick(pmap_t pmap, vm_offset_t va)
{
	pd_entry_t newpf;
	pd_entry_t *pde;

	pde = pmap_pde(pmap, va);
	if (*pde & PG_PS)
		return (pde);
	if (*pde != 0) {
		/* are we current address space or kernel? */
		if (pmap_is_current(pmap))
			return (vtopte(va));
		rw_assert(&pvh_global_lock, RA_WLOCKED);
		KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
		newpf = *pde & PG_FRAME;
		if ((*PMAP1 & PG_FRAME) != newpf) {
			*PMAP1 = newpf | PG_RW | PG_V | PG_A | PG_M;
#ifdef SMP
			PMAP1cpu = PCPU_GET(cpuid);
#endif
			invlcaddr(PADDR1);
			PMAP1changed++;
		} else
#ifdef SMP
		if (PMAP1cpu != PCPU_GET(cpuid)) {
			PMAP1cpu = PCPU_GET(cpuid);
			invlcaddr(PADDR1);
			PMAP1changedcpu++;
		} else
#endif
			PMAP1unchanged++;
		return (PADDR1 + (i386_btop(va) & (NPTEPG - 1)));
	}
	return (0);
}

static pt_entry_t *
pmap_pte_quick3(pmap_t pmap, vm_offset_t va)
{
	pd_entry_t newpf;
	pd_entry_t *pde;

	pde = pmap_pde(pmap, va);
	if (*pde & PG_PS)
		return (pde);
	if (*pde != 0) {
		rw_assert(&pvh_global_lock, RA_WLOCKED);
		KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
		newpf = *pde & PG_FRAME;
		if ((*PMAP3 & PG_FRAME) != newpf) {
			*PMAP3 = newpf | PG_RW | PG_V | PG_A | PG_M;
#ifdef SMP
			PMAP3cpu = PCPU_GET(cpuid);
#endif
			invlcaddr(PADDR3);
			PMAP1changed++;
		} else
#ifdef SMP
		if (PMAP3cpu != PCPU_GET(cpuid)) {
			PMAP3cpu = PCPU_GET(cpuid);
			invlcaddr(PADDR3);
			PMAP1changedcpu++;
		} else
#endif
			PMAP1unchanged++;
		return (PADDR3 + (i386_btop(va) & (NPTEPG - 1)));
	}
	return (0);
}

static pt_entry_t
pmap_pte_ufast(pmap_t pmap, vm_offset_t va, pd_entry_t pde)
{
	pt_entry_t *eh_ptep, pte, *ptep;

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	pde &= PG_FRAME;
	critical_enter();
	eh_ptep = (pt_entry_t *)PCPU_GET(pmap_eh_ptep);
	if ((*eh_ptep & PG_FRAME) != pde) {
		*eh_ptep = pde | PG_RW | PG_V | PG_A | PG_M;
		invlcaddr((void *)PCPU_GET(pmap_eh_va));
	}
	ptep = (pt_entry_t *)PCPU_GET(pmap_eh_va) + (i386_btop(va) &
	    (NPTEPG - 1));
	pte = *ptep;
	critical_exit();
	return (pte);
}

/*
 * Extract from the kernel page table the physical address that is mapped by
 * the given virtual address "va".
 *
 * This function may be used before pmap_bootstrap() is called.
 */
static vm_paddr_t
__CONCAT(PMTYPE, kextract)(vm_offset_t va)
{
	vm_paddr_t pa;

	if ((pa = pte_load(&PTD[va >> PDRSHIFT])) & PG_PS) {
		pa = (pa & PG_PS_FRAME) | (va & PDRMASK);
	} else {
		/*
		 * Beware of a concurrent promotion that changes the PDE at
		 * this point!  For example, vtopte() must not be used to
		 * access the PTE because it would use the new PDE.  It is,
		 * however, safe to use the old PDE because the page table
		 * page is preserved by the promotion.
		 */
		pa = KPTmap[i386_btop(va)];
		pa = (pa & PG_FRAME) | (va & PAGE_MASK);
	}
	return (pa);
}

/*
 * Routine:	pmap_extract
 * Function:
 *	Extract the physical page address associated
 *	with the given map/virtual_address pair.
 */
static vm_paddr_t
__CONCAT(PMTYPE, extract)(pmap_t pmap, vm_offset_t va)
{
	vm_paddr_t rtval;
	pt_entry_t pte;
	pd_entry_t pde;

	rtval = 0;
	PMAP_LOCK(pmap);
	pde = pmap->pm_pdir[va >> PDRSHIFT];
	if (pde != 0) {
		if ((pde & PG_PS) != 0)
			rtval = (pde & PG_PS_FRAME) | (va & PDRMASK);
		else {
			pte = pmap_pte_ufast(pmap, va, pde);
			rtval = (pte & PG_FRAME) | (va & PAGE_MASK);
		}
	}
	PMAP_UNLOCK(pmap);
	return (rtval);
}

/*
 * Routine:	pmap_extract_and_hold
 * Function:
 *	Atomically extract and hold the physical page
 *	with the given pmap and virtual address pair
 *	if that mapping permits the given protection.
 */
static vm_page_t
__CONCAT(PMTYPE, extract_and_hold)(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{
	pd_entry_t pde;
	pt_entry_t pte;
	vm_page_t m;

	m = NULL;
	PMAP_LOCK(pmap);
	pde = *pmap_pde(pmap, va);
	if (pde != 0) {
		if (pde & PG_PS) {
			if ((pde & PG_RW) || (prot & VM_PROT_WRITE) == 0)
				m = PHYS_TO_VM_PAGE((pde & PG_PS_FRAME) |
				    (va & PDRMASK));
		} else {
			pte = pmap_pte_ufast(pmap, va, pde);
			if (pte != 0 &&
			    ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0))
				m = PHYS_TO_VM_PAGE(pte & PG_FRAME);
		}
		if (m != NULL && !vm_page_wire_mapped(m))
			m = NULL;
	}
	PMAP_UNLOCK(pmap);
	return (m);
}

/***************************************************
 * Low level mapping routines.....
 ***************************************************/

/*
 * Add a wired page to the kva.
 * Note: not SMP coherent.
 *
 * This function may be used before pmap_bootstrap() is called.
 */
static void
__CONCAT(PMTYPE, kenter)(vm_offset_t va, vm_paddr_t pa)
{
	pt_entry_t *pte;

	pte = vtopte(va);
	pte_store(pte, pa | PG_RW | PG_V);
}

static __inline void
pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode)
{
	pt_entry_t *pte;

	pte = vtopte(va);
	pte_store(pte, pa | PG_RW | PG_V | pmap_cache_bits(kernel_pmap,
	    mode, 0));
}

/*
 * Remove a page from the kernel pagetables.
 * Note: not SMP coherent.
 *
 * This function may be used before pmap_bootstrap() is called.
 */
static void
__CONCAT(PMTYPE, kremove)(vm_offset_t va)
{
	pt_entry_t *pte;

	pte = vtopte(va);
	pte_clear(pte);
}

/*
 * Used to map a range of physical addresses into kernel
 * virtual address space.
 *
 * The value passed in '*virt' is a suggested virtual address for
 * the mapping.  Architectures which can support a direct-mapped
 * physical to virtual region can return the appropriate address
 * within that region, leaving '*virt' unchanged.  Other
 * architectures should map the pages starting at '*virt' and
 * update '*virt' with the first usable address after the mapped
 * region.
 */
static vm_offset_t
__CONCAT(PMTYPE, map)(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end,
    int prot)
{
	vm_offset_t va, sva;
	vm_paddr_t superpage_offset;
	pd_entry_t newpde;

	va = *virt;
	/*
	 * Does the physical address range's size and alignment permit at
	 * least one superpage mapping to be created?
	 */
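	/*
	 * Worked example with hypothetical values and 4MB superpages
	 * (NBPDR = 0x400000, PDRMASK = 0x3fffff): for start = 0xc12000
	 * and end = 0x1c00000, superpage_offset is 0x12000, and the
	 * test below finds 0xc00000 mappable bytes beyond the first
	 * superpage boundary, so "va" is realigned to share start's
	 * offset within a 4MB page.
	 */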
	 */
	superpage_offset = start & PDRMASK;
	if ((end - start) - ((NBPDR - superpage_offset) & PDRMASK) >= NBPDR) {
		/*
		 * Increase the starting virtual address so that its alignment
		 * does not preclude the use of superpage mappings.
		 */
		if ((va & PDRMASK) < superpage_offset)
			va = (va & ~PDRMASK) + superpage_offset;
		else if ((va & PDRMASK) > superpage_offset)
			va = ((va + PDRMASK) & ~PDRMASK) + superpage_offset;
	}
	sva = va;
	while (start < end) {
		if ((start & PDRMASK) == 0 && end - start >= NBPDR &&
		    pseflag != 0) {
			KASSERT((va & PDRMASK) == 0,
			    ("pmap_map: misaligned va %#x", va));
			newpde = start | PG_PS | PG_RW | PG_V;
			pmap_kenter_pde(va, newpde);
			va += NBPDR;
			start += NBPDR;
		} else {
			pmap_kenter(va, start);
			va += PAGE_SIZE;
			start += PAGE_SIZE;
		}
	}
	pmap_invalidate_range_int(kernel_pmap, sva, va);
	*virt = va;
	return (sva);
}

/*
 * Add a list of wired pages to the KVA.  This routine is only used for
 * temporary kernel mappings that do not need to have page modification
 * or references recorded.  Note that old mappings are simply written
 * over.  The pages *must* be wired.
 * Note: SMP coherent.  Uses a ranged shootdown IPI.
 */
static void
__CONCAT(PMTYPE, qenter)(vm_offset_t sva, vm_page_t *ma, int count)
{
	pt_entry_t *endpte, oldpte, pa, *pte;
	vm_page_t m;

	oldpte = 0;
	pte = vtopte(sva);
	endpte = pte + count;
	while (pte < endpte) {
		m = *ma++;
		pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(kernel_pmap,
		    m->md.pat_mode, 0);
		if ((*pte & (PG_FRAME | PG_PTE_CACHE)) != pa) {
			oldpte |= *pte;
			pte_store(pte, pa | pg_nx | PG_RW | PG_V);
		}
		pte++;
	}
	if (__predict_false((oldpte & PG_V) != 0))
		pmap_invalidate_range_int(kernel_pmap, sva, sva + count *
		    PAGE_SIZE);
}

/*
 * This routine tears out page mappings from the
 * kernel -- it is meant only for temporary mappings.
 * Note: SMP coherent.  Uses a ranged shootdown IPI.
 */
static void
__CONCAT(PMTYPE, qremove)(vm_offset_t sva, int count)
{
	vm_offset_t va;

	va = sva;
	while (count-- > 0) {
		pmap_kremove(va);
		va += PAGE_SIZE;
	}
	pmap_invalidate_range_int(kernel_pmap, sva, va);
}

/***************************************************
 * Page table page management routines.....
 ***************************************************/
/*
 * Schedule the specified unused page table page to be freed.  Specifically,
 * add the page to the specified list of pages that will be released to the
 * physical memory manager after the TLB has been updated.
 */
static __inline void
pmap_add_delayed_free_list(vm_page_t m, struct spglist *free,
    boolean_t set_PG_ZERO)
{

	if (set_PG_ZERO)
		m->flags |= PG_ZERO;
	else
		m->flags &= ~PG_ZERO;
	SLIST_INSERT_HEAD(free, m, plinks.s.ss);
}

/*
 * Inserts the specified page table page into the specified pmap's collection
 * of idle page table pages.  Each of a pmap's page table pages is responsible
 * for mapping a distinct range of virtual addresses.  The pmap's collection is
 * ordered by this virtual address range.
 *
 * If "promoted" is false, then the page table page "mpte" must be zero filled.
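 *
 * A sketch of the bookkeeping (a reading of the code, not an addition to
 * it): the page is keyed in the pm_root radix trie by its pindex, i.e. by
 * va >> PDRSHIFT, and "mpte->valid" is set to VM_PAGE_BITS_ALL for a
 * promoted page so that pmap_remove_pde() can later assert that a
 * preserved page table page was indeed saved by a promotion.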
 */
static __inline int
pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte, bool promoted)
{

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	mpte->valid = promoted ? VM_PAGE_BITS_ALL : 0;
	return (vm_radix_insert(&pmap->pm_root, mpte));
}

/*
 * Removes the page table page mapping the specified virtual address from the
 * specified pmap's collection of idle page table pages, and returns it.
 * Returns NULL if there is no page table page corresponding to the specified
 * virtual address.
 */
static __inline vm_page_t
pmap_remove_pt_page(pmap_t pmap, vm_offset_t va)
{

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	return (vm_radix_remove(&pmap->pm_root, va >> PDRSHIFT));
}

/*
 * Decrements a page table page's reference count, which is used to record the
 * number of valid page table entries within the page.  If the reference count
 * drops to zero, then the page table page is unmapped.  Returns TRUE if the
 * page table page was unmapped and FALSE otherwise.
 */
static inline boolean_t
pmap_unwire_ptp(pmap_t pmap, vm_page_t m, struct spglist *free)
{

	--m->ref_count;
	if (m->ref_count == 0) {
		_pmap_unwire_ptp(pmap, m, free);
		return (TRUE);
	} else
		return (FALSE);
}

static void
_pmap_unwire_ptp(pmap_t pmap, vm_page_t m, struct spglist *free)
{

	/*
	 * Unmap the page table page.
	 */
	pmap->pm_pdir[m->pindex] = 0;
	--pmap->pm_stats.resident_count;

	/*
	 * There is no need to invalidate the recursive mapping, since we
	 * never instantiate such a mapping for usermode pmaps and never
	 * remove page table pages from the kernel pmap.  Put the page on a
	 * list so that it is released only after all TLB shootdowns are
	 * done.
	 */
	MPASS(pmap != kernel_pmap);
	pmap_add_delayed_free_list(m, free, TRUE);
}

/*
 * After removing a page table entry, this routine is used to conditionally
 * free the page table page and manage its reference count.
 */
static int
pmap_unuse_pt(pmap_t pmap, vm_offset_t va, struct spglist *free)
{
	pd_entry_t ptepde;
	vm_page_t mpte;

	if (pmap == kernel_pmap)
		return (0);
	ptepde = *pmap_pde(pmap, va);
	mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME);
	return (pmap_unwire_ptp(pmap, mpte, free));
}

/*
 * Release a page table page reference after a failed attempt to create a
 * mapping.
 */
static void
pmap_abort_ptp(pmap_t pmap, vm_offset_t va, vm_page_t mpte)
{
	struct spglist free;

	SLIST_INIT(&free);
	if (pmap_unwire_ptp(pmap, mpte, &free)) {
		/*
		 * Although "va" was never mapped, paging-structure caches
		 * could nonetheless have entries that refer to the freed
		 * page table pages.  Invalidate those entries.
		 */
		pmap_invalidate_page_int(pmap, va);
		vm_page_free_pages_toq(&free, true);
	}
}

/*
 * Initialize the pmap for the swapper process.
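 *
 * (Clarifying note: the "swapper" is the boot-time process 0.  Unlike
 * pmap_pinit() below, this routine reuses the bootstrap IdlePTD page
 * directory instead of allocating a new one, so it is suitable only for
 * the single pmap that is created during early kernel initialization.)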
2035 */ 2036 static void 2037 __CONCAT(PMTYPE, pinit0)(pmap_t pmap) 2038 { 2039 2040 PMAP_LOCK_INIT(pmap); 2041 pmap->pm_pdir = IdlePTD; 2042 #ifdef PMAP_PAE_COMP 2043 pmap->pm_pdpt = IdlePDPT; 2044 #endif 2045 vm_radix_init(&pmap->pm_root); 2046 CPU_ZERO(&pmap->pm_active); 2047 TAILQ_INIT(&pmap->pm_pvchunk); 2048 bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 2049 pmap_activate_boot(pmap); 2050 } 2051 2052 /* 2053 * Initialize a preallocated and zeroed pmap structure, 2054 * such as one in a vmspace structure. 2055 */ 2056 static int 2057 __CONCAT(PMTYPE, pinit)(pmap_t pmap) 2058 { 2059 int i; 2060 2061 /* 2062 * No need to allocate page table space yet but we do need a valid 2063 * page directory table. 2064 */ 2065 if (pmap->pm_pdir == NULL) { 2066 pmap->pm_pdir = (pd_entry_t *)kva_alloc(NBPTD); 2067 if (pmap->pm_pdir == NULL) 2068 return (0); 2069 #ifdef PMAP_PAE_COMP 2070 pmap->pm_pdpt = uma_zalloc(pdptzone, M_WAITOK | M_ZERO); 2071 KASSERT(((vm_offset_t)pmap->pm_pdpt & 2072 ((NPGPTD * sizeof(pdpt_entry_t)) - 1)) == 0, 2073 ("pmap_pinit: pdpt misaligned")); 2074 KASSERT(pmap_kextract((vm_offset_t)pmap->pm_pdpt) < (4ULL<<30), 2075 ("pmap_pinit: pdpt above 4g")); 2076 #endif 2077 vm_radix_init(&pmap->pm_root); 2078 } 2079 KASSERT(vm_radix_is_empty(&pmap->pm_root), 2080 ("pmap_pinit: pmap has reserved page table page(s)")); 2081 2082 /* 2083 * allocate the page directory page(s) 2084 */ 2085 for (i = 0; i < NPGPTD; i++) { 2086 pmap->pm_ptdpg[i] = vm_page_alloc_noobj(VM_ALLOC_WIRED | 2087 VM_ALLOC_ZERO | VM_ALLOC_WAITOK); 2088 #ifdef PMAP_PAE_COMP 2089 pmap->pm_pdpt[i] = VM_PAGE_TO_PHYS(pmap->pm_ptdpg[i]) | PG_V; 2090 #endif 2091 } 2092 2093 pmap_qenter((vm_offset_t)pmap->pm_pdir, pmap->pm_ptdpg, NPGPTD); 2094 #ifdef PMAP_PAE_COMP 2095 if ((cpu_feature & CPUID_PAT) == 0) { 2096 pmap_invalidate_cache_range( 2097 trunc_page((vm_offset_t)pmap->pm_pdpt), 2098 round_page((vm_offset_t)pmap->pm_pdpt + 2099 NPGPTD * sizeof(pdpt_entry_t))); 2100 } 2101 #endif 2102 2103 /* Install the trampoline mapping. */ 2104 pmap->pm_pdir[TRPTDI] = PTD[TRPTDI]; 2105 2106 CPU_ZERO(&pmap->pm_active); 2107 TAILQ_INIT(&pmap->pm_pvchunk); 2108 bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 2109 2110 return (1); 2111 } 2112 2113 /* 2114 * this routine is called if the page table page is not 2115 * mapped correctly. 2116 */ 2117 static vm_page_t 2118 _pmap_allocpte(pmap_t pmap, u_int ptepindex, u_int flags) 2119 { 2120 vm_paddr_t ptepa; 2121 vm_page_t m; 2122 2123 /* 2124 * Allocate a page table page. 2125 */ 2126 if ((m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) { 2127 if ((flags & PMAP_ENTER_NOSLEEP) == 0) { 2128 PMAP_UNLOCK(pmap); 2129 rw_wunlock(&pvh_global_lock); 2130 vm_wait(NULL); 2131 rw_wlock(&pvh_global_lock); 2132 PMAP_LOCK(pmap); 2133 } 2134 2135 /* 2136 * Indicate the need to retry. While waiting, the page table 2137 * page may have been allocated. 2138 */ 2139 return (NULL); 2140 } 2141 m->pindex = ptepindex; 2142 2143 /* 2144 * Map the pagetable page into the process address space, if 2145 * it isn't already there. 
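	 *
	 * For illustration (non-PAE values, PDRSHIFT == 22): va 0x20000000
	 * yields ptepindex 0x80, so the new page's physical address is
	 * installed below in pm_pdir[0x80] with PG_U | PG_RW | PG_V; PG_A
	 * and PG_M are pre-set as well, presumably so that the hardware
	 * never needs to update this PDE, which also serves as a PTE for
	 * the recursive page table mapping.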
2146 */ 2147 2148 pmap->pm_stats.resident_count++; 2149 2150 ptepa = VM_PAGE_TO_PHYS(m); 2151 KASSERT((pmap->pm_pdir[ptepindex] & PG_V) == 0, 2152 ("%s: page directory entry %#jx is valid", 2153 __func__, (uintmax_t)pmap->pm_pdir[ptepindex])); 2154 pmap->pm_pdir[ptepindex] = 2155 (pd_entry_t)(ptepa | PG_U | PG_RW | PG_V | PG_A | PG_M); 2156 2157 return (m); 2158 } 2159 2160 static vm_page_t 2161 pmap_allocpte(pmap_t pmap, vm_offset_t va, u_int flags) 2162 { 2163 u_int ptepindex; 2164 pd_entry_t ptepa; 2165 vm_page_t m; 2166 2167 /* 2168 * Calculate pagetable page index 2169 */ 2170 ptepindex = va >> PDRSHIFT; 2171 retry: 2172 /* 2173 * Get the page directory entry 2174 */ 2175 ptepa = pmap->pm_pdir[ptepindex]; 2176 2177 /* 2178 * This supports switching from a 4MB page to a 2179 * normal 4K page. 2180 */ 2181 if (ptepa & PG_PS) { 2182 (void)pmap_demote_pde(pmap, &pmap->pm_pdir[ptepindex], va); 2183 ptepa = pmap->pm_pdir[ptepindex]; 2184 } 2185 2186 /* 2187 * If the page table page is mapped, we just increment the 2188 * hold count, and activate it. 2189 */ 2190 if (ptepa) { 2191 m = PHYS_TO_VM_PAGE(ptepa & PG_FRAME); 2192 m->ref_count++; 2193 } else { 2194 /* 2195 * Here if the pte page isn't mapped, or if it has 2196 * been deallocated. 2197 */ 2198 m = _pmap_allocpte(pmap, ptepindex, flags); 2199 if (m == NULL && (flags & PMAP_ENTER_NOSLEEP) == 0) 2200 goto retry; 2201 } 2202 return (m); 2203 } 2204 2205 /*************************************************** 2206 * Pmap allocation/deallocation routines. 2207 ***************************************************/ 2208 2209 /* 2210 * Release any resources held by the given physical map. 2211 * Called when a pmap initialized by pmap_pinit is being released. 2212 * Should only be called if the map contains no valid mappings. 
2213 */ 2214 static void 2215 __CONCAT(PMTYPE, release)(pmap_t pmap) 2216 { 2217 vm_page_t m; 2218 int i; 2219 2220 KASSERT(pmap->pm_stats.resident_count == 0, 2221 ("pmap_release: pmap resident count %ld != 0", 2222 pmap->pm_stats.resident_count)); 2223 KASSERT(vm_radix_is_empty(&pmap->pm_root), 2224 ("pmap_release: pmap has reserved page table page(s)")); 2225 KASSERT(CPU_EMPTY(&pmap->pm_active), 2226 ("releasing active pmap %p", pmap)); 2227 2228 pmap_qremove((vm_offset_t)pmap->pm_pdir, NPGPTD); 2229 2230 for (i = 0; i < NPGPTD; i++) { 2231 m = pmap->pm_ptdpg[i]; 2232 #ifdef PMAP_PAE_COMP 2233 KASSERT(VM_PAGE_TO_PHYS(m) == (pmap->pm_pdpt[i] & PG_FRAME), 2234 ("pmap_release: got wrong ptd page")); 2235 #endif 2236 vm_page_unwire_noq(m); 2237 vm_page_free(m); 2238 } 2239 } 2240 2241 /* 2242 * grow the number of kernel page table entries, if needed 2243 */ 2244 static void 2245 __CONCAT(PMTYPE, growkernel)(vm_offset_t addr) 2246 { 2247 vm_paddr_t ptppaddr; 2248 vm_page_t nkpg; 2249 pd_entry_t newpdir; 2250 2251 mtx_assert(&kernel_map->system_mtx, MA_OWNED); 2252 addr = roundup2(addr, NBPDR); 2253 if (addr - 1 >= vm_map_max(kernel_map)) 2254 addr = vm_map_max(kernel_map); 2255 while (kernel_vm_end < addr) { 2256 if (pdir_pde(PTD, kernel_vm_end)) { 2257 kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK; 2258 if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) { 2259 kernel_vm_end = vm_map_max(kernel_map); 2260 break; 2261 } 2262 continue; 2263 } 2264 2265 nkpg = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED | 2266 VM_ALLOC_ZERO); 2267 if (nkpg == NULL) 2268 panic("pmap_growkernel: no memory to grow kernel"); 2269 nkpg->pindex = kernel_vm_end >> PDRSHIFT; 2270 nkpt++; 2271 2272 ptppaddr = VM_PAGE_TO_PHYS(nkpg); 2273 newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M); 2274 pdir_pde(KPTD, kernel_vm_end) = newpdir; 2275 2276 pmap_kenter_pde(kernel_vm_end, newpdir); 2277 kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK; 2278 if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) { 2279 kernel_vm_end = vm_map_max(kernel_map); 2280 break; 2281 } 2282 } 2283 } 2284 2285 /*************************************************** 2286 * page management routines. 2287 ***************************************************/ 2288 2289 static const uint32_t pc_freemask[_NPCM] = { 2290 [0 ... _NPCM - 2] = PC_FREEN, 2291 [_NPCM - 1] = PC_FREEL 2292 }; 2293 2294 #ifdef PV_STATS 2295 extern int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail; 2296 extern long pv_entry_frees, pv_entry_allocs; 2297 extern int pv_entry_spare; 2298 #endif 2299 2300 /* 2301 * We are in a serious low memory condition. Resort to 2302 * drastic measures to free some pages so we can allocate 2303 * another pv entry chunk. 
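 *
 * In outline (a reading of the code below): walk the global pv_chunks
 * LRU list, locking each owning pmap with a trylock where necessary to
 * avoid deadlock against "locked_pmap", destroy every non-wired 4KB
 * mapping in each chunk, and stop once either a page table page or an
 * entire pv chunk page has been freed and can be returned for reuse.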
2304 */ 2305 static vm_page_t 2306 pmap_pv_reclaim(pmap_t locked_pmap) 2307 { 2308 struct pch newtail; 2309 struct pv_chunk *pc; 2310 struct md_page *pvh; 2311 pd_entry_t *pde; 2312 pmap_t pmap; 2313 pt_entry_t *pte, tpte; 2314 pv_entry_t pv; 2315 vm_offset_t va; 2316 vm_page_t m, m_pc; 2317 struct spglist free; 2318 uint32_t inuse; 2319 int bit, field, freed; 2320 2321 PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED); 2322 pmap = NULL; 2323 m_pc = NULL; 2324 SLIST_INIT(&free); 2325 TAILQ_INIT(&newtail); 2326 while ((pc = TAILQ_FIRST(&pv_chunks)) != NULL && (pv_vafree == 0 || 2327 SLIST_EMPTY(&free))) { 2328 TAILQ_REMOVE(&pv_chunks, pc, pc_lru); 2329 if (pmap != pc->pc_pmap) { 2330 if (pmap != NULL) { 2331 pmap_invalidate_all_int(pmap); 2332 if (pmap != locked_pmap) 2333 PMAP_UNLOCK(pmap); 2334 } 2335 pmap = pc->pc_pmap; 2336 /* Avoid deadlock and lock recursion. */ 2337 if (pmap > locked_pmap) 2338 PMAP_LOCK(pmap); 2339 else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap)) { 2340 pmap = NULL; 2341 TAILQ_INSERT_TAIL(&newtail, pc, pc_lru); 2342 continue; 2343 } 2344 } 2345 2346 /* 2347 * Destroy every non-wired, 4 KB page mapping in the chunk. 2348 */ 2349 freed = 0; 2350 for (field = 0; field < _NPCM; field++) { 2351 for (inuse = ~pc->pc_map[field] & pc_freemask[field]; 2352 inuse != 0; inuse &= ~(1UL << bit)) { 2353 bit = bsfl(inuse); 2354 pv = &pc->pc_pventry[field * 32 + bit]; 2355 va = pv->pv_va; 2356 pde = pmap_pde(pmap, va); 2357 if ((*pde & PG_PS) != 0) 2358 continue; 2359 pte = __CONCAT(PMTYPE, pte)(pmap, va); 2360 tpte = *pte; 2361 if ((tpte & PG_W) == 0) 2362 tpte = pte_load_clear(pte); 2363 pmap_pte_release(pte); 2364 if ((tpte & PG_W) != 0) 2365 continue; 2366 KASSERT(tpte != 0, 2367 ("pmap_pv_reclaim: pmap %p va %x zero pte", 2368 pmap, va)); 2369 if ((tpte & PG_G) != 0) 2370 pmap_invalidate_page_int(pmap, va); 2371 m = PHYS_TO_VM_PAGE(tpte & PG_FRAME); 2372 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 2373 vm_page_dirty(m); 2374 if ((tpte & PG_A) != 0) 2375 vm_page_aflag_set(m, PGA_REFERENCED); 2376 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); 2377 if (TAILQ_EMPTY(&m->md.pv_list) && 2378 (m->flags & PG_FICTITIOUS) == 0) { 2379 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 2380 if (TAILQ_EMPTY(&pvh->pv_list)) { 2381 vm_page_aflag_clear(m, 2382 PGA_WRITEABLE); 2383 } 2384 } 2385 pc->pc_map[field] |= 1UL << bit; 2386 pmap_unuse_pt(pmap, va, &free); 2387 freed++; 2388 } 2389 } 2390 if (freed == 0) { 2391 TAILQ_INSERT_TAIL(&newtail, pc, pc_lru); 2392 continue; 2393 } 2394 /* Every freed mapping is for a 4 KB page. */ 2395 pmap->pm_stats.resident_count -= freed; 2396 PV_STAT(pv_entry_frees += freed); 2397 PV_STAT(pv_entry_spare += freed); 2398 pv_entry_count -= freed; 2399 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2400 for (field = 0; field < _NPCM; field++) 2401 if (pc->pc_map[field] != pc_freemask[field]) { 2402 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, 2403 pc_list); 2404 TAILQ_INSERT_TAIL(&newtail, pc, pc_lru); 2405 2406 /* 2407 * One freed pv entry in locked_pmap is 2408 * sufficient. 2409 */ 2410 if (pmap == locked_pmap) 2411 goto out; 2412 break; 2413 } 2414 if (field == _NPCM) { 2415 PV_STAT(pv_entry_spare -= _NPCPV); 2416 PV_STAT(pc_chunk_count--); 2417 PV_STAT(pc_chunk_frees++); 2418 /* Entire chunk is free; return it. 
*/ 2419 m_pc = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc)); 2420 pmap_qremove((vm_offset_t)pc, 1); 2421 pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc); 2422 break; 2423 } 2424 } 2425 out: 2426 TAILQ_CONCAT(&pv_chunks, &newtail, pc_lru); 2427 if (pmap != NULL) { 2428 pmap_invalidate_all_int(pmap); 2429 if (pmap != locked_pmap) 2430 PMAP_UNLOCK(pmap); 2431 } 2432 if (m_pc == NULL && pv_vafree != 0 && SLIST_EMPTY(&free)) { 2433 m_pc = SLIST_FIRST(&free); 2434 SLIST_REMOVE_HEAD(&free, plinks.s.ss); 2435 /* Recycle a freed page table page. */ 2436 m_pc->ref_count = 1; 2437 } 2438 vm_page_free_pages_toq(&free, true); 2439 return (m_pc); 2440 } 2441 2442 /* 2443 * free the pv_entry back to the free list 2444 */ 2445 static void 2446 free_pv_entry(pmap_t pmap, pv_entry_t pv) 2447 { 2448 struct pv_chunk *pc; 2449 int idx, field, bit; 2450 2451 rw_assert(&pvh_global_lock, RA_WLOCKED); 2452 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2453 PV_STAT(pv_entry_frees++); 2454 PV_STAT(pv_entry_spare++); 2455 pv_entry_count--; 2456 pc = pv_to_chunk(pv); 2457 idx = pv - &pc->pc_pventry[0]; 2458 field = idx / 32; 2459 bit = idx % 32; 2460 pc->pc_map[field] |= 1ul << bit; 2461 for (idx = 0; idx < _NPCM; idx++) 2462 if (pc->pc_map[idx] != pc_freemask[idx]) { 2463 /* 2464 * 98% of the time, pc is already at the head of the 2465 * list. If it isn't already, move it to the head. 2466 */ 2467 if (__predict_false(TAILQ_FIRST(&pmap->pm_pvchunk) != 2468 pc)) { 2469 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2470 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, 2471 pc_list); 2472 } 2473 return; 2474 } 2475 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2476 free_pv_chunk(pc); 2477 } 2478 2479 static void 2480 free_pv_chunk(struct pv_chunk *pc) 2481 { 2482 vm_page_t m; 2483 2484 TAILQ_REMOVE(&pv_chunks, pc, pc_lru); 2485 PV_STAT(pv_entry_spare -= _NPCPV); 2486 PV_STAT(pc_chunk_count--); 2487 PV_STAT(pc_chunk_frees++); 2488 /* entire chunk is free, return it */ 2489 m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc)); 2490 pmap_qremove((vm_offset_t)pc, 1); 2491 vm_page_unwire_noq(m); 2492 vm_page_free(m); 2493 pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc); 2494 } 2495 2496 /* 2497 * get a new pv_entry, allocating a block from the system 2498 * when needed. 
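 *
 * For illustration of the chunk bitmaps (the pc_map words below are
 * 32 bits wide): a pv entry's index within its chunk maps to word
 * "idx / 32" and bit "idx % 32", a set bit marks a free slot, and
 * bsfl() therefore locates the lowest-numbered free slot in a word.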
2499 */ 2500 static pv_entry_t 2501 get_pv_entry(pmap_t pmap, boolean_t try) 2502 { 2503 static const struct timeval printinterval = { 60, 0 }; 2504 static struct timeval lastprint; 2505 int bit, field; 2506 pv_entry_t pv; 2507 struct pv_chunk *pc; 2508 vm_page_t m; 2509 2510 rw_assert(&pvh_global_lock, RA_WLOCKED); 2511 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2512 PV_STAT(pv_entry_allocs++); 2513 pv_entry_count++; 2514 if (pv_entry_count > pv_entry_high_water) 2515 if (ratecheck(&lastprint, &printinterval)) 2516 printf("Approaching the limit on PV entries, consider " 2517 "increasing either the vm.pmap.shpgperproc or the " 2518 "vm.pmap.pv_entries tunable.\n"); 2519 retry: 2520 pc = TAILQ_FIRST(&pmap->pm_pvchunk); 2521 if (pc != NULL) { 2522 for (field = 0; field < _NPCM; field++) { 2523 if (pc->pc_map[field]) { 2524 bit = bsfl(pc->pc_map[field]); 2525 break; 2526 } 2527 } 2528 if (field < _NPCM) { 2529 pv = &pc->pc_pventry[field * 32 + bit]; 2530 pc->pc_map[field] &= ~(1ul << bit); 2531 /* If this was the last item, move it to tail */ 2532 for (field = 0; field < _NPCM; field++) 2533 if (pc->pc_map[field] != 0) { 2534 PV_STAT(pv_entry_spare--); 2535 return (pv); /* not full, return */ 2536 } 2537 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2538 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list); 2539 PV_STAT(pv_entry_spare--); 2540 return (pv); 2541 } 2542 } 2543 /* 2544 * Access to the ptelist "pv_vafree" is synchronized by the pvh 2545 * global lock. If "pv_vafree" is currently non-empty, it will 2546 * remain non-empty until pmap_ptelist_alloc() completes. 2547 */ 2548 if (pv_vafree == 0 || 2549 (m = vm_page_alloc_noobj(VM_ALLOC_WIRED)) == NULL) { 2550 if (try) { 2551 pv_entry_count--; 2552 PV_STAT(pc_chunk_tryfail++); 2553 return (NULL); 2554 } 2555 m = pmap_pv_reclaim(pmap); 2556 if (m == NULL) 2557 goto retry; 2558 } 2559 PV_STAT(pc_chunk_count++); 2560 PV_STAT(pc_chunk_allocs++); 2561 pc = (struct pv_chunk *)pmap_ptelist_alloc(&pv_vafree); 2562 pmap_qenter((vm_offset_t)pc, &m, 1); 2563 pc->pc_pmap = pmap; 2564 pc->pc_map[0] = pc_freemask[0] & ~1ul; /* preallocated bit 0 */ 2565 for (field = 1; field < _NPCM; field++) 2566 pc->pc_map[field] = pc_freemask[field]; 2567 TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru); 2568 pv = &pc->pc_pventry[0]; 2569 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); 2570 PV_STAT(pv_entry_spare += _NPCPV - 1); 2571 return (pv); 2572 } 2573 2574 static __inline pv_entry_t 2575 pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va) 2576 { 2577 pv_entry_t pv; 2578 2579 rw_assert(&pvh_global_lock, RA_WLOCKED); 2580 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { 2581 if (pmap == PV_PMAP(pv) && va == pv->pv_va) { 2582 TAILQ_REMOVE(&pvh->pv_list, pv, pv_next); 2583 break; 2584 } 2585 } 2586 return (pv); 2587 } 2588 2589 static void 2590 pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa) 2591 { 2592 struct md_page *pvh; 2593 pv_entry_t pv; 2594 vm_offset_t va_last; 2595 vm_page_t m; 2596 2597 rw_assert(&pvh_global_lock, RA_WLOCKED); 2598 KASSERT((pa & PDRMASK) == 0, 2599 ("pmap_pv_demote_pde: pa is not 4mpage aligned")); 2600 2601 /* 2602 * Transfer the 4mpage's pv entry for this mapping to the first 2603 * page's pv list. 2604 */ 2605 pvh = pa_to_pvh(pa); 2606 va = trunc_4mpage(va); 2607 pv = pmap_pvh_remove(pvh, pmap, va); 2608 KASSERT(pv != NULL, ("pmap_pv_demote_pde: pv not found")); 2609 m = PHYS_TO_VM_PAGE(pa); 2610 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); 2611 /* Instantiate the remaining NPTEPG - 1 pv entries. 
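	 * (With non-PAE values that is 1023 entries: NPTEPG == 1024 and
	 * va_last == va + NBPDR - PAGE_SIZE is the last 4KB page of the
	 * superpage.  PAE halves both NPTEPG and NBPDR.)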
	 */
	va_last = va + NBPDR - PAGE_SIZE;
	do {
		m++;
		KASSERT((m->oflags & VPO_UNMANAGED) == 0,
		    ("pmap_pv_demote_pde: page %p is not managed", m));
		va += PAGE_SIZE;
		pmap_insert_entry(pmap, va, m);
	} while (va < va_last);
}

#if VM_NRESERVLEVEL > 0
static void
pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
{
	struct md_page *pvh;
	pv_entry_t pv;
	vm_offset_t va_last;
	vm_page_t m;

	rw_assert(&pvh_global_lock, RA_WLOCKED);
	KASSERT((pa & PDRMASK) == 0,
	    ("pmap_pv_promote_pde: pa is not 4mpage aligned"));

	/*
	 * Transfer the first page's pv entry for this mapping to the
	 * 4mpage's pv list.  Aside from avoiding the cost of a call
	 * to get_pv_entry(), a transfer avoids the possibility that
	 * get_pv_entry() calls pmap_collect() and that pmap_collect()
	 * removes one of the mappings that is being promoted.
	 */
	m = PHYS_TO_VM_PAGE(pa);
	va = trunc_4mpage(va);
	pv = pmap_pvh_remove(&m->md, pmap, va);
	KASSERT(pv != NULL, ("pmap_pv_promote_pde: pv not found"));
	pvh = pa_to_pvh(pa);
	TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
	/* Free the remaining NPTEPG - 1 pv entries. */
	va_last = va + NBPDR - PAGE_SIZE;
	do {
		m++;
		va += PAGE_SIZE;
		pmap_pvh_free(&m->md, pmap, va);
	} while (va < va_last);
}
#endif /* VM_NRESERVLEVEL > 0 */

static void
pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
{
	pv_entry_t pv;

	pv = pmap_pvh_remove(pvh, pmap, va);
	KASSERT(pv != NULL, ("pmap_pvh_free: pv not found"));
	free_pv_entry(pmap, pv);
}

static void
pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
{
	struct md_page *pvh;

	rw_assert(&pvh_global_lock, RA_WLOCKED);
	pmap_pvh_free(&m->md, pmap, va);
	if (TAILQ_EMPTY(&m->md.pv_list) && (m->flags & PG_FICTITIOUS) == 0) {
		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
		if (TAILQ_EMPTY(&pvh->pv_list))
			vm_page_aflag_clear(m, PGA_WRITEABLE);
	}
}

/*
 * Create a pv entry for the given page and the mapping (pmap, va).
 */
static void
pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
	pv_entry_t pv;

	rw_assert(&pvh_global_lock, RA_WLOCKED);
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	pv = get_pv_entry(pmap, FALSE);
	pv->pv_va = va;
	TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
}

/*
 * Conditionally create a pv entry.
 */
static boolean_t
pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
	pv_entry_t pv;

	rw_assert(&pvh_global_lock, RA_WLOCKED);
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	if (pv_entry_count < pv_entry_high_water &&
	    (pv = get_pv_entry(pmap, TRUE)) != NULL) {
		pv->pv_va = va;
		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
		return (TRUE);
	} else
		return (FALSE);
}

/*
 * Create the single pv entry for a 2- or 4MB page mapping; it is linked
 * into the superpage's pv list, not into the per-4KB-page pv lists.
2719 */ 2720 static bool 2721 pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, pd_entry_t pde, u_int flags) 2722 { 2723 struct md_page *pvh; 2724 pv_entry_t pv; 2725 bool noreclaim; 2726 2727 rw_assert(&pvh_global_lock, RA_WLOCKED); 2728 noreclaim = (flags & PMAP_ENTER_NORECLAIM) != 0; 2729 if ((noreclaim && pv_entry_count >= pv_entry_high_water) || 2730 (pv = get_pv_entry(pmap, noreclaim)) == NULL) 2731 return (false); 2732 pv->pv_va = va; 2733 pvh = pa_to_pvh(pde & PG_PS_FRAME); 2734 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next); 2735 return (true); 2736 } 2737 2738 /* 2739 * Fills a page table page with mappings to consecutive physical pages. 2740 */ 2741 static void 2742 pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte) 2743 { 2744 pt_entry_t *pte; 2745 2746 for (pte = firstpte; pte < firstpte + NPTEPG; pte++) { 2747 *pte = newpte; 2748 newpte += PAGE_SIZE; 2749 } 2750 } 2751 2752 /* 2753 * Tries to demote a 2- or 4MB page mapping. If demotion fails, the 2754 * 2- or 4MB page mapping is invalidated. 2755 */ 2756 static boolean_t 2757 pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va) 2758 { 2759 pd_entry_t newpde, oldpde; 2760 pt_entry_t *firstpte, newpte; 2761 vm_paddr_t mptepa; 2762 vm_page_t mpte; 2763 struct spglist free; 2764 vm_offset_t sva; 2765 2766 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2767 oldpde = *pde; 2768 KASSERT((oldpde & (PG_PS | PG_V)) == (PG_PS | PG_V), 2769 ("pmap_demote_pde: oldpde is missing PG_PS and/or PG_V")); 2770 if ((oldpde & PG_A) == 0 || (mpte = pmap_remove_pt_page(pmap, va)) == 2771 NULL) { 2772 KASSERT((oldpde & PG_W) == 0, 2773 ("pmap_demote_pde: page table page for a wired mapping" 2774 " is missing")); 2775 2776 /* 2777 * Invalidate the 2- or 4MB page mapping and return 2778 * "failure" if the mapping was never accessed or the 2779 * allocation of the new page table page fails. 2780 */ 2781 if ((oldpde & PG_A) == 0 || 2782 (mpte = vm_page_alloc_noobj(VM_ALLOC_WIRED)) == NULL) { 2783 SLIST_INIT(&free); 2784 sva = trunc_4mpage(va); 2785 pmap_remove_pde(pmap, pde, sva, &free); 2786 if ((oldpde & PG_G) == 0) 2787 pmap_invalidate_pde_page(pmap, sva, oldpde); 2788 vm_page_free_pages_toq(&free, true); 2789 CTR2(KTR_PMAP, "pmap_demote_pde: failure for va %#x" 2790 " in pmap %p", va, pmap); 2791 return (FALSE); 2792 } 2793 mpte->pindex = va >> PDRSHIFT; 2794 if (pmap != kernel_pmap) { 2795 mpte->ref_count = NPTEPG; 2796 pmap->pm_stats.resident_count++; 2797 } 2798 } 2799 mptepa = VM_PAGE_TO_PHYS(mpte); 2800 2801 /* 2802 * If the page mapping is in the kernel's address space, then the 2803 * KPTmap can provide access to the page table page. Otherwise, 2804 * temporarily map the page table page (mpte) into the kernel's 2805 * address space at either PADDR1 or PADDR2. 
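	 *
	 * (A note on the choice, inferred from the code below: PADDR1 is
	 * the cheaper slot, valid only while the caller stays pinned with
	 * the pvh_global_lock write-locked; any other caller must use the
	 * PADDR2 slot, which is serialized by PMAP2mutex.)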
2806 */ 2807 if (pmap == kernel_pmap) 2808 firstpte = &KPTmap[i386_btop(trunc_4mpage(va))]; 2809 else if (curthread->td_pinned > 0 && rw_wowned(&pvh_global_lock)) { 2810 if ((*PMAP1 & PG_FRAME) != mptepa) { 2811 *PMAP1 = mptepa | PG_RW | PG_V | PG_A | PG_M; 2812 #ifdef SMP 2813 PMAP1cpu = PCPU_GET(cpuid); 2814 #endif 2815 invlcaddr(PADDR1); 2816 PMAP1changed++; 2817 } else 2818 #ifdef SMP 2819 if (PMAP1cpu != PCPU_GET(cpuid)) { 2820 PMAP1cpu = PCPU_GET(cpuid); 2821 invlcaddr(PADDR1); 2822 PMAP1changedcpu++; 2823 } else 2824 #endif 2825 PMAP1unchanged++; 2826 firstpte = PADDR1; 2827 } else { 2828 mtx_lock(&PMAP2mutex); 2829 if ((*PMAP2 & PG_FRAME) != mptepa) { 2830 *PMAP2 = mptepa | PG_RW | PG_V | PG_A | PG_M; 2831 pmap_invalidate_page_int(kernel_pmap, 2832 (vm_offset_t)PADDR2); 2833 } 2834 firstpte = PADDR2; 2835 } 2836 newpde = mptepa | PG_M | PG_A | (oldpde & PG_U) | PG_RW | PG_V; 2837 KASSERT((oldpde & PG_A) != 0, 2838 ("pmap_demote_pde: oldpde is missing PG_A")); 2839 KASSERT((oldpde & (PG_M | PG_RW)) != PG_RW, 2840 ("pmap_demote_pde: oldpde is missing PG_M")); 2841 newpte = oldpde & ~PG_PS; 2842 if ((newpte & PG_PDE_PAT) != 0) 2843 newpte ^= PG_PDE_PAT | PG_PTE_PAT; 2844 2845 /* 2846 * If the page table page is not leftover from an earlier promotion, 2847 * initialize it. 2848 */ 2849 if (vm_page_none_valid(mpte)) 2850 pmap_fill_ptp(firstpte, newpte); 2851 2852 KASSERT((*firstpte & PG_FRAME) == (newpte & PG_FRAME), 2853 ("pmap_demote_pde: firstpte and newpte map different physical" 2854 " addresses")); 2855 2856 /* 2857 * If the mapping has changed attributes, update the page table 2858 * entries. 2859 */ 2860 if ((*firstpte & PG_PTE_PROMOTE) != (newpte & PG_PTE_PROMOTE)) 2861 pmap_fill_ptp(firstpte, newpte); 2862 2863 /* 2864 * Demote the mapping. This pmap is locked. The old PDE has 2865 * PG_A set. If the old PDE has PG_RW set, it also has PG_M 2866 * set. Thus, there is no danger of a race with another 2867 * processor changing the setting of PG_A and/or PG_M between 2868 * the read above and the store below. 2869 */ 2870 if (workaround_erratum383) 2871 pmap_update_pde(pmap, va, pde, newpde); 2872 else if (pmap == kernel_pmap) 2873 pmap_kenter_pde(va, newpde); 2874 else 2875 pde_store(pde, newpde); 2876 if (firstpte == PADDR2) 2877 mtx_unlock(&PMAP2mutex); 2878 2879 /* 2880 * Invalidate the recursive mapping of the page table page. 2881 */ 2882 pmap_invalidate_page_int(pmap, (vm_offset_t)vtopte(va)); 2883 2884 /* 2885 * Demote the pv entry. This depends on the earlier demotion 2886 * of the mapping. Specifically, the (re)creation of a per- 2887 * page pv entry might trigger the execution of pmap_collect(), 2888 * which might reclaim a newly (re)created per-page pv entry 2889 * and destroy the associated mapping. In order to destroy 2890 * the mapping, the PDE must have already changed from mapping 2891 * the 2mpage to referencing the page table page. 2892 */ 2893 if ((oldpde & PG_MANAGED) != 0) 2894 pmap_pv_demote_pde(pmap, va, oldpde & PG_PS_FRAME); 2895 2896 pmap_pde_demotions++; 2897 CTR2(KTR_PMAP, "pmap_demote_pde: success for va %#x" 2898 " in pmap %p", va, pmap); 2899 return (TRUE); 2900 } 2901 2902 /* 2903 * Removes a 2- or 4MB page mapping from the kernel pmap. 
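 *
 * (Explanatory note: unlike the user pmap case, the kernel PDE is not
 * left invalid; it is replaced below with a mapping of the preserved
 * page table page, because kernel page table pages are never removed.)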
 */
static void
pmap_remove_kernel_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
{
	pd_entry_t newpde;
	vm_paddr_t mptepa;
	vm_page_t mpte;

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	mpte = pmap_remove_pt_page(pmap, va);
	if (mpte == NULL)
		panic("pmap_remove_kernel_pde: Missing pt page.");

	mptepa = VM_PAGE_TO_PHYS(mpte);
	newpde = mptepa | PG_M | PG_A | PG_RW | PG_V;

	/*
	 * If this page table page was unmapped by a promotion, then it
	 * contains valid mappings.  Zero it to invalidate those mappings.
	 */
	if (vm_page_any_valid(mpte))
		pagezero((void *)&KPTmap[i386_btop(trunc_4mpage(va))]);

	/*
	 * Remove the mapping.
	 */
	if (workaround_erratum383)
		pmap_update_pde(pmap, va, pde, newpde);
	else
		pmap_kenter_pde(va, newpde);

	/*
	 * Invalidate the recursive mapping of the page table page.
	 */
	pmap_invalidate_page_int(pmap, (vm_offset_t)vtopte(va));
}

/*
 * pmap_remove_pde: do the things to unmap a superpage in a process
 */
static void
pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
    struct spglist *free)
{
	struct md_page *pvh;
	pd_entry_t oldpde;
	vm_offset_t eva, va;
	vm_page_t m, mpte;

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	KASSERT((sva & PDRMASK) == 0,
	    ("pmap_remove_pde: sva is not 4mpage aligned"));
	oldpde = pte_load_clear(pdq);
	if (oldpde & PG_W)
		pmap->pm_stats.wired_count -= NBPDR / PAGE_SIZE;

	/*
	 * Machines that don't support invlpg also don't support
	 * PG_G.
	 */
	if ((oldpde & PG_G) != 0)
		pmap_invalidate_pde_page(kernel_pmap, sva, oldpde);

	pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
	if (oldpde & PG_MANAGED) {
		pvh = pa_to_pvh(oldpde & PG_PS_FRAME);
		pmap_pvh_free(pvh, pmap, sva);
		eva = sva + NBPDR;
		for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
		    va < eva; va += PAGE_SIZE, m++) {
			if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW))
				vm_page_dirty(m);
			if (oldpde & PG_A)
				vm_page_aflag_set(m, PGA_REFERENCED);
			if (TAILQ_EMPTY(&m->md.pv_list) &&
			    TAILQ_EMPTY(&pvh->pv_list))
				vm_page_aflag_clear(m, PGA_WRITEABLE);
		}
	}
	if (pmap == kernel_pmap) {
		pmap_remove_kernel_pde(pmap, pdq, sva);
	} else {
		mpte = pmap_remove_pt_page(pmap, sva);
		if (mpte != NULL) {
			KASSERT(vm_page_all_valid(mpte),
			    ("pmap_remove_pde: pte page not promoted"));
			pmap->pm_stats.resident_count--;
			KASSERT(mpte->ref_count == NPTEPG,
			    ("pmap_remove_pde: pte page ref count error"));
			mpte->ref_count = 0;
			pmap_add_delayed_free_list(mpte, free, FALSE);
		}
	}
}

/*
 * pmap_remove_pte: do the things to unmap a page in a process
 */
static int
pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va,
    struct spglist *free)
{
	pt_entry_t oldpte;
	vm_page_t m;

	rw_assert(&pvh_global_lock, RA_WLOCKED);
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	oldpte = pte_load_clear(ptq);
	KASSERT(oldpte != 0,
	    ("pmap_remove_pte: pmap %p va %x zero pte", pmap, va));
	if (oldpte & PG_W)
		pmap->pm_stats.wired_count -= 1;
	/*
	 * Machines that don't support invlpg also don't support
	 * PG_G.
	 */
	if (oldpte & PG_G)
		pmap_invalidate_page_int(kernel_pmap, va);
	pmap->pm_stats.resident_count -= 1;
	if (oldpte & PG_MANAGED) {
		m = PHYS_TO_VM_PAGE(oldpte & PG_FRAME);
		if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
			vm_page_dirty(m);
		if (oldpte & PG_A)
			vm_page_aflag_set(m, PGA_REFERENCED);
		pmap_remove_entry(pmap, m, va);
	}
	return (pmap_unuse_pt(pmap, va, free));
}

/*
 * Remove a single page from a process address space.
 */
static void
pmap_remove_page(pmap_t pmap, vm_offset_t va, struct spglist *free)
{
	pt_entry_t *pte;

	rw_assert(&pvh_global_lock, RA_WLOCKED);
	KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	if ((pte = pmap_pte_quick(pmap, va)) == NULL || *pte == 0)
		return;
	pmap_remove_pte(pmap, pte, va, free);
	pmap_invalidate_page_int(pmap, va);
}

/*
 * Removes the specified range of addresses from the page table page.
 */
static bool
pmap_remove_ptes(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
    struct spglist *free)
{
	pt_entry_t *pte;
	bool anyvalid;

	rw_assert(&pvh_global_lock, RA_WLOCKED);
	KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	anyvalid = false;
	for (pte = pmap_pte_quick(pmap, sva); sva != eva; pte++,
	    sva += PAGE_SIZE) {
		if (*pte == 0)
			continue;

		/*
		 * The TLB entry for a PG_G mapping is invalidated by
		 * pmap_remove_pte().
		 */
		if ((*pte & PG_G) == 0)
			anyvalid = true;

		if (pmap_remove_pte(pmap, pte, sva, free))
			break;
	}
	return (anyvalid);
}

/*
 * Remove the given range of addresses from the specified map.
 *
 * It is assumed that the start and end are properly
 * rounded to the page size.
 */
static void
__CONCAT(PMTYPE, remove)(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
	vm_offset_t pdnxt;
	pd_entry_t ptpaddr;
	struct spglist free;
	int anyvalid;

	/*
	 * Perform an unsynchronized read.  This is, however, safe.
	 */
	if (pmap->pm_stats.resident_count == 0)
		return;

	anyvalid = 0;
	SLIST_INIT(&free);

	rw_wlock(&pvh_global_lock);
	sched_pin();
	PMAP_LOCK(pmap);

	/*
	 * Special handling for removing a single page: this is a very
	 * common operation, so it is worth short-circuiting the general
	 * loop below.
	 */
	if ((sva + PAGE_SIZE == eva) &&
	    ((pmap->pm_pdir[(sva >> PDRSHIFT)] & PG_PS) == 0)) {
		pmap_remove_page(pmap, sva, &free);
		goto out;
	}

	for (; sva < eva; sva = pdnxt) {
		u_int pdirindex;

		/*
		 * Calculate the starting virtual address of the next page
		 * table's range.
		 */
		pdnxt = (sva + NBPDR) & ~PDRMASK;
		if (pdnxt < sva)
			pdnxt = eva;
		if (pmap->pm_stats.resident_count == 0)
			break;

		pdirindex = sva >> PDRSHIFT;
		ptpaddr = pmap->pm_pdir[pdirindex];

		/*
		 * Weed out invalid mappings.  Note: we assume that the page
		 * directory table is always allocated and mapped in kernel
		 * virtual address space.
		 */
		if (ptpaddr == 0)
			continue;

		/*
		 * Check for large page.
		 */
		if ((ptpaddr & PG_PS) != 0) {
			/*
			 * Are we removing the entire large page?  If not,
			 * demote the mapping and fall through.
3150 */ 3151 if (sva + NBPDR == pdnxt && eva >= pdnxt) { 3152 /* 3153 * The TLB entry for a PG_G mapping is 3154 * invalidated by pmap_remove_pde(). 3155 */ 3156 if ((ptpaddr & PG_G) == 0) 3157 anyvalid = 1; 3158 pmap_remove_pde(pmap, 3159 &pmap->pm_pdir[pdirindex], sva, &free); 3160 continue; 3161 } else if (!pmap_demote_pde(pmap, 3162 &pmap->pm_pdir[pdirindex], sva)) { 3163 /* The large page mapping was destroyed. */ 3164 continue; 3165 } 3166 } 3167 3168 /* 3169 * Limit our scan to either the end of the va represented 3170 * by the current page table page, or to the end of the 3171 * range being removed. 3172 */ 3173 if (pdnxt > eva) 3174 pdnxt = eva; 3175 3176 if (pmap_remove_ptes(pmap, sva, pdnxt, &free)) 3177 anyvalid = 1; 3178 } 3179 out: 3180 sched_unpin(); 3181 if (anyvalid) 3182 pmap_invalidate_all_int(pmap); 3183 rw_wunlock(&pvh_global_lock); 3184 PMAP_UNLOCK(pmap); 3185 vm_page_free_pages_toq(&free, true); 3186 } 3187 3188 /* 3189 * Routine: pmap_remove_all 3190 * Function: 3191 * Removes this physical page from 3192 * all physical maps in which it resides. 3193 * Reflects back modify bits to the pager. 3194 * 3195 * Notes: 3196 * Original versions of this routine were very 3197 * inefficient because they iteratively called 3198 * pmap_remove (slow...) 3199 */ 3200 3201 static void 3202 __CONCAT(PMTYPE, remove_all)(vm_page_t m) 3203 { 3204 struct md_page *pvh; 3205 pv_entry_t pv; 3206 pmap_t pmap; 3207 pt_entry_t *pte, tpte; 3208 pd_entry_t *pde; 3209 vm_offset_t va; 3210 struct spglist free; 3211 3212 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3213 ("pmap_remove_all: page %p is not managed", m)); 3214 SLIST_INIT(&free); 3215 rw_wlock(&pvh_global_lock); 3216 sched_pin(); 3217 if ((m->flags & PG_FICTITIOUS) != 0) 3218 goto small_mappings; 3219 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 3220 while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) { 3221 va = pv->pv_va; 3222 pmap = PV_PMAP(pv); 3223 PMAP_LOCK(pmap); 3224 pde = pmap_pde(pmap, va); 3225 (void)pmap_demote_pde(pmap, pde, va); 3226 PMAP_UNLOCK(pmap); 3227 } 3228 small_mappings: 3229 while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { 3230 pmap = PV_PMAP(pv); 3231 PMAP_LOCK(pmap); 3232 pmap->pm_stats.resident_count--; 3233 pde = pmap_pde(pmap, pv->pv_va); 3234 KASSERT((*pde & PG_PS) == 0, ("pmap_remove_all: found" 3235 " a 4mpage in page %p's pv list", m)); 3236 pte = pmap_pte_quick(pmap, pv->pv_va); 3237 tpte = pte_load_clear(pte); 3238 KASSERT(tpte != 0, ("pmap_remove_all: pmap %p va %x zero pte", 3239 pmap, pv->pv_va)); 3240 if (tpte & PG_W) 3241 pmap->pm_stats.wired_count--; 3242 if (tpte & PG_A) 3243 vm_page_aflag_set(m, PGA_REFERENCED); 3244 3245 /* 3246 * Update the vm_page_t clean and reference bits. 
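		 * (PG_M together with PG_RW means the page was written
		 * through this mapping, so vm_page_dirty() must be called
		 * before the mapping and its pv entry are torn down below.)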
3247 */ 3248 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 3249 vm_page_dirty(m); 3250 pmap_unuse_pt(pmap, pv->pv_va, &free); 3251 pmap_invalidate_page_int(pmap, pv->pv_va); 3252 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); 3253 free_pv_entry(pmap, pv); 3254 PMAP_UNLOCK(pmap); 3255 } 3256 vm_page_aflag_clear(m, PGA_WRITEABLE); 3257 sched_unpin(); 3258 rw_wunlock(&pvh_global_lock); 3259 vm_page_free_pages_toq(&free, true); 3260 } 3261 3262 /* 3263 * pmap_protect_pde: do the things to protect a 4mpage in a process 3264 */ 3265 static boolean_t 3266 pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot) 3267 { 3268 pd_entry_t newpde, oldpde; 3269 vm_page_t m, mt; 3270 boolean_t anychanged; 3271 3272 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3273 KASSERT((sva & PDRMASK) == 0, 3274 ("pmap_protect_pde: sva is not 4mpage aligned")); 3275 anychanged = FALSE; 3276 retry: 3277 oldpde = newpde = *pde; 3278 if ((prot & VM_PROT_WRITE) == 0) { 3279 if ((oldpde & (PG_MANAGED | PG_M | PG_RW)) == 3280 (PG_MANAGED | PG_M | PG_RW)) { 3281 m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME); 3282 for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++) 3283 vm_page_dirty(mt); 3284 } 3285 newpde &= ~(PG_RW | PG_M); 3286 } 3287 #ifdef PMAP_PAE_COMP 3288 if ((prot & VM_PROT_EXECUTE) == 0 && !i386_read_exec) 3289 newpde |= pg_nx; 3290 #endif 3291 if (newpde != oldpde) { 3292 /* 3293 * As an optimization to future operations on this PDE, clear 3294 * PG_PROMOTED. The impending invalidation will remove any 3295 * lingering 4KB page mappings from the TLB. 3296 */ 3297 if (!pde_cmpset(pde, oldpde, newpde & ~PG_PROMOTED)) 3298 goto retry; 3299 if ((oldpde & PG_G) != 0) 3300 pmap_invalidate_pde_page(kernel_pmap, sva, oldpde); 3301 else 3302 anychanged = TRUE; 3303 } 3304 return (anychanged); 3305 } 3306 3307 /* 3308 * Set the physical protection on the 3309 * specified range of this map as requested. 3310 */ 3311 static void 3312 __CONCAT(PMTYPE, protect)(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, 3313 vm_prot_t prot) 3314 { 3315 vm_offset_t pdnxt; 3316 pd_entry_t ptpaddr; 3317 pt_entry_t *pte; 3318 boolean_t anychanged, pv_lists_locked; 3319 3320 KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot)); 3321 if (prot == VM_PROT_NONE) { 3322 pmap_remove(pmap, sva, eva); 3323 return; 3324 } 3325 3326 #ifdef PMAP_PAE_COMP 3327 if ((prot & (VM_PROT_WRITE | VM_PROT_EXECUTE)) == 3328 (VM_PROT_WRITE | VM_PROT_EXECUTE)) 3329 return; 3330 #else 3331 if (prot & VM_PROT_WRITE) 3332 return; 3333 #endif 3334 3335 if (pmap_is_current(pmap)) 3336 pv_lists_locked = FALSE; 3337 else { 3338 pv_lists_locked = TRUE; 3339 resume: 3340 rw_wlock(&pvh_global_lock); 3341 sched_pin(); 3342 } 3343 anychanged = FALSE; 3344 3345 PMAP_LOCK(pmap); 3346 for (; sva < eva; sva = pdnxt) { 3347 pt_entry_t obits, pbits; 3348 u_int pdirindex; 3349 3350 pdnxt = (sva + NBPDR) & ~PDRMASK; 3351 if (pdnxt < sva) 3352 pdnxt = eva; 3353 3354 pdirindex = sva >> PDRSHIFT; 3355 ptpaddr = pmap->pm_pdir[pdirindex]; 3356 3357 /* 3358 * Weed out invalid mappings. Note: we assume that the page 3359 * directory table is always allocated, and in kernel virtual. 3360 */ 3361 if (ptpaddr == 0) 3362 continue; 3363 3364 /* 3365 * Check for large page. 3366 */ 3367 if ((ptpaddr & PG_PS) != 0) { 3368 /* 3369 * Are we protecting the entire large page? If not, 3370 * demote the mapping and fall through. 3371 */ 3372 if (sva + NBPDR == pdnxt && eva >= pdnxt) { 3373 /* 3374 * The TLB entry for a PG_G mapping is 3375 * invalidated by pmap_protect_pde(). 
3376 */ 3377 if (pmap_protect_pde(pmap, 3378 &pmap->pm_pdir[pdirindex], sva, prot)) 3379 anychanged = TRUE; 3380 continue; 3381 } else { 3382 if (!pv_lists_locked) { 3383 pv_lists_locked = TRUE; 3384 if (!rw_try_wlock(&pvh_global_lock)) { 3385 if (anychanged) 3386 pmap_invalidate_all_int( 3387 pmap); 3388 PMAP_UNLOCK(pmap); 3389 goto resume; 3390 } 3391 sched_pin(); 3392 } 3393 if (!pmap_demote_pde(pmap, 3394 &pmap->pm_pdir[pdirindex], sva)) { 3395 /* 3396 * The large page mapping was 3397 * destroyed. 3398 */ 3399 continue; 3400 } 3401 } 3402 } 3403 3404 if (pdnxt > eva) 3405 pdnxt = eva; 3406 3407 for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++, 3408 sva += PAGE_SIZE) { 3409 vm_page_t m; 3410 3411 retry: 3412 /* 3413 * Regardless of whether a pte is 32 or 64 bits in 3414 * size, PG_RW, PG_A, and PG_M are among the least 3415 * significant 32 bits. 3416 */ 3417 obits = pbits = *pte; 3418 if ((pbits & PG_V) == 0) 3419 continue; 3420 3421 if ((prot & VM_PROT_WRITE) == 0) { 3422 if ((pbits & (PG_MANAGED | PG_M | PG_RW)) == 3423 (PG_MANAGED | PG_M | PG_RW)) { 3424 m = PHYS_TO_VM_PAGE(pbits & PG_FRAME); 3425 vm_page_dirty(m); 3426 } 3427 pbits &= ~(PG_RW | PG_M); 3428 } 3429 #ifdef PMAP_PAE_COMP 3430 if ((prot & VM_PROT_EXECUTE) == 0 && !i386_read_exec) 3431 pbits |= pg_nx; 3432 #endif 3433 3434 if (pbits != obits) { 3435 #ifdef PMAP_PAE_COMP 3436 if (!atomic_cmpset_64(pte, obits, pbits)) 3437 goto retry; 3438 #else 3439 if (!atomic_cmpset_int((u_int *)pte, obits, 3440 pbits)) 3441 goto retry; 3442 #endif 3443 if (obits & PG_G) 3444 pmap_invalidate_page_int(pmap, sva); 3445 else 3446 anychanged = TRUE; 3447 } 3448 } 3449 } 3450 if (anychanged) 3451 pmap_invalidate_all_int(pmap); 3452 if (pv_lists_locked) { 3453 sched_unpin(); 3454 rw_wunlock(&pvh_global_lock); 3455 } 3456 PMAP_UNLOCK(pmap); 3457 } 3458 3459 #if VM_NRESERVLEVEL > 0 3460 /* 3461 * Tries to promote the 512 or 1024, contiguous 4KB page mappings that are 3462 * within a single page table page (PTP) to a single 2- or 4MB page mapping. 3463 * For promotion to occur, two conditions must be met: (1) the 4KB page 3464 * mappings must map aligned, contiguous physical memory and (2) the 4KB page 3465 * mappings must have identical characteristics. 3466 * 3467 * Managed (PG_MANAGED) mappings within the kernel address space are not 3468 * promoted. The reason is that kernel PDEs are replicated in each pmap but 3469 * pmap_clear_ptes() and pmap_ts_referenced() only read the PDE from the kernel 3470 * pmap. 3471 */ 3472 static void 3473 pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va) 3474 { 3475 pd_entry_t newpde; 3476 pt_entry_t *firstpte, oldpte, pa, *pte; 3477 #ifdef KTR 3478 vm_offset_t oldpteva; 3479 #endif 3480 vm_page_t mpte; 3481 3482 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3483 3484 /* 3485 * Examine the first PTE in the specified PTP. Abort if this PTE is 3486 * either invalid, unused, or does not map the first 4KB physical page 3487 * within a 2- or 4MB page. 
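	 *
	 * For illustration (non-PAE values): the test below,
	 * (newpde & ((PG_FRAME & PDRMASK) | PG_A | PG_V)) == (PG_A | PG_V),
	 * requires PG_V and PG_A to be set and bits 12 through 21 of the
	 * physical address to be zero, i.e. the first PTE must map a
	 * physical address that is itself 4MB aligned.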
3488 */ 3489 firstpte = pmap_pte_quick(pmap, trunc_4mpage(va)); 3490 setpde: 3491 newpde = *firstpte; 3492 if ((newpde & ((PG_FRAME & PDRMASK) | PG_A | PG_V)) != (PG_A | PG_V)) { 3493 pmap_pde_p_failures++; 3494 CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x" 3495 " in pmap %p", va, pmap); 3496 return; 3497 } 3498 if ((*firstpte & PG_MANAGED) != 0 && pmap == kernel_pmap) { 3499 pmap_pde_p_failures++; 3500 CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x" 3501 " in pmap %p", va, pmap); 3502 return; 3503 } 3504 if ((newpde & (PG_M | PG_RW)) == PG_RW) { 3505 /* 3506 * When PG_M is already clear, PG_RW can be cleared without 3507 * a TLB invalidation. 3508 */ 3509 if (!atomic_cmpset_int((u_int *)firstpte, newpde, newpde & 3510 ~PG_RW)) 3511 goto setpde; 3512 newpde &= ~PG_RW; 3513 } 3514 3515 /* 3516 * Examine each of the other PTEs in the specified PTP. Abort if this 3517 * PTE maps an unexpected 4KB physical page or does not have identical 3518 * characteristics to the first PTE. 3519 */ 3520 pa = (newpde & (PG_PS_FRAME | PG_A | PG_V)) + NBPDR - PAGE_SIZE; 3521 for (pte = firstpte + NPTEPG - 1; pte > firstpte; pte--) { 3522 setpte: 3523 oldpte = *pte; 3524 if ((oldpte & (PG_FRAME | PG_A | PG_V)) != pa) { 3525 pmap_pde_p_failures++; 3526 CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x" 3527 " in pmap %p", va, pmap); 3528 return; 3529 } 3530 if ((oldpte & (PG_M | PG_RW)) == PG_RW) { 3531 /* 3532 * When PG_M is already clear, PG_RW can be cleared 3533 * without a TLB invalidation. 3534 */ 3535 if (!atomic_cmpset_int((u_int *)pte, oldpte, 3536 oldpte & ~PG_RW)) 3537 goto setpte; 3538 oldpte &= ~PG_RW; 3539 #ifdef KTR 3540 oldpteva = (oldpte & PG_FRAME & PDRMASK) | 3541 (va & ~PDRMASK); 3542 #endif 3543 CTR2(KTR_PMAP, "pmap_promote_pde: protect for va %#x" 3544 " in pmap %p", oldpteva, pmap); 3545 } 3546 if ((oldpte & PG_PTE_PROMOTE) != (newpde & PG_PTE_PROMOTE)) { 3547 pmap_pde_p_failures++; 3548 CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x" 3549 " in pmap %p", va, pmap); 3550 return; 3551 } 3552 pa -= PAGE_SIZE; 3553 } 3554 3555 /* 3556 * Save the page table page in its current state until the PDE 3557 * mapping the superpage is demoted by pmap_demote_pde() or 3558 * destroyed by pmap_remove_pde(). 3559 */ 3560 mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME); 3561 KASSERT(mpte >= vm_page_array && 3562 mpte < &vm_page_array[vm_page_array_size], 3563 ("pmap_promote_pde: page table page is out of range")); 3564 KASSERT(mpte->pindex == va >> PDRSHIFT, 3565 ("pmap_promote_pde: page table page's pindex is wrong")); 3566 if (pmap_insert_pt_page(pmap, mpte, true)) { 3567 pmap_pde_p_failures++; 3568 CTR2(KTR_PMAP, 3569 "pmap_promote_pde: failure for va %#x in pmap %p", va, 3570 pmap); 3571 return; 3572 } 3573 3574 /* 3575 * Promote the pv entries. 3576 */ 3577 if ((newpde & PG_MANAGED) != 0) 3578 pmap_pv_promote_pde(pmap, va, newpde & PG_PS_FRAME); 3579 3580 /* 3581 * Propagate the PAT index to its proper position. 3582 */ 3583 if ((newpde & PG_PTE_PAT) != 0) 3584 newpde ^= PG_PDE_PAT | PG_PTE_PAT; 3585 3586 /* 3587 * Map the superpage. 
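	 * (PG_PROMOTED records that 4KB TLB entries for this range may
	 * still linger; pmap_protect_pde() clears it once an invalidation
	 * of the whole range is imminent.)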
3588 */ 3589 if (workaround_erratum383) 3590 pmap_update_pde(pmap, va, pde, PG_PS | newpde); 3591 else if (pmap == kernel_pmap) 3592 pmap_kenter_pde(va, PG_PROMOTED | PG_PS | newpde); 3593 else 3594 pde_store(pde, PG_PROMOTED | PG_PS | newpde); 3595 3596 pmap_pde_promotions++; 3597 CTR2(KTR_PMAP, "pmap_promote_pde: success for va %#x" 3598 " in pmap %p", va, pmap); 3599 } 3600 #endif /* VM_NRESERVLEVEL > 0 */ 3601 3602 /* 3603 * Insert the given physical page (p) at 3604 * the specified virtual address (v) in the 3605 * target physical map with the protection requested. 3606 * 3607 * If specified, the page will be wired down, meaning 3608 * that the related pte can not be reclaimed. 3609 * 3610 * NB: This is the only routine which MAY NOT lazy-evaluate 3611 * or lose information. That is, this routine must actually 3612 * insert this page into the given map NOW. 3613 */ 3614 static int 3615 __CONCAT(PMTYPE, enter)(pmap_t pmap, vm_offset_t va, vm_page_t m, 3616 vm_prot_t prot, u_int flags, int8_t psind) 3617 { 3618 pd_entry_t *pde; 3619 pt_entry_t *pte; 3620 pt_entry_t newpte, origpte; 3621 pv_entry_t pv; 3622 vm_paddr_t opa, pa; 3623 vm_page_t mpte, om; 3624 int rv; 3625 3626 va = trunc_page(va); 3627 KASSERT((pmap == kernel_pmap && va < VM_MAX_KERNEL_ADDRESS) || 3628 (pmap != kernel_pmap && va < VM_MAXUSER_ADDRESS), 3629 ("pmap_enter: toobig k%d %#x", pmap == kernel_pmap, va)); 3630 KASSERT(va < PMAP_TRM_MIN_ADDRESS, 3631 ("pmap_enter: invalid to pmap_enter into trampoline (va: 0x%x)", 3632 va)); 3633 KASSERT(pmap != kernel_pmap || (m->oflags & VPO_UNMANAGED) != 0 || 3634 !VA_IS_CLEANMAP(va), 3635 ("pmap_enter: managed mapping within the clean submap")); 3636 if ((m->oflags & VPO_UNMANAGED) == 0) 3637 VM_PAGE_OBJECT_BUSY_ASSERT(m); 3638 KASSERT((flags & PMAP_ENTER_RESERVED) == 0, 3639 ("pmap_enter: flags %u has reserved bits set", flags)); 3640 pa = VM_PAGE_TO_PHYS(m); 3641 newpte = (pt_entry_t)(pa | PG_A | PG_V); 3642 if ((flags & VM_PROT_WRITE) != 0) 3643 newpte |= PG_M; 3644 if ((prot & VM_PROT_WRITE) != 0) 3645 newpte |= PG_RW; 3646 KASSERT((newpte & (PG_M | PG_RW)) != PG_M, 3647 ("pmap_enter: flags includes VM_PROT_WRITE but prot doesn't")); 3648 #ifdef PMAP_PAE_COMP 3649 if ((prot & VM_PROT_EXECUTE) == 0 && !i386_read_exec) 3650 newpte |= pg_nx; 3651 #endif 3652 if ((flags & PMAP_ENTER_WIRED) != 0) 3653 newpte |= PG_W; 3654 if (pmap != kernel_pmap) 3655 newpte |= PG_U; 3656 newpte |= pmap_cache_bits(pmap, m->md.pat_mode, psind > 0); 3657 if ((m->oflags & VPO_UNMANAGED) == 0) 3658 newpte |= PG_MANAGED; 3659 3660 rw_wlock(&pvh_global_lock); 3661 PMAP_LOCK(pmap); 3662 sched_pin(); 3663 if (psind == 1) { 3664 /* Assert the required virtual and physical alignment. */ 3665 KASSERT((va & PDRMASK) == 0, ("pmap_enter: va unaligned")); 3666 KASSERT(m->psind > 0, ("pmap_enter: m->psind < psind")); 3667 rv = pmap_enter_pde(pmap, va, newpte | PG_PS, flags, m); 3668 goto out; 3669 } 3670 3671 pde = pmap_pde(pmap, va); 3672 if (pmap != kernel_pmap) { 3673 /* 3674 * va is for UVA. 3675 * In the case that a page table page is not resident, 3676 * we are creating it here. pmap_allocpte() handles 3677 * demotion. 3678 */ 3679 mpte = pmap_allocpte(pmap, va, flags); 3680 if (mpte == NULL) { 3681 KASSERT((flags & PMAP_ENTER_NOSLEEP) != 0, 3682 ("pmap_allocpte failed with sleep allowed")); 3683 rv = KERN_RESOURCE_SHORTAGE; 3684 goto out; 3685 } 3686 } else { 3687 /* 3688 * va is for KVA, so pmap_demote_pde() will never fail 3689 * to install a page table page. 
PG_V is also 3690 * asserted by pmap_demote_pde(). 3691 */ 3692 mpte = NULL; 3693 KASSERT(pde != NULL && (*pde & PG_V) != 0, 3694 ("KVA %#x invalid pde pdir %#jx", va, 3695 (uintmax_t)pmap->pm_pdir[PTDPTDI])); 3696 if ((*pde & PG_PS) != 0) 3697 pmap_demote_pde(pmap, pde, va); 3698 } 3699 pte = pmap_pte_quick(pmap, va); 3700 3701 /* 3702 * Page Directory table entry is not valid, which should not 3703 * happen. We should have either allocated the page table 3704 * page or demoted the existing mapping above. 3705 */ 3706 if (pte == NULL) { 3707 panic("pmap_enter: invalid page directory pdir=%#jx, va=%#x", 3708 (uintmax_t)pmap->pm_pdir[PTDPTDI], va); 3709 } 3710 3711 origpte = *pte; 3712 pv = NULL; 3713 3714 /* 3715 * Is the specified virtual address already mapped? 3716 */ 3717 if ((origpte & PG_V) != 0) { 3718 /* 3719 * Wiring change, just update stats. We don't worry about 3720 * wiring PT pages as they remain resident as long as there 3721 * are valid mappings in them. Hence, if a user page is wired, 3722 * the PT page will be also. 3723 */ 3724 if ((newpte & PG_W) != 0 && (origpte & PG_W) == 0) 3725 pmap->pm_stats.wired_count++; 3726 else if ((newpte & PG_W) == 0 && (origpte & PG_W) != 0) 3727 pmap->pm_stats.wired_count--; 3728 3729 /* 3730 * Remove the extra PT page reference. 3731 */ 3732 if (mpte != NULL) { 3733 mpte->ref_count--; 3734 KASSERT(mpte->ref_count > 0, 3735 ("pmap_enter: missing reference to page table page," 3736 " va: 0x%x", va)); 3737 } 3738 3739 /* 3740 * Has the physical page changed? 3741 */ 3742 opa = origpte & PG_FRAME; 3743 if (opa == pa) { 3744 /* 3745 * No, might be a protection or wiring change. 3746 */ 3747 if ((origpte & PG_MANAGED) != 0 && 3748 (newpte & PG_RW) != 0) 3749 vm_page_aflag_set(m, PGA_WRITEABLE); 3750 if (((origpte ^ newpte) & ~(PG_M | PG_A)) == 0) 3751 goto unchanged; 3752 goto validate; 3753 } 3754 3755 /* 3756 * The physical page has changed. Temporarily invalidate 3757 * the mapping. This ensures that all threads sharing the 3758 * pmap keep a consistent view of the mapping, which is 3759 * necessary for the correct handling of COW faults. It 3760 * also permits reuse of the old mapping's PV entry, 3761 * avoiding an allocation. 3762 * 3763 * For consistency, handle unmanaged mappings the same way. 3764 */ 3765 origpte = pte_load_clear(pte); 3766 KASSERT((origpte & PG_FRAME) == opa, 3767 ("pmap_enter: unexpected pa update for %#x", va)); 3768 if ((origpte & PG_MANAGED) != 0) { 3769 om = PHYS_TO_VM_PAGE(opa); 3770 3771 /* 3772 * The pmap lock is sufficient to synchronize with 3773 * concurrent calls to pmap_page_test_mappings() and 3774 * pmap_ts_referenced(). 3775 */ 3776 if ((origpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 3777 vm_page_dirty(om); 3778 if ((origpte & PG_A) != 0) { 3779 pmap_invalidate_page_int(pmap, va); 3780 vm_page_aflag_set(om, PGA_REFERENCED); 3781 } 3782 pv = pmap_pvh_remove(&om->md, pmap, va); 3783 KASSERT(pv != NULL, 3784 ("pmap_enter: no PV entry for %#x", va)); 3785 if ((newpte & PG_MANAGED) == 0) 3786 free_pv_entry(pmap, pv); 3787 if ((om->a.flags & PGA_WRITEABLE) != 0 && 3788 TAILQ_EMPTY(&om->md.pv_list) && 3789 ((om->flags & PG_FICTITIOUS) != 0 || 3790 TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list))) 3791 vm_page_aflag_clear(om, PGA_WRITEABLE); 3792 } else { 3793 /* 3794 * Since this mapping is unmanaged, assume that PG_A 3795 * is set. 3796 */ 3797 pmap_invalidate_page_int(pmap, va); 3798 } 3799 origpte = 0; 3800 } else { 3801 /* 3802 * Increment the counters. 
3803 */ 3804 if ((newpte & PG_W) != 0) 3805 pmap->pm_stats.wired_count++; 3806 pmap->pm_stats.resident_count++; 3807 } 3808 3809 /* 3810 * Enter on the PV list if part of our managed memory. 3811 */ 3812 if ((newpte & PG_MANAGED) != 0) { 3813 if (pv == NULL) { 3814 pv = get_pv_entry(pmap, FALSE); 3815 pv->pv_va = va; 3816 } 3817 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); 3818 if ((newpte & PG_RW) != 0) 3819 vm_page_aflag_set(m, PGA_WRITEABLE); 3820 } 3821 3822 /* 3823 * Update the PTE. 3824 */ 3825 if ((origpte & PG_V) != 0) { 3826 validate: 3827 origpte = pte_load_store(pte, newpte); 3828 KASSERT((origpte & PG_FRAME) == pa, 3829 ("pmap_enter: unexpected pa update for %#x", va)); 3830 if ((newpte & PG_M) == 0 && (origpte & (PG_M | PG_RW)) == 3831 (PG_M | PG_RW)) { 3832 if ((origpte & PG_MANAGED) != 0) 3833 vm_page_dirty(m); 3834 3835 /* 3836 * Although the PTE may still have PG_RW set, TLB 3837 * invalidation may nonetheless be required because 3838 * the PTE no longer has PG_M set. 3839 */ 3840 } 3841 #ifdef PMAP_PAE_COMP 3842 else if ((origpte & PG_NX) != 0 || (newpte & PG_NX) == 0) { 3843 /* 3844 * This PTE change does not require TLB invalidation. 3845 */ 3846 goto unchanged; 3847 } 3848 #endif 3849 if ((origpte & PG_A) != 0) 3850 pmap_invalidate_page_int(pmap, va); 3851 } else 3852 pte_store_zero(pte, newpte); 3853 3854 unchanged: 3855 3856 #if VM_NRESERVLEVEL > 0 3857 /* 3858 * If both the page table page and the reservation are fully 3859 * populated, then attempt promotion. 3860 */ 3861 if ((mpte == NULL || mpte->ref_count == NPTEPG) && 3862 pg_ps_enabled && (m->flags & PG_FICTITIOUS) == 0 && 3863 vm_reserv_level_iffullpop(m) == 0) 3864 pmap_promote_pde(pmap, pde, va); 3865 #endif 3866 3867 rv = KERN_SUCCESS; 3868 out: 3869 sched_unpin(); 3870 rw_wunlock(&pvh_global_lock); 3871 PMAP_UNLOCK(pmap); 3872 return (rv); 3873 } 3874 3875 /* 3876 * Tries to create a read- and/or execute-only 2 or 4 MB page mapping. Returns 3877 * true if successful. Returns false if (1) a mapping already exists at the 3878 * specified virtual address or (2) a PV entry cannot be allocated without 3879 * reclaiming another PV entry. 3880 */ 3881 static bool 3882 pmap_enter_4mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot) 3883 { 3884 pd_entry_t newpde; 3885 3886 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3887 newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(pmap, m->md.pat_mode, 1) | 3888 PG_PS | PG_V; 3889 if ((m->oflags & VPO_UNMANAGED) == 0) 3890 newpde |= PG_MANAGED; 3891 #ifdef PMAP_PAE_COMP 3892 if ((prot & VM_PROT_EXECUTE) == 0 && !i386_read_exec) 3893 newpde |= pg_nx; 3894 #endif 3895 if (pmap != kernel_pmap) 3896 newpde |= PG_U; 3897 return (pmap_enter_pde(pmap, va, newpde, PMAP_ENTER_NOSLEEP | 3898 PMAP_ENTER_NOREPLACE | PMAP_ENTER_NORECLAIM, NULL) == 3899 KERN_SUCCESS); 3900 } 3901 3902 /* 3903 * Returns true if every page table entry in the page table page that maps 3904 * the specified kernel virtual address is zero. 3905 */ 3906 static bool 3907 pmap_every_pte_zero(vm_offset_t va) 3908 { 3909 pt_entry_t *pt_end, *pte; 3910 3911 KASSERT((va & PDRMASK) == 0, ("va is misaligned")); 3912 pte = vtopte(va); 3913 for (pt_end = pte + NPTEPG; pte < pt_end; pte++) { 3914 if (*pte != 0) 3915 return (false); 3916 } 3917 return (true); 3918 } 3919 3920 /* 3921 * Tries to create the specified 2 or 4 MB page mapping. Returns KERN_SUCCESS 3922 * if the mapping was created, and either KERN_FAILURE or 3923 * KERN_RESOURCE_SHORTAGE otherwise. 
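 * As an illustrative sketch (not code from this file), a caller that
 * opportunistically attempts a superpage and falls back to 4KB mappings
 * consumes these return values roughly as follows:
 *
 *	if (pmap_enter_pde(pmap, va, newpde, PMAP_ENTER_NOSLEEP |
 *	    PMAP_ENTER_NOREPLACE | PMAP_ENTER_NORECLAIM, m) != KERN_SUCCESS)
 *		(fall back to creating 4KB mappings for the range)
 *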
Returns KERN_FAILURE if 3924 * PMAP_ENTER_NOREPLACE was specified and a mapping already exists at the 3925 * specified virtual address. Returns KERN_RESOURCE_SHORTAGE if 3926 * PMAP_ENTER_NORECLAIM was specified and a PV entry allocation failed. 3927 * 3928 * The parameter "m" is only used when creating a managed, writeable mapping. 3929 */ 3930 static int 3931 pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde, u_int flags, 3932 vm_page_t m) 3933 { 3934 struct spglist free; 3935 pd_entry_t oldpde, *pde; 3936 vm_page_t mt; 3937 3938 rw_assert(&pvh_global_lock, RA_WLOCKED); 3939 KASSERT((newpde & (PG_M | PG_RW)) != PG_RW, 3940 ("pmap_enter_pde: newpde is missing PG_M")); 3941 KASSERT(pmap == kernel_pmap || (newpde & PG_W) == 0, 3942 ("pmap_enter_pde: cannot create wired user mapping")); 3943 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3944 pde = pmap_pde(pmap, va); 3945 oldpde = *pde; 3946 if ((oldpde & PG_V) != 0) { 3947 if ((flags & PMAP_ENTER_NOREPLACE) != 0 && (pmap != 3948 kernel_pmap || (oldpde & PG_PS) != 0 || 3949 !pmap_every_pte_zero(va))) { 3950 CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx" 3951 " in pmap %p", va, pmap); 3952 return (KERN_FAILURE); 3953 } 3954 /* Break the existing mapping(s). */ 3955 SLIST_INIT(&free); 3956 if ((oldpde & PG_PS) != 0) { 3957 /* 3958 * If the PDE resulted from a promotion, then a 3959 * reserved PT page could be freed. 3960 */ 3961 (void)pmap_remove_pde(pmap, pde, va, &free); 3962 if ((oldpde & PG_G) == 0) 3963 pmap_invalidate_pde_page(pmap, va, oldpde); 3964 } else { 3965 if (pmap_remove_ptes(pmap, va, va + NBPDR, &free)) 3966 pmap_invalidate_all_int(pmap); 3967 } 3968 if (pmap != kernel_pmap) { 3969 vm_page_free_pages_toq(&free, true); 3970 KASSERT(*pde == 0, ("pmap_enter_pde: non-zero pde %p", 3971 pde)); 3972 } else { 3973 KASSERT(SLIST_EMPTY(&free), 3974 ("pmap_enter_pde: freed kernel page table page")); 3975 3976 /* 3977 * Both pmap_remove_pde() and pmap_remove_ptes() will 3978 * leave the kernel page table page zero filled. 3979 */ 3980 mt = PHYS_TO_VM_PAGE(*pde & PG_FRAME); 3981 if (pmap_insert_pt_page(pmap, mt, false)) 3982 panic("pmap_enter_pde: trie insert failed"); 3983 } 3984 } 3985 if ((newpde & PG_MANAGED) != 0) { 3986 /* 3987 * Abort this mapping if its PV entry could not be created. 3988 */ 3989 if (!pmap_pv_insert_pde(pmap, va, newpde, flags)) { 3990 CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx" 3991 " in pmap %p", va, pmap); 3992 return (KERN_RESOURCE_SHORTAGE); 3993 } 3994 if ((newpde & PG_RW) != 0) { 3995 for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++) 3996 vm_page_aflag_set(mt, PGA_WRITEABLE); 3997 } 3998 } 3999 4000 /* 4001 * Increment counters. 4002 */ 4003 if ((newpde & PG_W) != 0) 4004 pmap->pm_stats.wired_count += NBPDR / PAGE_SIZE; 4005 pmap->pm_stats.resident_count += NBPDR / PAGE_SIZE; 4006 4007 /* 4008 * Map the superpage. (This is not a promoted mapping; there will not 4009 * be any lingering 4KB page mappings in the TLB.) 4010 */ 4011 pde_store(pde, newpde); 4012 4013 pmap_pde_mappings++; 4014 CTR2(KTR_PMAP, "pmap_enter_pde: success for va %#lx in pmap %p", 4015 va, pmap); 4016 return (KERN_SUCCESS); 4017 } 4018 4019 /* 4020 * Maps a sequence of resident pages belonging to the same object. 4021 * The sequence begins with the given page m_start. This page is 4022 * mapped at the given virtual address start. Each subsequent page is 4023 * mapped at a virtual address that is offset from start by the same 4024 * amount as the page is offset from m_start within the object. 
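 * For example (illustrative values): if m_start has pindex 10 and a
 * later resident page in the sequence has pindex 13, then that page is
 * mapped at start + 3 * PAGE_SIZE, provided that this address is below
 * end.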
The
4025  * last page in the sequence is the page with the largest offset from
4026  * m_start that can be mapped at a virtual address less than the given
4027  * virtual address end.  Not every virtual page between start and end
4028  * is mapped; only those for which a resident page exists with the
4029  * corresponding offset from m_start are mapped.
4030  */
4031 static void
4032 __CONCAT(PMTYPE, enter_object)(pmap_t pmap, vm_offset_t start, vm_offset_t end,
4033     vm_page_t m_start, vm_prot_t prot)
4034 {
4035 	vm_offset_t va;
4036 	vm_page_t m, mpte;
4037 	vm_pindex_t diff, psize;
4038 
4039 	VM_OBJECT_ASSERT_LOCKED(m_start->object);
4040 
4041 	psize = atop(end - start);
4042 	mpte = NULL;
4043 	m = m_start;
4044 	rw_wlock(&pvh_global_lock);
4045 	PMAP_LOCK(pmap);
4046 	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
4047 		va = start + ptoa(diff);
4048 		if ((va & PDRMASK) == 0 && va + NBPDR <= end &&
4049 		    m->psind == 1 && pg_ps_enabled &&
4050 		    pmap_enter_4mpage(pmap, va, m, prot))
4051 			m = &m[NBPDR / PAGE_SIZE - 1];
4052 		else
4053 			mpte = pmap_enter_quick_locked(pmap, va, m, prot,
4054 			    mpte);
4055 		m = TAILQ_NEXT(m, listq);
4056 	}
4057 	rw_wunlock(&pvh_global_lock);
4058 	PMAP_UNLOCK(pmap);
4059 }
4060 
4061 /*
4062  * This code makes some *MAJOR* assumptions:
4063  * 1. The current pmap and the given pmap exist.
4064  * 2. Not wired.
4065  * 3. Read access.
4066  * 4. No page table pages.
4067  * but is *MUCH* faster than pmap_enter...
4068  */
4069 
4070 static void
4071 __CONCAT(PMTYPE, enter_quick)(pmap_t pmap, vm_offset_t va, vm_page_t m,
4072     vm_prot_t prot)
4073 {
4074 
4075 	rw_wlock(&pvh_global_lock);
4076 	PMAP_LOCK(pmap);
4077 	(void)pmap_enter_quick_locked(pmap, va, m, prot, NULL);
4078 	rw_wunlock(&pvh_global_lock);
4079 	PMAP_UNLOCK(pmap);
4080 }
4081 
4082 static vm_page_t
4083 pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
4084     vm_prot_t prot, vm_page_t mpte)
4085 {
4086 	pt_entry_t newpte, *pte;
4087 
4088 	KASSERT(pmap != kernel_pmap || !VA_IS_CLEANMAP(va) ||
4089 	    (m->oflags & VPO_UNMANAGED) != 0,
4090 	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
4091 	rw_assert(&pvh_global_lock, RA_WLOCKED);
4092 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4093 
4094 	/*
4095 	 * In the case that a page table page is not
4096 	 * resident, we are creating it here.
4097 	 */
4098 	if (pmap != kernel_pmap) {
4099 		u_int ptepindex;
4100 		pd_entry_t ptepa;
4101 
4102 		/*
4103 		 * Calculate the page table page index.
4104 		 */
4105 		ptepindex = va >> PDRSHIFT;
4106 		if (mpte && (mpte->pindex == ptepindex)) {
4107 			mpte->ref_count++;
4108 		} else {
4109 			/*
4110 			 * Get the page directory entry.
4111 			 */
4112 			ptepa = pmap->pm_pdir[ptepindex];
4113 
4114 			/*
4115 			 * If the page table page is mapped, we just increment
4116 			 * the hold count and activate it.
4117 			 */
4118 			if (ptepa) {
4119 				if (ptepa & PG_PS)
4120 					return (NULL);
4121 				mpte = PHYS_TO_VM_PAGE(ptepa & PG_FRAME);
4122 				mpte->ref_count++;
4123 			} else {
4124 				mpte = _pmap_allocpte(pmap, ptepindex,
4125 				    PMAP_ENTER_NOSLEEP);
4126 				if (mpte == NULL)
4127 					return (mpte);
4128 			}
4129 		}
4130 	} else {
4131 		mpte = NULL;
4132 	}
4133 
4134 	sched_pin();
4135 	pte = pmap_pte_quick(pmap, va);
4136 	if (*pte) {
4137 		if (mpte != NULL)
4138 			mpte->ref_count--;
4139 		sched_unpin();
4140 		return (NULL);
4141 	}
4142 
4143 	/*
4144 	 * Enter on the PV list if part of our managed memory.
4145 */ 4146 if ((m->oflags & VPO_UNMANAGED) == 0 && 4147 !pmap_try_insert_pv_entry(pmap, va, m)) { 4148 if (mpte != NULL) 4149 pmap_abort_ptp(pmap, va, mpte); 4150 sched_unpin(); 4151 return (NULL); 4152 } 4153 4154 /* 4155 * Increment counters 4156 */ 4157 pmap->pm_stats.resident_count++; 4158 4159 newpte = VM_PAGE_TO_PHYS(m) | PG_V | 4160 pmap_cache_bits(pmap, m->md.pat_mode, 0); 4161 if ((m->oflags & VPO_UNMANAGED) == 0) 4162 newpte |= PG_MANAGED; 4163 #ifdef PMAP_PAE_COMP 4164 if ((prot & VM_PROT_EXECUTE) == 0 && !i386_read_exec) 4165 newpte |= pg_nx; 4166 #endif 4167 if (pmap != kernel_pmap) 4168 newpte |= PG_U; 4169 pte_store_zero(pte, newpte); 4170 sched_unpin(); 4171 return (mpte); 4172 } 4173 4174 /* 4175 * Make a temporary mapping for a physical address. This is only intended 4176 * to be used for panic dumps. 4177 */ 4178 static void * 4179 __CONCAT(PMTYPE, kenter_temporary)(vm_paddr_t pa, int i) 4180 { 4181 vm_offset_t va; 4182 4183 va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE); 4184 pmap_kenter(va, pa); 4185 invlpg(va); 4186 return ((void *)crashdumpmap); 4187 } 4188 4189 /* 4190 * This code maps large physical mmap regions into the 4191 * processor address space. Note that some shortcuts 4192 * are taken, but the code works. 4193 */ 4194 static void 4195 __CONCAT(PMTYPE, object_init_pt)(pmap_t pmap, vm_offset_t addr, 4196 vm_object_t object, vm_pindex_t pindex, vm_size_t size) 4197 { 4198 pd_entry_t *pde; 4199 vm_paddr_t pa, ptepa; 4200 vm_page_t p; 4201 int pat_mode; 4202 4203 VM_OBJECT_ASSERT_WLOCKED(object); 4204 KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG, 4205 ("pmap_object_init_pt: non-device object")); 4206 if (pg_ps_enabled && 4207 (addr & (NBPDR - 1)) == 0 && (size & (NBPDR - 1)) == 0) { 4208 if (!vm_object_populate(object, pindex, pindex + atop(size))) 4209 return; 4210 p = vm_page_lookup(object, pindex); 4211 KASSERT(vm_page_all_valid(p), 4212 ("pmap_object_init_pt: invalid page %p", p)); 4213 pat_mode = p->md.pat_mode; 4214 4215 /* 4216 * Abort the mapping if the first page is not physically 4217 * aligned to a 2/4MB page boundary. 4218 */ 4219 ptepa = VM_PAGE_TO_PHYS(p); 4220 if (ptepa & (NBPDR - 1)) 4221 return; 4222 4223 /* 4224 * Skip the first page. Abort the mapping if the rest of 4225 * the pages are not physically contiguous or have differing 4226 * memory attributes. 4227 */ 4228 p = TAILQ_NEXT(p, listq); 4229 for (pa = ptepa + PAGE_SIZE; pa < ptepa + size; 4230 pa += PAGE_SIZE) { 4231 KASSERT(vm_page_all_valid(p), 4232 ("pmap_object_init_pt: invalid page %p", p)); 4233 if (pa != VM_PAGE_TO_PHYS(p) || 4234 pat_mode != p->md.pat_mode) 4235 return; 4236 p = TAILQ_NEXT(p, listq); 4237 } 4238 4239 /* 4240 * Map using 2/4MB pages. Since "ptepa" is 2/4M aligned and 4241 * "size" is a multiple of 2/4M, adding the PAT setting to 4242 * "pa" will not affect the termination of this loop. 4243 */ 4244 PMAP_LOCK(pmap); 4245 for (pa = ptepa | pmap_cache_bits(pmap, pat_mode, 1); 4246 pa < ptepa + size; pa += NBPDR) { 4247 pde = pmap_pde(pmap, addr); 4248 if (*pde == 0) { 4249 pde_store(pde, pa | PG_PS | PG_M | PG_A | 4250 PG_U | PG_RW | PG_V); 4251 pmap->pm_stats.resident_count += NBPDR / 4252 PAGE_SIZE; 4253 pmap_pde_mappings++; 4254 } 4255 /* Else continue on if the PDE is already valid. */ 4256 addr += NBPDR; 4257 } 4258 PMAP_UNLOCK(pmap); 4259 } 4260 } 4261 4262 /* 4263 * Clear the wired attribute from the mappings for the specified range of 4264 * addresses in the given pmap. 
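 * The walk below proceeds one page directory entry at a time; the
 * boundary computation (a sketch, assuming the 4MB non-PAE constants)
 * is:
 *
 *	pdnxt = (sva + NBPDR) & ~PDRMASK;
 *
 * e.g., sva = 0x00401000 yields pdnxt = 0x00800000, the base of the
 * next 4MB region; overflow of the addition is caught by the
 * "pdnxt < sva" test in the loop.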
Every valid mapping within that range 4265 * must have the wired attribute set. In contrast, invalid mappings 4266 * cannot have the wired attribute set, so they are ignored. 4267 * 4268 * The wired attribute of the page table entry is not a hardware feature, 4269 * so there is no need to invalidate any TLB entries. 4270 */ 4271 static void 4272 __CONCAT(PMTYPE, unwire)(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 4273 { 4274 vm_offset_t pdnxt; 4275 pd_entry_t *pde; 4276 pt_entry_t *pte; 4277 boolean_t pv_lists_locked; 4278 4279 if (pmap_is_current(pmap)) 4280 pv_lists_locked = FALSE; 4281 else { 4282 pv_lists_locked = TRUE; 4283 resume: 4284 rw_wlock(&pvh_global_lock); 4285 sched_pin(); 4286 } 4287 PMAP_LOCK(pmap); 4288 for (; sva < eva; sva = pdnxt) { 4289 pdnxt = (sva + NBPDR) & ~PDRMASK; 4290 if (pdnxt < sva) 4291 pdnxt = eva; 4292 pde = pmap_pde(pmap, sva); 4293 if ((*pde & PG_V) == 0) 4294 continue; 4295 if ((*pde & PG_PS) != 0) { 4296 if ((*pde & PG_W) == 0) 4297 panic("pmap_unwire: pde %#jx is missing PG_W", 4298 (uintmax_t)*pde); 4299 4300 /* 4301 * Are we unwiring the entire large page? If not, 4302 * demote the mapping and fall through. 4303 */ 4304 if (sva + NBPDR == pdnxt && eva >= pdnxt) { 4305 /* 4306 * Regardless of whether a pde (or pte) is 32 4307 * or 64 bits in size, PG_W is among the least 4308 * significant 32 bits. 4309 */ 4310 atomic_clear_int((u_int *)pde, PG_W); 4311 pmap->pm_stats.wired_count -= NBPDR / 4312 PAGE_SIZE; 4313 continue; 4314 } else { 4315 if (!pv_lists_locked) { 4316 pv_lists_locked = TRUE; 4317 if (!rw_try_wlock(&pvh_global_lock)) { 4318 PMAP_UNLOCK(pmap); 4319 /* Repeat sva. */ 4320 goto resume; 4321 } 4322 sched_pin(); 4323 } 4324 if (!pmap_demote_pde(pmap, pde, sva)) 4325 panic("pmap_unwire: demotion failed"); 4326 } 4327 } 4328 if (pdnxt > eva) 4329 pdnxt = eva; 4330 for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++, 4331 sva += PAGE_SIZE) { 4332 if ((*pte & PG_V) == 0) 4333 continue; 4334 if ((*pte & PG_W) == 0) 4335 panic("pmap_unwire: pte %#jx is missing PG_W", 4336 (uintmax_t)*pte); 4337 4338 /* 4339 * PG_W must be cleared atomically. Although the pmap 4340 * lock synchronizes access to PG_W, another processor 4341 * could be setting PG_M and/or PG_A concurrently. 4342 * 4343 * PG_W is among the least significant 32 bits. 4344 */ 4345 atomic_clear_int((u_int *)pte, PG_W); 4346 pmap->pm_stats.wired_count--; 4347 } 4348 } 4349 if (pv_lists_locked) { 4350 sched_unpin(); 4351 rw_wunlock(&pvh_global_lock); 4352 } 4353 PMAP_UNLOCK(pmap); 4354 } 4355 4356 /* 4357 * Copy the range specified by src_addr/len 4358 * from the source map to the range dst_addr/len 4359 * in the destination map. 4360 * 4361 * This routine is only advisory and need not do anything. Since 4362 * current pmap is always the kernel pmap when executing in 4363 * kernel, and we do not copy from the kernel pmap to a user 4364 * pmap, this optimization is not usable in 4/4G full split i386 4365 * world. 
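 * When both pmaps must be locked, deadlock is avoided by always taking
 * the locks in ascending pmap address order, mirroring the code below:
 *
 *	if (dst_pmap < src_pmap) {
 *		PMAP_LOCK(dst_pmap);
 *		PMAP_LOCK(src_pmap);
 *	} else {
 *		PMAP_LOCK(src_pmap);
 *		PMAP_LOCK(dst_pmap);
 *	}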
4366 */ 4367 4368 static void 4369 __CONCAT(PMTYPE, copy)(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, 4370 vm_size_t len, vm_offset_t src_addr) 4371 { 4372 pt_entry_t *src_pte, *dst_pte, ptetemp; 4373 pd_entry_t srcptepaddr; 4374 vm_page_t dstmpte, srcmpte; 4375 vm_offset_t addr, end_addr, pdnxt; 4376 u_int ptepindex; 4377 4378 if (dst_addr != src_addr) 4379 return; 4380 4381 end_addr = src_addr + len; 4382 4383 rw_wlock(&pvh_global_lock); 4384 if (dst_pmap < src_pmap) { 4385 PMAP_LOCK(dst_pmap); 4386 PMAP_LOCK(src_pmap); 4387 } else { 4388 PMAP_LOCK(src_pmap); 4389 PMAP_LOCK(dst_pmap); 4390 } 4391 sched_pin(); 4392 for (addr = src_addr; addr < end_addr; addr = pdnxt) { 4393 KASSERT(addr < PMAP_TRM_MIN_ADDRESS, 4394 ("pmap_copy: invalid to pmap_copy the trampoline")); 4395 4396 pdnxt = (addr + NBPDR) & ~PDRMASK; 4397 if (pdnxt < addr) 4398 pdnxt = end_addr; 4399 ptepindex = addr >> PDRSHIFT; 4400 4401 srcptepaddr = src_pmap->pm_pdir[ptepindex]; 4402 if (srcptepaddr == 0) 4403 continue; 4404 4405 if (srcptepaddr & PG_PS) { 4406 if ((addr & PDRMASK) != 0 || addr + NBPDR > end_addr) 4407 continue; 4408 if (dst_pmap->pm_pdir[ptepindex] == 0 && 4409 ((srcptepaddr & PG_MANAGED) == 0 || 4410 pmap_pv_insert_pde(dst_pmap, addr, srcptepaddr, 4411 PMAP_ENTER_NORECLAIM))) { 4412 dst_pmap->pm_pdir[ptepindex] = srcptepaddr & 4413 ~PG_W; 4414 dst_pmap->pm_stats.resident_count += 4415 NBPDR / PAGE_SIZE; 4416 pmap_pde_mappings++; 4417 } 4418 continue; 4419 } 4420 4421 srcmpte = PHYS_TO_VM_PAGE(srcptepaddr & PG_FRAME); 4422 KASSERT(srcmpte->ref_count > 0, 4423 ("pmap_copy: source page table page is unused")); 4424 4425 if (pdnxt > end_addr) 4426 pdnxt = end_addr; 4427 4428 src_pte = pmap_pte_quick3(src_pmap, addr); 4429 while (addr < pdnxt) { 4430 ptetemp = *src_pte; 4431 /* 4432 * we only virtual copy managed pages 4433 */ 4434 if ((ptetemp & PG_MANAGED) != 0) { 4435 dstmpte = pmap_allocpte(dst_pmap, addr, 4436 PMAP_ENTER_NOSLEEP); 4437 if (dstmpte == NULL) 4438 goto out; 4439 dst_pte = pmap_pte_quick(dst_pmap, addr); 4440 if (*dst_pte == 0 && 4441 pmap_try_insert_pv_entry(dst_pmap, addr, 4442 PHYS_TO_VM_PAGE(ptetemp & PG_FRAME))) { 4443 /* 4444 * Clear the wired, modified, and 4445 * accessed (referenced) bits 4446 * during the copy. 4447 */ 4448 *dst_pte = ptetemp & ~(PG_W | PG_M | 4449 PG_A); 4450 dst_pmap->pm_stats.resident_count++; 4451 } else { 4452 pmap_abort_ptp(dst_pmap, addr, dstmpte); 4453 goto out; 4454 } 4455 if (dstmpte->ref_count >= srcmpte->ref_count) 4456 break; 4457 } 4458 addr += PAGE_SIZE; 4459 src_pte++; 4460 } 4461 } 4462 out: 4463 sched_unpin(); 4464 rw_wunlock(&pvh_global_lock); 4465 PMAP_UNLOCK(src_pmap); 4466 PMAP_UNLOCK(dst_pmap); 4467 } 4468 4469 /* 4470 * Zero 1 page of virtual memory mapped from a hardware page by the caller. 4471 */ 4472 static __inline void 4473 pagezero(void *page) 4474 { 4475 #if defined(I686_CPU) 4476 if (cpu_class == CPUCLASS_686) { 4477 if (cpu_feature & CPUID_SSE2) 4478 sse2_pagezero(page); 4479 else 4480 i686_pagezero(page); 4481 } else 4482 #endif 4483 bzero(page, PAGE_SIZE); 4484 } 4485 4486 /* 4487 * Zero the specified hardware page. 
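 * Since i386 has no direct map of physical memory, the page is
 * temporarily entered into a per-CPU kernel virtual slot (CMAP2,
 * pc_cmap_addr2); sched_pin() keeps the thread on one CPU so that the
 * per-CPU mapping and its TLB entry remain consistent while in use.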
4488 */ 4489 static void 4490 __CONCAT(PMTYPE, zero_page)(vm_page_t m) 4491 { 4492 pt_entry_t *cmap_pte2; 4493 struct pcpu *pc; 4494 4495 sched_pin(); 4496 pc = get_pcpu(); 4497 cmap_pte2 = pc->pc_cmap_pte2; 4498 mtx_lock(&pc->pc_cmap_lock); 4499 if (*cmap_pte2) 4500 panic("pmap_zero_page: CMAP2 busy"); 4501 *cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M | 4502 pmap_cache_bits(kernel_pmap, m->md.pat_mode, 0); 4503 invlcaddr(pc->pc_cmap_addr2); 4504 pagezero(pc->pc_cmap_addr2); 4505 *cmap_pte2 = 0; 4506 4507 /* 4508 * Unpin the thread before releasing the lock. Otherwise the thread 4509 * could be rescheduled while still bound to the current CPU, only 4510 * to unpin itself immediately upon resuming execution. 4511 */ 4512 sched_unpin(); 4513 mtx_unlock(&pc->pc_cmap_lock); 4514 } 4515 4516 /* 4517 * Zero an area within a single hardware page. off and size must not 4518 * cover an area beyond a single hardware page. 4519 */ 4520 static void 4521 __CONCAT(PMTYPE, zero_page_area)(vm_page_t m, int off, int size) 4522 { 4523 pt_entry_t *cmap_pte2; 4524 struct pcpu *pc; 4525 4526 sched_pin(); 4527 pc = get_pcpu(); 4528 cmap_pte2 = pc->pc_cmap_pte2; 4529 mtx_lock(&pc->pc_cmap_lock); 4530 if (*cmap_pte2) 4531 panic("pmap_zero_page_area: CMAP2 busy"); 4532 *cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M | 4533 pmap_cache_bits(kernel_pmap, m->md.pat_mode, 0); 4534 invlcaddr(pc->pc_cmap_addr2); 4535 if (off == 0 && size == PAGE_SIZE) 4536 pagezero(pc->pc_cmap_addr2); 4537 else 4538 bzero(pc->pc_cmap_addr2 + off, size); 4539 *cmap_pte2 = 0; 4540 sched_unpin(); 4541 mtx_unlock(&pc->pc_cmap_lock); 4542 } 4543 4544 /* 4545 * Copy 1 specified hardware page to another. 4546 */ 4547 static void 4548 __CONCAT(PMTYPE, copy_page)(vm_page_t src, vm_page_t dst) 4549 { 4550 pt_entry_t *cmap_pte1, *cmap_pte2; 4551 struct pcpu *pc; 4552 4553 sched_pin(); 4554 pc = get_pcpu(); 4555 cmap_pte1 = pc->pc_cmap_pte1; 4556 cmap_pte2 = pc->pc_cmap_pte2; 4557 mtx_lock(&pc->pc_cmap_lock); 4558 if (*cmap_pte1) 4559 panic("pmap_copy_page: CMAP1 busy"); 4560 if (*cmap_pte2) 4561 panic("pmap_copy_page: CMAP2 busy"); 4562 *cmap_pte1 = PG_V | VM_PAGE_TO_PHYS(src) | PG_A | 4563 pmap_cache_bits(kernel_pmap, src->md.pat_mode, 0); 4564 invlcaddr(pc->pc_cmap_addr1); 4565 *cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(dst) | PG_A | PG_M | 4566 pmap_cache_bits(kernel_pmap, dst->md.pat_mode, 0); 4567 invlcaddr(pc->pc_cmap_addr2); 4568 bcopy(pc->pc_cmap_addr1, pc->pc_cmap_addr2, PAGE_SIZE); 4569 *cmap_pte1 = 0; 4570 *cmap_pte2 = 0; 4571 sched_unpin(); 4572 mtx_unlock(&pc->pc_cmap_lock); 4573 } 4574 4575 static void 4576 __CONCAT(PMTYPE, copy_pages)(vm_page_t ma[], vm_offset_t a_offset, 4577 vm_page_t mb[], vm_offset_t b_offset, int xfersize) 4578 { 4579 vm_page_t a_pg, b_pg; 4580 char *a_cp, *b_cp; 4581 vm_offset_t a_pg_offset, b_pg_offset; 4582 pt_entry_t *cmap_pte1, *cmap_pte2; 4583 struct pcpu *pc; 4584 int cnt; 4585 4586 sched_pin(); 4587 pc = get_pcpu(); 4588 cmap_pte1 = pc->pc_cmap_pte1; 4589 cmap_pte2 = pc->pc_cmap_pte2; 4590 mtx_lock(&pc->pc_cmap_lock); 4591 if (*cmap_pte1 != 0) 4592 panic("pmap_copy_pages: CMAP1 busy"); 4593 if (*cmap_pte2 != 0) 4594 panic("pmap_copy_pages: CMAP2 busy"); 4595 while (xfersize > 0) { 4596 a_pg = ma[a_offset >> PAGE_SHIFT]; 4597 a_pg_offset = a_offset & PAGE_MASK; 4598 cnt = min(xfersize, PAGE_SIZE - a_pg_offset); 4599 b_pg = mb[b_offset >> PAGE_SHIFT]; 4600 b_pg_offset = b_offset & PAGE_MASK; 4601 cnt = min(cnt, PAGE_SIZE - b_pg_offset); 4602 *cmap_pte1 = PG_V | 
VM_PAGE_TO_PHYS(a_pg) | PG_A | 4603 pmap_cache_bits(kernel_pmap, a_pg->md.pat_mode, 0); 4604 invlcaddr(pc->pc_cmap_addr1); 4605 *cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(b_pg) | PG_A | 4606 PG_M | pmap_cache_bits(kernel_pmap, b_pg->md.pat_mode, 0); 4607 invlcaddr(pc->pc_cmap_addr2); 4608 a_cp = pc->pc_cmap_addr1 + a_pg_offset; 4609 b_cp = pc->pc_cmap_addr2 + b_pg_offset; 4610 bcopy(a_cp, b_cp, cnt); 4611 a_offset += cnt; 4612 b_offset += cnt; 4613 xfersize -= cnt; 4614 } 4615 *cmap_pte1 = 0; 4616 *cmap_pte2 = 0; 4617 sched_unpin(); 4618 mtx_unlock(&pc->pc_cmap_lock); 4619 } 4620 4621 /* 4622 * Returns true if the pmap's pv is one of the first 4623 * 16 pvs linked to from this page. This count may 4624 * be changed upwards or downwards in the future; it 4625 * is only necessary that true be returned for a small 4626 * subset of pmaps for proper page aging. 4627 */ 4628 static boolean_t 4629 __CONCAT(PMTYPE, page_exists_quick)(pmap_t pmap, vm_page_t m) 4630 { 4631 struct md_page *pvh; 4632 pv_entry_t pv; 4633 int loops = 0; 4634 boolean_t rv; 4635 4636 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 4637 ("pmap_page_exists_quick: page %p is not managed", m)); 4638 rv = FALSE; 4639 rw_wlock(&pvh_global_lock); 4640 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { 4641 if (PV_PMAP(pv) == pmap) { 4642 rv = TRUE; 4643 break; 4644 } 4645 loops++; 4646 if (loops >= 16) 4647 break; 4648 } 4649 if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) { 4650 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 4651 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { 4652 if (PV_PMAP(pv) == pmap) { 4653 rv = TRUE; 4654 break; 4655 } 4656 loops++; 4657 if (loops >= 16) 4658 break; 4659 } 4660 } 4661 rw_wunlock(&pvh_global_lock); 4662 return (rv); 4663 } 4664 4665 /* 4666 * pmap_page_wired_mappings: 4667 * 4668 * Return the number of managed mappings to the given physical page 4669 * that are wired. 4670 */ 4671 static int 4672 __CONCAT(PMTYPE, page_wired_mappings)(vm_page_t m) 4673 { 4674 int count; 4675 4676 count = 0; 4677 if ((m->oflags & VPO_UNMANAGED) != 0) 4678 return (count); 4679 rw_wlock(&pvh_global_lock); 4680 count = pmap_pvh_wired_mappings(&m->md, count); 4681 if ((m->flags & PG_FICTITIOUS) == 0) { 4682 count = pmap_pvh_wired_mappings(pa_to_pvh(VM_PAGE_TO_PHYS(m)), 4683 count); 4684 } 4685 rw_wunlock(&pvh_global_lock); 4686 return (count); 4687 } 4688 4689 /* 4690 * pmap_pvh_wired_mappings: 4691 * 4692 * Return the updated number "count" of managed mappings that are wired. 4693 */ 4694 static int 4695 pmap_pvh_wired_mappings(struct md_page *pvh, int count) 4696 { 4697 pmap_t pmap; 4698 pt_entry_t *pte; 4699 pv_entry_t pv; 4700 4701 rw_assert(&pvh_global_lock, RA_WLOCKED); 4702 sched_pin(); 4703 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { 4704 pmap = PV_PMAP(pv); 4705 PMAP_LOCK(pmap); 4706 pte = pmap_pte_quick(pmap, pv->pv_va); 4707 if ((*pte & PG_W) != 0) 4708 count++; 4709 PMAP_UNLOCK(pmap); 4710 } 4711 sched_unpin(); 4712 return (count); 4713 } 4714 4715 /* 4716 * Returns TRUE if the given page is mapped individually or as part of 4717 * a 4mpage. Otherwise, returns FALSE. 
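 * Managed mappings are tracked on two pv lists: 4KB mappings on the
 * page's own md.pv_list, and 2/4MB mappings on the pv list of the
 * corresponding pv header (see pa_to_pvh()); both lists are checked
 * below.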
4718  */
4719 static boolean_t
4720 __CONCAT(PMTYPE, page_is_mapped)(vm_page_t m)
4721 {
4722 	boolean_t rv;
4723 
4724 	if ((m->oflags & VPO_UNMANAGED) != 0)
4725 		return (FALSE);
4726 	rw_wlock(&pvh_global_lock);
4727 	rv = !TAILQ_EMPTY(&m->md.pv_list) ||
4728 	    ((m->flags & PG_FICTITIOUS) == 0 &&
4729 	    !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list));
4730 	rw_wunlock(&pvh_global_lock);
4731 	return (rv);
4732 }
4733 
4734 /*
4735  * Remove all pages from the specified address space;
4736  * this aids process exit speeds.  Also, this code
4737  * is special-cased for the current process only, but
4738  * can have the more generic (and slightly slower)
4739  * mode enabled.  This is much faster than pmap_remove
4740  * in the case of running down an entire address space.
4741  */
4742 static void
4743 __CONCAT(PMTYPE, remove_pages)(pmap_t pmap)
4744 {
4745 	pt_entry_t *pte, tpte;
4746 	vm_page_t m, mpte, mt;
4747 	pv_entry_t pv;
4748 	struct md_page *pvh;
4749 	struct pv_chunk *pc, *npc;
4750 	struct spglist free;
4751 	int field, idx;
4752 	int32_t bit;
4753 	uint32_t inuse, bitmask;
4754 	int allfree;
4755 
4756 	if (pmap != PCPU_GET(curpmap)) {
4757 		printf("warning: pmap_remove_pages called with non-current pmap\n");
4758 		return;
4759 	}
4760 	SLIST_INIT(&free);
4761 	rw_wlock(&pvh_global_lock);
4762 	PMAP_LOCK(pmap);
4763 	sched_pin();
4764 	TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
4765 		KASSERT(pc->pc_pmap == pmap, ("Wrong pmap %p %p", pmap,
4766 		    pc->pc_pmap));
4767 		allfree = 1;
4768 		for (field = 0; field < _NPCM; field++) {
4769 			inuse = ~pc->pc_map[field] & pc_freemask[field];
4770 			while (inuse != 0) {
4771 				bit = bsfl(inuse);
4772 				bitmask = 1UL << bit;
4773 				idx = field * 32 + bit;
4774 				pv = &pc->pc_pventry[idx];
4775 				inuse &= ~bitmask;
4776 
4777 				pte = pmap_pde(pmap, pv->pv_va);
4778 				tpte = *pte;
4779 				if ((tpte & PG_PS) == 0) {
4780 					pte = pmap_pte_quick(pmap, pv->pv_va);
4781 					tpte = *pte & ~PG_PTE_PAT;
4782 				}
4783 
4784 				if (tpte == 0) {
4785 					printf(
4786 					    "TPTE at %p IS ZERO @ VA %08x\n",
4787 					    pte, pv->pv_va);
4788 					panic("bad pte");
4789 				}
4790 
4791 /*
4792  * We cannot remove wired pages from a process' mapping at this time.
4793  */
4794 				if (tpte & PG_W) {
4795 					allfree = 0;
4796 					continue;
4797 				}
4798 
4799 				m = PHYS_TO_VM_PAGE(tpte & PG_FRAME);
4800 				KASSERT(m->phys_addr == (tpte & PG_FRAME),
4801 				    ("vm_page_t %p phys_addr mismatch %016jx %016jx",
4802 				    m, (uintmax_t)m->phys_addr,
4803 				    (uintmax_t)tpte));
4804 
4805 				KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
4806 				    m < &vm_page_array[vm_page_array_size],
4807 				    ("pmap_remove_pages: bad tpte %#jx",
4808 				    (uintmax_t)tpte));
4809 
4810 				pte_clear(pte);
4811 
4812 				/*
4813 				 * Update the vm_page_t clean/reference bits.
4814 				 */
4815 				if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
4816 					if ((tpte & PG_PS) != 0) {
4817 						for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
4818 							vm_page_dirty(mt);
4819 					} else
4820 						vm_page_dirty(m);
4821 				}
4822 
4823 				/* Mark free */
4824 				PV_STAT(pv_entry_frees++);
4825 				PV_STAT(pv_entry_spare++);
4826 				pv_entry_count--;
4827 				pc->pc_map[field] |= bitmask;
4828 				if ((tpte & PG_PS) != 0) {
4829 					pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
4830 					pvh = pa_to_pvh(tpte & PG_PS_FRAME);
4831 					TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
4832 					if (TAILQ_EMPTY(&pvh->pv_list)) {
4833 						for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
4834 							if (TAILQ_EMPTY(&mt->md.pv_list))
4835 								vm_page_aflag_clear(mt, PGA_WRITEABLE);
4836 					}
4837 					mpte = pmap_remove_pt_page(pmap, pv->pv_va);
4838 					if (mpte != NULL) {
4839 						KASSERT(vm_page_all_valid(mpte),
4840 						    ("pmap_remove_pages: pte page not promoted"));
4841 						pmap->pm_stats.resident_count--;
4842 						KASSERT(mpte->ref_count == NPTEPG,
4843 						    ("pmap_remove_pages: pte page ref count error"));
4844 						mpte->ref_count = 0;
4845 						pmap_add_delayed_free_list(mpte, &free, FALSE);
4846 					}
4847 				} else {
4848 					pmap->pm_stats.resident_count--;
4849 					TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
4850 					if (TAILQ_EMPTY(&m->md.pv_list) &&
4851 					    (m->flags & PG_FICTITIOUS) == 0) {
4852 						pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
4853 						if (TAILQ_EMPTY(&pvh->pv_list))
4854 							vm_page_aflag_clear(m, PGA_WRITEABLE);
4855 					}
4856 					pmap_unuse_pt(pmap, pv->pv_va, &free);
4857 				}
4858 			}
4859 		}
4860 		if (allfree) {
4861 			TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
4862 			free_pv_chunk(pc);
4863 		}
4864 	}
4865 	sched_unpin();
4866 	pmap_invalidate_all_int(pmap);
4867 	rw_wunlock(&pvh_global_lock);
4868 	PMAP_UNLOCK(pmap);
4869 	vm_page_free_pages_toq(&free, true);
4870 }
4871 
4872 /*
4873  * pmap_is_modified:
4874  *
4875  *	Return whether or not the specified physical page was modified
4876  *	in any physical maps.
4877  */
4878 static boolean_t
4879 __CONCAT(PMTYPE, is_modified)(vm_page_t m)
4880 {
4881 	boolean_t rv;
4882 
4883 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4884 	    ("pmap_is_modified: page %p is not managed", m));
4885 
4886 	/*
4887 	 * If the page is not busied then this check is racy.
4888 	 */
4889 	if (!pmap_page_is_write_mapped(m))
4890 		return (FALSE);
4891 	rw_wlock(&pvh_global_lock);
4892 	rv = pmap_is_modified_pvh(&m->md) ||
4893 	    ((m->flags & PG_FICTITIOUS) == 0 &&
4894 	    pmap_is_modified_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
4895 	rw_wunlock(&pvh_global_lock);
4896 	return (rv);
4897 }
4898 
4899 /*
4900  * Returns TRUE if any of the given mappings were used to modify
4901  * physical memory.  Otherwise, returns FALSE.  Both page and 4mpage
4902  * mappings are supported.
4903  */
4904 static boolean_t
4905 pmap_is_modified_pvh(struct md_page *pvh)
4906 {
4907 	pv_entry_t pv;
4908 	pt_entry_t *pte;
4909 	pmap_t pmap;
4910 	boolean_t rv;
4911 
4912 	rw_assert(&pvh_global_lock, RA_WLOCKED);
4913 	rv = FALSE;
4914 	sched_pin();
4915 	TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
4916 		pmap = PV_PMAP(pv);
4917 		PMAP_LOCK(pmap);
4918 		pte = pmap_pte_quick(pmap, pv->pv_va);
4919 		rv = (*pte & (PG_M | PG_RW)) == (PG_M | PG_RW);
4920 		PMAP_UNLOCK(pmap);
4921 		if (rv)
4922 			break;
4923 	}
4924 	sched_unpin();
4925 	return (rv);
4926 }
4927 
4928 /*
4929  * pmap_is_prefaultable:
4930  *
4931  *	Return whether or not the specified virtual address is eligible
4932  *	for prefault.
4933 */ 4934 static boolean_t 4935 __CONCAT(PMTYPE, is_prefaultable)(pmap_t pmap, vm_offset_t addr) 4936 { 4937 pd_entry_t pde; 4938 boolean_t rv; 4939 4940 rv = FALSE; 4941 PMAP_LOCK(pmap); 4942 pde = *pmap_pde(pmap, addr); 4943 if (pde != 0 && (pde & PG_PS) == 0) 4944 rv = pmap_pte_ufast(pmap, addr, pde) == 0; 4945 PMAP_UNLOCK(pmap); 4946 return (rv); 4947 } 4948 4949 /* 4950 * pmap_is_referenced: 4951 * 4952 * Return whether or not the specified physical page was referenced 4953 * in any physical maps. 4954 */ 4955 static boolean_t 4956 __CONCAT(PMTYPE, is_referenced)(vm_page_t m) 4957 { 4958 boolean_t rv; 4959 4960 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 4961 ("pmap_is_referenced: page %p is not managed", m)); 4962 rw_wlock(&pvh_global_lock); 4963 rv = pmap_is_referenced_pvh(&m->md) || 4964 ((m->flags & PG_FICTITIOUS) == 0 && 4965 pmap_is_referenced_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m)))); 4966 rw_wunlock(&pvh_global_lock); 4967 return (rv); 4968 } 4969 4970 /* 4971 * Returns TRUE if any of the given mappings were referenced and FALSE 4972 * otherwise. Both page and 4mpage mappings are supported. 4973 */ 4974 static boolean_t 4975 pmap_is_referenced_pvh(struct md_page *pvh) 4976 { 4977 pv_entry_t pv; 4978 pt_entry_t *pte; 4979 pmap_t pmap; 4980 boolean_t rv; 4981 4982 rw_assert(&pvh_global_lock, RA_WLOCKED); 4983 rv = FALSE; 4984 sched_pin(); 4985 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { 4986 pmap = PV_PMAP(pv); 4987 PMAP_LOCK(pmap); 4988 pte = pmap_pte_quick(pmap, pv->pv_va); 4989 rv = (*pte & (PG_A | PG_V)) == (PG_A | PG_V); 4990 PMAP_UNLOCK(pmap); 4991 if (rv) 4992 break; 4993 } 4994 sched_unpin(); 4995 return (rv); 4996 } 4997 4998 /* 4999 * Clear the write and modified bits in each of the given page's mappings. 5000 */ 5001 static void 5002 __CONCAT(PMTYPE, remove_write)(vm_page_t m) 5003 { 5004 struct md_page *pvh; 5005 pv_entry_t next_pv, pv; 5006 pmap_t pmap; 5007 pd_entry_t *pde; 5008 pt_entry_t oldpte, *pte; 5009 vm_offset_t va; 5010 5011 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 5012 ("pmap_remove_write: page %p is not managed", m)); 5013 vm_page_assert_busied(m); 5014 5015 if (!pmap_page_is_write_mapped(m)) 5016 return; 5017 rw_wlock(&pvh_global_lock); 5018 sched_pin(); 5019 if ((m->flags & PG_FICTITIOUS) != 0) 5020 goto small_mappings; 5021 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 5022 TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) { 5023 va = pv->pv_va; 5024 pmap = PV_PMAP(pv); 5025 PMAP_LOCK(pmap); 5026 pde = pmap_pde(pmap, va); 5027 if ((*pde & PG_RW) != 0) 5028 (void)pmap_demote_pde(pmap, pde, va); 5029 PMAP_UNLOCK(pmap); 5030 } 5031 small_mappings: 5032 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { 5033 pmap = PV_PMAP(pv); 5034 PMAP_LOCK(pmap); 5035 pde = pmap_pde(pmap, pv->pv_va); 5036 KASSERT((*pde & PG_PS) == 0, ("pmap_clear_write: found" 5037 " a 4mpage in page %p's pv list", m)); 5038 pte = pmap_pte_quick(pmap, pv->pv_va); 5039 retry: 5040 oldpte = *pte; 5041 if ((oldpte & PG_RW) != 0) { 5042 /* 5043 * Regardless of whether a pte is 32 or 64 bits 5044 * in size, PG_RW and PG_M are among the least 5045 * significant 32 bits. 
5046 */ 5047 if (!atomic_cmpset_int((u_int *)pte, oldpte, 5048 oldpte & ~(PG_RW | PG_M))) 5049 goto retry; 5050 if ((oldpte & PG_M) != 0) 5051 vm_page_dirty(m); 5052 pmap_invalidate_page_int(pmap, pv->pv_va); 5053 } 5054 PMAP_UNLOCK(pmap); 5055 } 5056 vm_page_aflag_clear(m, PGA_WRITEABLE); 5057 sched_unpin(); 5058 rw_wunlock(&pvh_global_lock); 5059 } 5060 5061 /* 5062 * pmap_ts_referenced: 5063 * 5064 * Return a count of reference bits for a page, clearing those bits. 5065 * It is not necessary for every reference bit to be cleared, but it 5066 * is necessary that 0 only be returned when there are truly no 5067 * reference bits set. 5068 * 5069 * As an optimization, update the page's dirty field if a modified bit is 5070 * found while counting reference bits. This opportunistic update can be 5071 * performed at low cost and can eliminate the need for some future calls 5072 * to pmap_is_modified(). However, since this function stops after 5073 * finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some 5074 * dirty pages. Those dirty pages will only be detected by a future call 5075 * to pmap_is_modified(). 5076 */ 5077 static int 5078 __CONCAT(PMTYPE, ts_referenced)(vm_page_t m) 5079 { 5080 struct md_page *pvh; 5081 pv_entry_t pv, pvf; 5082 pmap_t pmap; 5083 pd_entry_t *pde; 5084 pt_entry_t *pte; 5085 vm_paddr_t pa; 5086 int rtval = 0; 5087 5088 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 5089 ("pmap_ts_referenced: page %p is not managed", m)); 5090 pa = VM_PAGE_TO_PHYS(m); 5091 pvh = pa_to_pvh(pa); 5092 rw_wlock(&pvh_global_lock); 5093 sched_pin(); 5094 if ((m->flags & PG_FICTITIOUS) != 0 || 5095 (pvf = TAILQ_FIRST(&pvh->pv_list)) == NULL) 5096 goto small_mappings; 5097 pv = pvf; 5098 do { 5099 pmap = PV_PMAP(pv); 5100 PMAP_LOCK(pmap); 5101 pde = pmap_pde(pmap, pv->pv_va); 5102 if ((*pde & (PG_M | PG_RW)) == (PG_M | PG_RW)) { 5103 /* 5104 * Although "*pde" is mapping a 2/4MB page, because 5105 * this function is called at a 4KB page granularity, 5106 * we only update the 4KB page under test. 5107 */ 5108 vm_page_dirty(m); 5109 } 5110 if ((*pde & PG_A) != 0) { 5111 /* 5112 * Since this reference bit is shared by either 1024 5113 * or 512 4KB pages, it should not be cleared every 5114 * time it is tested. Apply a simple "hash" function 5115 * on the physical page number, the virtual superpage 5116 * number, and the pmap address to select one 4KB page 5117 * out of the 1024 or 512 on which testing the 5118 * reference bit will result in clearing that bit. 5119 * This function is designed to avoid the selection of 5120 * the same 4KB page for every 2- or 4MB page mapping. 5121 * 5122 * On demotion, a mapping that hasn't been referenced 5123 * is simply destroyed. To avoid the possibility of a 5124 * subsequent page fault on a demoted wired mapping, 5125 * always leave its reference bit set. Moreover, 5126 * since the superpage is wired, the current state of 5127 * its reference bit won't affect page replacement. 5128 */ 5129 if ((((pa >> PAGE_SHIFT) ^ (pv->pv_va >> PDRSHIFT) ^ 5130 (uintptr_t)pmap) & (NPTEPG - 1)) == 0 && 5131 (*pde & PG_W) == 0) { 5132 atomic_clear_int((u_int *)pde, PG_A); 5133 pmap_invalidate_page_int(pmap, pv->pv_va); 5134 } 5135 rtval++; 5136 } 5137 PMAP_UNLOCK(pmap); 5138 /* Rotate the PV list if it has more than one entry. 
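		 * (Rotation spreads reference-bit sampling across all of the
		 * page's mappings over successive calls, instead of always
		 * examining the same mapping first.)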
*/ 5139 if (TAILQ_NEXT(pv, pv_next) != NULL) { 5140 TAILQ_REMOVE(&pvh->pv_list, pv, pv_next); 5141 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next); 5142 } 5143 if (rtval >= PMAP_TS_REFERENCED_MAX) 5144 goto out; 5145 } while ((pv = TAILQ_FIRST(&pvh->pv_list)) != pvf); 5146 small_mappings: 5147 if ((pvf = TAILQ_FIRST(&m->md.pv_list)) == NULL) 5148 goto out; 5149 pv = pvf; 5150 do { 5151 pmap = PV_PMAP(pv); 5152 PMAP_LOCK(pmap); 5153 pde = pmap_pde(pmap, pv->pv_va); 5154 KASSERT((*pde & PG_PS) == 0, 5155 ("pmap_ts_referenced: found a 4mpage in page %p's pv list", 5156 m)); 5157 pte = pmap_pte_quick(pmap, pv->pv_va); 5158 if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 5159 vm_page_dirty(m); 5160 if ((*pte & PG_A) != 0) { 5161 atomic_clear_int((u_int *)pte, PG_A); 5162 pmap_invalidate_page_int(pmap, pv->pv_va); 5163 rtval++; 5164 } 5165 PMAP_UNLOCK(pmap); 5166 /* Rotate the PV list if it has more than one entry. */ 5167 if (TAILQ_NEXT(pv, pv_next) != NULL) { 5168 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); 5169 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); 5170 } 5171 } while ((pv = TAILQ_FIRST(&m->md.pv_list)) != pvf && rtval < 5172 PMAP_TS_REFERENCED_MAX); 5173 out: 5174 sched_unpin(); 5175 rw_wunlock(&pvh_global_lock); 5176 return (rtval); 5177 } 5178 5179 /* 5180 * Apply the given advice to the specified range of addresses within the 5181 * given pmap. Depending on the advice, clear the referenced and/or 5182 * modified flags in each mapping and set the mapped page's dirty field. 5183 */ 5184 static void 5185 __CONCAT(PMTYPE, advise)(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, 5186 int advice) 5187 { 5188 pd_entry_t oldpde, *pde; 5189 pt_entry_t *pte; 5190 vm_offset_t va, pdnxt; 5191 vm_page_t m; 5192 bool anychanged, pv_lists_locked; 5193 5194 if (advice != MADV_DONTNEED && advice != MADV_FREE) 5195 return; 5196 if (pmap_is_current(pmap)) 5197 pv_lists_locked = false; 5198 else { 5199 pv_lists_locked = true; 5200 resume: 5201 rw_wlock(&pvh_global_lock); 5202 sched_pin(); 5203 } 5204 anychanged = false; 5205 PMAP_LOCK(pmap); 5206 for (; sva < eva; sva = pdnxt) { 5207 pdnxt = (sva + NBPDR) & ~PDRMASK; 5208 if (pdnxt < sva) 5209 pdnxt = eva; 5210 pde = pmap_pde(pmap, sva); 5211 oldpde = *pde; 5212 if ((oldpde & PG_V) == 0) 5213 continue; 5214 else if ((oldpde & PG_PS) != 0) { 5215 if ((oldpde & PG_MANAGED) == 0) 5216 continue; 5217 if (!pv_lists_locked) { 5218 pv_lists_locked = true; 5219 if (!rw_try_wlock(&pvh_global_lock)) { 5220 if (anychanged) 5221 pmap_invalidate_all_int(pmap); 5222 PMAP_UNLOCK(pmap); 5223 goto resume; 5224 } 5225 sched_pin(); 5226 } 5227 if (!pmap_demote_pde(pmap, pde, sva)) { 5228 /* 5229 * The large page mapping was destroyed. 5230 */ 5231 continue; 5232 } 5233 5234 /* 5235 * Unless the page mappings are wired, remove the 5236 * mapping to a single page so that a subsequent 5237 * access may repromote. Choosing the last page 5238 * within the address range [sva, min(pdnxt, eva)) 5239 * generally results in more repromotions. Since the 5240 * underlying page table page is fully populated, this 5241 * removal never frees a page table page. 
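		 * As a worked example (illustrative, 4MB non-PAE constants
		 * assumed): with sva = 0x00400000 and eva = 0x00600000,
		 * pdnxt = 0x00800000, so the page chosen below is
		 * va = min(pdnxt, eva) - PAGE_SIZE = 0x005ff000, the last
		 * page of the advised subrange.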
5242 */ 5243 if ((oldpde & PG_W) == 0) { 5244 va = eva; 5245 if (va > pdnxt) 5246 va = pdnxt; 5247 va -= PAGE_SIZE; 5248 KASSERT(va >= sva, 5249 ("pmap_advise: no address gap")); 5250 pte = pmap_pte_quick(pmap, va); 5251 KASSERT((*pte & PG_V) != 0, 5252 ("pmap_advise: invalid PTE")); 5253 pmap_remove_pte(pmap, pte, va, NULL); 5254 anychanged = true; 5255 } 5256 } 5257 if (pdnxt > eva) 5258 pdnxt = eva; 5259 va = pdnxt; 5260 for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++, 5261 sva += PAGE_SIZE) { 5262 if ((*pte & (PG_MANAGED | PG_V)) != (PG_MANAGED | PG_V)) 5263 goto maybe_invlrng; 5264 else if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) { 5265 if (advice == MADV_DONTNEED) { 5266 /* 5267 * Future calls to pmap_is_modified() 5268 * can be avoided by making the page 5269 * dirty now. 5270 */ 5271 m = PHYS_TO_VM_PAGE(*pte & PG_FRAME); 5272 vm_page_dirty(m); 5273 } 5274 atomic_clear_int((u_int *)pte, PG_M | PG_A); 5275 } else if ((*pte & PG_A) != 0) 5276 atomic_clear_int((u_int *)pte, PG_A); 5277 else 5278 goto maybe_invlrng; 5279 if ((*pte & PG_G) != 0) { 5280 if (va == pdnxt) 5281 va = sva; 5282 } else 5283 anychanged = true; 5284 continue; 5285 maybe_invlrng: 5286 if (va != pdnxt) { 5287 pmap_invalidate_range_int(pmap, va, sva); 5288 va = pdnxt; 5289 } 5290 } 5291 if (va != pdnxt) 5292 pmap_invalidate_range_int(pmap, va, sva); 5293 } 5294 if (anychanged) 5295 pmap_invalidate_all_int(pmap); 5296 if (pv_lists_locked) { 5297 sched_unpin(); 5298 rw_wunlock(&pvh_global_lock); 5299 } 5300 PMAP_UNLOCK(pmap); 5301 } 5302 5303 /* 5304 * Clear the modify bits on the specified physical page. 5305 */ 5306 static void 5307 __CONCAT(PMTYPE, clear_modify)(vm_page_t m) 5308 { 5309 struct md_page *pvh; 5310 pv_entry_t next_pv, pv; 5311 pmap_t pmap; 5312 pd_entry_t oldpde, *pde; 5313 pt_entry_t *pte; 5314 vm_offset_t va; 5315 5316 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 5317 ("pmap_clear_modify: page %p is not managed", m)); 5318 vm_page_assert_busied(m); 5319 5320 if (!pmap_page_is_write_mapped(m)) 5321 return; 5322 rw_wlock(&pvh_global_lock); 5323 sched_pin(); 5324 if ((m->flags & PG_FICTITIOUS) != 0) 5325 goto small_mappings; 5326 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 5327 TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) { 5328 va = pv->pv_va; 5329 pmap = PV_PMAP(pv); 5330 PMAP_LOCK(pmap); 5331 pde = pmap_pde(pmap, va); 5332 oldpde = *pde; 5333 /* If oldpde has PG_RW set, then it also has PG_M set. */ 5334 if ((oldpde & PG_RW) != 0 && 5335 pmap_demote_pde(pmap, pde, va) && 5336 (oldpde & PG_W) == 0) { 5337 /* 5338 * Write protect the mapping to a single page so that 5339 * a subsequent write access may repromote. 5340 */ 5341 va += VM_PAGE_TO_PHYS(m) - (oldpde & PG_PS_FRAME); 5342 pte = pmap_pte_quick(pmap, va); 5343 /* 5344 * Regardless of whether a pte is 32 or 64 bits 5345 * in size, PG_RW and PG_M are among the least 5346 * significant 32 bits. 5347 */ 5348 atomic_clear_int((u_int *)pte, PG_M | PG_RW); 5349 vm_page_dirty(m); 5350 pmap_invalidate_page_int(pmap, va); 5351 } 5352 PMAP_UNLOCK(pmap); 5353 } 5354 small_mappings: 5355 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { 5356 pmap = PV_PMAP(pv); 5357 PMAP_LOCK(pmap); 5358 pde = pmap_pde(pmap, pv->pv_va); 5359 KASSERT((*pde & PG_PS) == 0, ("pmap_clear_modify: found" 5360 " a 4mpage in page %p's pv list", m)); 5361 pte = pmap_pte_quick(pmap, pv->pv_va); 5362 if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) { 5363 /* 5364 * Regardless of whether a pte is 32 or 64 bits 5365 * in size, PG_M is among the least significant 5366 * 32 bits. 
5367 */ 5368 atomic_clear_int((u_int *)pte, PG_M); 5369 pmap_invalidate_page_int(pmap, pv->pv_va); 5370 } 5371 PMAP_UNLOCK(pmap); 5372 } 5373 sched_unpin(); 5374 rw_wunlock(&pvh_global_lock); 5375 } 5376 5377 /* 5378 * Miscellaneous support routines follow 5379 */ 5380 5381 /* Adjust the cache mode for a 4KB page mapped via a PTE. */ 5382 static __inline void 5383 pmap_pte_attr(pt_entry_t *pte, int cache_bits) 5384 { 5385 u_int opte, npte; 5386 5387 /* 5388 * The cache mode bits are all in the low 32-bits of the 5389 * PTE, so we can just spin on updating the low 32-bits. 5390 */ 5391 do { 5392 opte = *(u_int *)pte; 5393 npte = opte & ~PG_PTE_CACHE; 5394 npte |= cache_bits; 5395 } while (npte != opte && !atomic_cmpset_int((u_int *)pte, opte, npte)); 5396 } 5397 5398 /* Adjust the cache mode for a 2/4MB page mapped via a PDE. */ 5399 static __inline void 5400 pmap_pde_attr(pd_entry_t *pde, int cache_bits) 5401 { 5402 u_int opde, npde; 5403 5404 /* 5405 * The cache mode bits are all in the low 32-bits of the 5406 * PDE, so we can just spin on updating the low 32-bits. 5407 */ 5408 do { 5409 opde = *(u_int *)pde; 5410 npde = opde & ~PG_PDE_CACHE; 5411 npde |= cache_bits; 5412 } while (npde != opde && !atomic_cmpset_int((u_int *)pde, opde, npde)); 5413 } 5414 5415 /* 5416 * Map a set of physical memory pages into the kernel virtual 5417 * address space. Return a pointer to where it is mapped. This 5418 * routine is intended to be used for mapping device memory, 5419 * NOT real memory. 5420 */ 5421 static void * 5422 __CONCAT(PMTYPE, mapdev_attr)(vm_paddr_t pa, vm_size_t size, int mode, 5423 int flags) 5424 { 5425 struct pmap_preinit_mapping *ppim; 5426 vm_offset_t va, offset; 5427 vm_page_t m; 5428 vm_size_t tmpsize; 5429 int i; 5430 5431 offset = pa & PAGE_MASK; 5432 size = round_page(offset + size); 5433 pa = pa & PG_FRAME; 5434 5435 if (pa < PMAP_MAP_LOW && pa + size <= PMAP_MAP_LOW) { 5436 va = pa + PMAP_MAP_LOW; 5437 if ((flags & MAPDEV_SETATTR) == 0) 5438 return ((void *)(va + offset)); 5439 } else if (!pmap_initialized) { 5440 va = 0; 5441 for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) { 5442 ppim = pmap_preinit_mapping + i; 5443 if (ppim->va == 0) { 5444 ppim->pa = pa; 5445 ppim->sz = size; 5446 ppim->mode = mode; 5447 ppim->va = virtual_avail; 5448 virtual_avail += size; 5449 va = ppim->va; 5450 break; 5451 } 5452 } 5453 if (va == 0) 5454 panic("%s: too many preinit mappings", __func__); 5455 } else { 5456 /* 5457 * If we have a preinit mapping, re-use it. 
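		 * (Preinit mappings record device ranges that were mapped
		 * before the VM system was fully initialized; matching a
		 * later request against them avoids allocating a second
		 * KVA range for the same device.)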
5458 */ 5459 for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) { 5460 ppim = pmap_preinit_mapping + i; 5461 if (ppim->pa == pa && ppim->sz == size && 5462 (ppim->mode == mode || 5463 (flags & MAPDEV_SETATTR) == 0)) 5464 return ((void *)(ppim->va + offset)); 5465 } 5466 va = kva_alloc(size); 5467 if (va == 0) 5468 panic("%s: Couldn't allocate KVA", __func__); 5469 } 5470 for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE) { 5471 if ((flags & MAPDEV_SETATTR) == 0 && pmap_initialized) { 5472 m = PHYS_TO_VM_PAGE(pa); 5473 if (m != NULL && VM_PAGE_TO_PHYS(m) == pa) { 5474 pmap_kenter_attr(va + tmpsize, pa + tmpsize, 5475 m->md.pat_mode); 5476 continue; 5477 } 5478 } 5479 pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode); 5480 } 5481 pmap_invalidate_range_int(kernel_pmap, va, va + tmpsize); 5482 pmap_invalidate_cache_range(va, va + size); 5483 return ((void *)(va + offset)); 5484 } 5485 5486 static void 5487 __CONCAT(PMTYPE, unmapdev)(void *p, vm_size_t size) 5488 { 5489 struct pmap_preinit_mapping *ppim; 5490 vm_offset_t offset, va; 5491 int i; 5492 5493 va = (vm_offset_t)p; 5494 if (va >= PMAP_MAP_LOW && va <= KERNBASE && va + size <= KERNBASE) 5495 return; 5496 offset = va & PAGE_MASK; 5497 size = round_page(offset + size); 5498 va = trunc_page(va); 5499 for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) { 5500 ppim = pmap_preinit_mapping + i; 5501 if (ppim->va == va && ppim->sz == size) { 5502 if (pmap_initialized) 5503 return; 5504 ppim->pa = 0; 5505 ppim->va = 0; 5506 ppim->sz = 0; 5507 ppim->mode = 0; 5508 if (va + size == virtual_avail) 5509 virtual_avail = va; 5510 return; 5511 } 5512 } 5513 if (pmap_initialized) { 5514 pmap_qremove(va, atop(size)); 5515 kva_free(va, size); 5516 } 5517 } 5518 5519 /* 5520 * Sets the memory attribute for the specified page. 5521 */ 5522 static void 5523 __CONCAT(PMTYPE, page_set_memattr)(vm_page_t m, vm_memattr_t ma) 5524 { 5525 5526 m->md.pat_mode = ma; 5527 if ((m->flags & PG_FICTITIOUS) != 0) 5528 return; 5529 5530 /* 5531 * If "m" is a normal page, flush it from the cache. 5532 * See pmap_invalidate_cache_range(). 5533 * 5534 * First, try to find an existing mapping of the page by sf 5535 * buffer. sf_buf_invalidate_cache() modifies mapping and 5536 * flushes the cache. 5537 */ 5538 if (sf_buf_invalidate_cache(m)) 5539 return; 5540 5541 /* 5542 * If page is not mapped by sf buffer, but CPU does not 5543 * support self snoop, map the page transient and do 5544 * invalidation. In the worst case, whole cache is flushed by 5545 * pmap_invalidate_cache_range(). 
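	 * (A CPU that advertises self-snoop, CPUID_SS, keeps its caches
	 * coherent across aliased mappings with different memory types,
	 * so the flush can be skipped in that case.)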
5546 	 */
5547 	if ((cpu_feature & CPUID_SS) == 0)
5548 		pmap_flush_page(m);
5549 }
5550 
5551 static void
5552 __CONCAT(PMTYPE, flush_page)(vm_page_t m)
5553 {
5554 	pt_entry_t *cmap_pte2;
5555 	struct pcpu *pc;
5556 	vm_offset_t sva, eva;
5557 	bool useclflushopt;
5558 
5559 	useclflushopt = (cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0;
5560 	if (useclflushopt || (cpu_feature & CPUID_CLFSH) != 0) {
5561 		sched_pin();
5562 		pc = get_pcpu();
5563 		cmap_pte2 = pc->pc_cmap_pte2;
5564 		mtx_lock(&pc->pc_cmap_lock);
5565 		if (*cmap_pte2)
5566 			panic("pmap_flush_page: CMAP2 busy");
5567 		*cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) |
5568 		    PG_A | PG_M | pmap_cache_bits(kernel_pmap, m->md.pat_mode,
5569 		    0);
5570 		invlcaddr(pc->pc_cmap_addr2);
5571 		sva = (vm_offset_t)pc->pc_cmap_addr2;
5572 		eva = sva + PAGE_SIZE;
5573 
5574 		/*
5575 		 * Use mfence or sfence despite the ordering implied by
5576 		 * mtx_{un,}lock() because clflush on non-Intel CPUs
5577 		 * and clflushopt are not guaranteed to be ordered by
5578 		 * any other instruction.
5579 		 */
5580 		if (useclflushopt)
5581 			sfence();
5582 		else if (cpu_vendor_id != CPU_VENDOR_INTEL)
5583 			mfence();
5584 		for (; sva < eva; sva += cpu_clflush_line_size) {
5585 			if (useclflushopt)
5586 				clflushopt(sva);
5587 			else
5588 				clflush(sva);
5589 		}
5590 		if (useclflushopt)
5591 			sfence();
5592 		else if (cpu_vendor_id != CPU_VENDOR_INTEL)
5593 			mfence();
5594 		*cmap_pte2 = 0;
5595 		sched_unpin();
5596 		mtx_unlock(&pc->pc_cmap_lock);
5597 	} else
5598 		pmap_invalidate_cache();
5599 }
5600 
5601 /*
5602  * Changes the specified virtual address range's memory type to that given by
5603  * the parameter "mode".  The specified virtual address range must be
5604  * completely contained within the kernel map.
5605  *
5606  * Returns zero if the change completed successfully, and either EINVAL or
5607  * ENOMEM if the change failed.  Specifically, EINVAL is returned if some part
5608  * of the virtual address range was not mapped, and ENOMEM is returned if
5609  * there was insufficient memory available to complete the change.
5610  */
5611 static int
5612 __CONCAT(PMTYPE, change_attr)(vm_offset_t va, vm_size_t size, int mode)
5613 {
5614 	vm_offset_t base, offset, tmpva;
5615 	pd_entry_t *pde;
5616 	pt_entry_t *pte;
5617 	int cache_bits_pte, cache_bits_pde;
5618 	boolean_t changed;
5619 
5620 	base = trunc_page(va);
5621 	offset = va & PAGE_MASK;
5622 	size = round_page(offset + size);
5623 
5624 	/*
5625 	 * Only supported on kernel virtual addresses above the recursive map.
5626 	 */
5627 	if (base < VM_MIN_KERNEL_ADDRESS)
5628 		return (EINVAL);
5629 
5630 	cache_bits_pde = pmap_cache_bits(kernel_pmap, mode, 1);
5631 	cache_bits_pte = pmap_cache_bits(kernel_pmap, mode, 0);
5632 	changed = FALSE;
5633 
5634 	/*
5635 	 * Pages that aren't mapped aren't supported.  Also break down
5636 	 * 2/4MB pages into 4KB pages if required.
5637 	 */
5638 	PMAP_LOCK(kernel_pmap);
5639 	for (tmpva = base; tmpva < base + size; ) {
5640 		pde = pmap_pde(kernel_pmap, tmpva);
5641 		if (*pde == 0) {
5642 			PMAP_UNLOCK(kernel_pmap);
5643 			return (EINVAL);
5644 		}
5645 		if (*pde & PG_PS) {
5646 			/*
5647 			 * If the current 2/4MB page already has
5648 			 * the required memory type, then we need not
5649 			 * demote this page.  Just increment tmpva to
5650 			 * the next 2/4MB page frame.
5651 			 */
5652 			if ((*pde & PG_PDE_CACHE) == cache_bits_pde) {
5653 				tmpva = trunc_4mpage(tmpva) + NBPDR;
5654 				continue;
5655 			}
5656 
5657 			/*
5658 			 * If the current offset aligns with a 2/4MB
5659 			 * page frame and there is at least 2/4MB left
5660 			 * within the range, then we need not break
5661 			 * down this page into 4KB pages.
5662 			 */
5663 			if ((tmpva & PDRMASK) == 0 &&
5664 			    tmpva + PDRMASK < base + size) {
5665 				tmpva += NBPDR;
5666 				continue;
5667 			}
5668 			if (!pmap_demote_pde(kernel_pmap, pde, tmpva)) {
5669 				PMAP_UNLOCK(kernel_pmap);
5670 				return (ENOMEM);
5671 			}
5672 		}
5673 		pte = vtopte(tmpva);
5674 		if (*pte == 0) {
5675 			PMAP_UNLOCK(kernel_pmap);
5676 			return (EINVAL);
5677 		}
5678 		tmpva += PAGE_SIZE;
5679 	}
5680 	PMAP_UNLOCK(kernel_pmap);
5681 
5682 	/*
5683 	 * OK, all the pages exist, so run through them, updating their
5684 	 * cache mode if required.
5685 	 */
5686 	for (tmpva = base; tmpva < base + size; ) {
5687 		pde = pmap_pde(kernel_pmap, tmpva);
5688 		if (*pde & PG_PS) {
5689 			if ((*pde & PG_PDE_CACHE) != cache_bits_pde) {
5690 				pmap_pde_attr(pde, cache_bits_pde);
5691 				changed = TRUE;
5692 			}
5693 			tmpva = trunc_4mpage(tmpva) + NBPDR;
5694 		} else {
5695 			pte = vtopte(tmpva);
5696 			if ((*pte & PG_PTE_CACHE) != cache_bits_pte) {
5697 				pmap_pte_attr(pte, cache_bits_pte);
5698 				changed = TRUE;
5699 			}
5700 			tmpva += PAGE_SIZE;
5701 		}
5702 	}
5703 
5704 	/*
5705 	 * Flush the CPU caches so that no data remains cached with the
5706 	 * old, now-incorrect, memory attributes.
5707 	 */
5708 	if (changed) {
5709 		pmap_invalidate_range_int(kernel_pmap, base, tmpva);
5710 		pmap_invalidate_cache_range(base, tmpva);
5711 	}
5712 	return (0);
5713 }
5714 
5715 /*
5716  * Perform the pmap work for mincore(2).  If the page is not both referenced and
5717  * modified by this pmap, return its physical address so that the caller can
5718  * find other mappings.
5719  */
5720 static int
5721 __CONCAT(PMTYPE, mincore)(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap)
5722 {
5723 	pd_entry_t pde;
5724 	pt_entry_t pte;
5725 	vm_paddr_t pa;
5726 	int val;
5727 
5728 	PMAP_LOCK(pmap);
5729 	pde = *pmap_pde(pmap, addr);
5730 	if (pde != 0) {
5731 		if ((pde & PG_PS) != 0) {
5732 			pte = pde;
5733 			/* Compute the physical address of the 4KB page.
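			 * As a worked example (illustrative, 4MB pages
			 * assumed): with a PDE frame of 0x00800000 and
			 * addr = 0x00923456, pa = (0x00800000 | (addr &
			 * PDRMASK)) & PG_FRAME = 0x00923000.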
*/ 5734 pa = ((pde & PG_PS_FRAME) | (addr & PDRMASK)) & 5735 PG_FRAME; 5736 val = MINCORE_PSIND(1); 5737 } else { 5738 pte = pmap_pte_ufast(pmap, addr, pde); 5739 pa = pte & PG_FRAME; 5740 val = 0; 5741 } 5742 } else { 5743 pte = 0; 5744 pa = 0; 5745 val = 0; 5746 } 5747 if ((pte & PG_V) != 0) { 5748 val |= MINCORE_INCORE; 5749 if ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 5750 val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER; 5751 if ((pte & PG_A) != 0) 5752 val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER; 5753 } 5754 if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) != 5755 (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && 5756 (pte & (PG_MANAGED | PG_V)) == (PG_MANAGED | PG_V)) { 5757 *pap = pa; 5758 } 5759 PMAP_UNLOCK(pmap); 5760 return (val); 5761 } 5762 5763 static void 5764 __CONCAT(PMTYPE, activate)(struct thread *td) 5765 { 5766 pmap_t pmap, oldpmap; 5767 u_int cpuid; 5768 u_int32_t cr3; 5769 5770 critical_enter(); 5771 pmap = vmspace_pmap(td->td_proc->p_vmspace); 5772 oldpmap = PCPU_GET(curpmap); 5773 cpuid = PCPU_GET(cpuid); 5774 #if defined(SMP) 5775 CPU_CLR_ATOMIC(cpuid, &oldpmap->pm_active); 5776 CPU_SET_ATOMIC(cpuid, &pmap->pm_active); 5777 #else 5778 CPU_CLR(cpuid, &oldpmap->pm_active); 5779 CPU_SET(cpuid, &pmap->pm_active); 5780 #endif 5781 #ifdef PMAP_PAE_COMP 5782 cr3 = vtophys(pmap->pm_pdpt); 5783 #else 5784 cr3 = vtophys(pmap->pm_pdir); 5785 #endif 5786 /* 5787 * pmap_activate is for the current thread on the current cpu 5788 */ 5789 td->td_pcb->pcb_cr3 = cr3; 5790 PCPU_SET(curpmap, pmap); 5791 critical_exit(); 5792 } 5793 5794 static void 5795 __CONCAT(PMTYPE, activate_boot)(pmap_t pmap) 5796 { 5797 u_int cpuid; 5798 5799 cpuid = PCPU_GET(cpuid); 5800 #if defined(SMP) 5801 CPU_SET_ATOMIC(cpuid, &pmap->pm_active); 5802 #else 5803 CPU_SET(cpuid, &pmap->pm_active); 5804 #endif 5805 PCPU_SET(curpmap, pmap); 5806 } 5807 5808 /* 5809 * Increase the starting virtual address of the given mapping if a 5810 * different alignment might result in more superpage mappings. 
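 * For example (illustrative, 4MB superpages assumed): if the object
 * offset is 0x00123000, then superpage_offset = 0x123000, and, for a
 * sufficiently large request, a proposed *addr of 0x20000000 is
 * advanced to 0x20123000 so that the superpage-aligned portions of the
 * object can be mapped with 2/4MB page mappings.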

static void
__CONCAT(PMTYPE, activate)(struct thread *td)
{
	pmap_t pmap, oldpmap;
	u_int cpuid;
	u_int32_t cr3;

	critical_enter();
	pmap = vmspace_pmap(td->td_proc->p_vmspace);
	oldpmap = PCPU_GET(curpmap);
	cpuid = PCPU_GET(cpuid);
#if defined(SMP)
	CPU_CLR_ATOMIC(cpuid, &oldpmap->pm_active);
	CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
#else
	CPU_CLR(cpuid, &oldpmap->pm_active);
	CPU_SET(cpuid, &pmap->pm_active);
#endif
#ifdef PMAP_PAE_COMP
	cr3 = vtophys(pmap->pm_pdpt);
#else
	cr3 = vtophys(pmap->pm_pdir);
#endif
	/*
	 * pmap_activate is called only for the current thread on the
	 * current CPU.
	 */
	td->td_pcb->pcb_cr3 = cr3;
	PCPU_SET(curpmap, pmap);
	critical_exit();
}

static void
__CONCAT(PMTYPE, activate_boot)(pmap_t pmap)
{
	u_int cpuid;

	cpuid = PCPU_GET(cpuid);
#if defined(SMP)
	CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
#else
	CPU_SET(cpuid, &pmap->pm_active);
#endif
	PCPU_SET(curpmap, pmap);
}

/*
 * Increase the starting virtual address of the given mapping if a
 * different alignment might result in more superpage mappings.
 */
static void
__CONCAT(PMTYPE, align_superpage)(vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr, vm_size_t size)
{
	vm_offset_t superpage_offset;

	if (size < NBPDR)
		return;
	if (object != NULL && (object->flags & OBJ_COLORED) != 0)
		offset += ptoa(object->pg_color);
	superpage_offset = offset & PDRMASK;
	if (size - ((NBPDR - superpage_offset) & PDRMASK) < NBPDR ||
	    (*addr & PDRMASK) == superpage_offset)
		return;
	if ((*addr & PDRMASK) < superpage_offset)
		*addr = (*addr & ~PDRMASK) + superpage_offset;
	else
		*addr = ((*addr + PDRMASK) & ~PDRMASK) + superpage_offset;
}

static vm_offset_t
__CONCAT(PMTYPE, quick_enter_page)(vm_page_t m)
{
	vm_offset_t qaddr;
	pt_entry_t *pte;

	critical_enter();
	qaddr = PCPU_GET(qmap_addr);
	pte = vtopte(qaddr);

	KASSERT(*pte == 0,
	    ("pmap_quick_enter_page: PTE busy %#jx", (uintmax_t)*pte));
	*pte = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
	    pmap_cache_bits(kernel_pmap, pmap_page_get_memattr(m), 0);
	invlpg(qaddr);

	return (qaddr);
}

static void
__CONCAT(PMTYPE, quick_remove_page)(vm_offset_t addr)
{
	vm_offset_t qaddr;
	pt_entry_t *pte;

	qaddr = PCPU_GET(qmap_addr);
	pte = vtopte(qaddr);

	KASSERT(*pte != 0, ("pmap_quick_remove_page: PTE not in use"));
	KASSERT(addr == qaddr, ("pmap_quick_remove_page: invalid address"));

	*pte = 0;
	critical_exit();
}
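
/*
 * Hypothetical usage sketch for the per-CPU quick-map window above ("m"
 * and "buf" are assumed to exist): the calls must bracket a short,
 * non-sleeping operation, since pmap_quick_enter_page() runs inside a
 * critical section.
 */
#if 0
	vm_offset_t qva;

	qva = pmap_quick_enter_page(m);		/* map page m per-CPU */
	bcopy((void *)qva, buf, PAGE_SIZE);	/* must not sleep here */
	pmap_quick_remove_page(qva);		/* unmap; exits critical */
#endif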

static vmem_t *pmap_trm_arena;
static vmem_addr_t pmap_trm_arena_last = PMAP_TRM_MIN_ADDRESS;
static int trm_guard = PAGE_SIZE;

static int
pmap_trm_import(void *unused __unused, vmem_size_t size, int flags,
    vmem_addr_t *addrp)
{
	vm_page_t m;
	vmem_addr_t af, addr, prev_addr;
	pt_entry_t *trm_pte;

	prev_addr = atomic_load_int(&pmap_trm_arena_last);
	size = round_page(size) + trm_guard;
	for (;;) {
		if (prev_addr + size < prev_addr || prev_addr + size < size ||
		    prev_addr + size > PMAP_TRM_MAX_ADDRESS)
			return (ENOMEM);
		addr = prev_addr + size;
		if (atomic_fcmpset_int(&pmap_trm_arena_last, &prev_addr, addr))
			break;
	}
	prev_addr += trm_guard;
	trm_pte = PTmap + atop(prev_addr);
	for (af = prev_addr; af < addr; af += PAGE_SIZE) {
		m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_WAITOK);
		pte_store(&trm_pte[atop(af - prev_addr)], VM_PAGE_TO_PHYS(m) |
		    PG_M | PG_A | PG_RW | PG_V | pgeflag |
		    pmap_cache_bits(kernel_pmap, VM_MEMATTR_DEFAULT, FALSE));
	}
	*addrp = prev_addr;
	return (0);
}
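
/*
 * The loop above is a lock-free bump-pointer reservation: each importer
 * claims [prev_addr, prev_addr + size) by advancing pmap_trm_arena_last
 * with atomic_fcmpset_int(), which refreshes prev_addr with the current
 * value on failure.  A minimal sketch of the same pattern over a
 * hypothetical counter "last":
 */
#if 0
	u_int old, new;

	old = atomic_load_int(&last);
	do {
		new = old + size;	/* overflow checks omitted */
	} while (!atomic_fcmpset_int(&last, &old, new));
	/* [old, new) now belongs exclusively to this caller. */
#endif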

void
pmap_init_trm(void)
{
	vm_page_t pd_m;

	TUNABLE_INT_FETCH("machdep.trm_guard", &trm_guard);
	if ((trm_guard & PAGE_MASK) != 0)
		trm_guard = 0;
	pmap_trm_arena = vmem_create("i386trampoline", 0, 0, 1, 0, M_WAITOK);
	vmem_set_import(pmap_trm_arena, pmap_trm_import, NULL, NULL, PAGE_SIZE);
	pd_m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_WAITOK |
	    VM_ALLOC_ZERO);
	PTD[TRPTDI] = VM_PAGE_TO_PHYS(pd_m) | PG_M | PG_A | PG_RW | PG_V |
	    pmap_cache_bits(kernel_pmap, VM_MEMATTR_DEFAULT, TRUE);
}

static void *
__CONCAT(PMTYPE, trm_alloc)(size_t size, int flags)
{
	vmem_addr_t res;
	int error;

	MPASS((flags & ~(M_WAITOK | M_NOWAIT | M_ZERO)) == 0);
	error = vmem_xalloc(pmap_trm_arena, roundup2(size, 4), sizeof(int),
	    0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX, flags | M_FIRSTFIT, &res);
	if (error != 0)
		return (NULL);
	if ((flags & M_ZERO) != 0)
		bzero((void *)res, size);
	return ((void *)res);
}

static void
__CONCAT(PMTYPE, trm_free)(void *addr, size_t size)
{

	vmem_free(pmap_trm_arena, (uintptr_t)addr, roundup2(size, 4));
}

static void
__CONCAT(PMTYPE, ksetrw)(vm_offset_t va)
{

	*vtopte(va) |= PG_RW;
}

static void
__CONCAT(PMTYPE, remap_lowptdi)(bool enable)
{

	PTD[KPTDI] = enable ? PTD[LOWPTDI] : 0;
	invltlb_glob();
}

static vm_offset_t
__CONCAT(PMTYPE, get_map_low)(void)
{

	return (PMAP_MAP_LOW);
}

static vm_offset_t
__CONCAT(PMTYPE, get_vm_maxuser_address)(void)
{

	return (VM_MAXUSER_ADDRESS);
}

static vm_paddr_t
__CONCAT(PMTYPE, pg_frame)(vm_paddr_t pa)
{

	return (pa & PG_FRAME);
}

static void
__CONCAT(PMTYPE, sf_buf_map)(struct sf_buf *sf)
{
	pt_entry_t opte, *ptep;

	/*
	 * Update the sf_buf's virtual-to-physical mapping, flushing the
	 * virtual address from the TLB.  Since the reference count for
	 * the sf_buf's old mapping was zero, that mapping is not
	 * currently in use.  Consequently, there is no need to exchange
	 * the old and new PTEs atomically, even under PAE.
	 */
	ptep = vtopte(sf->kva);
	opte = *ptep;
	*ptep = VM_PAGE_TO_PHYS(sf->m) | PG_RW | PG_V |
	    pmap_cache_bits(kernel_pmap, sf->m->md.pat_mode, 0);

	/*
	 * Avoid unnecessary TLB invalidations: If the sf_buf's old
	 * virtual-to-physical mapping was not used, then any processor
	 * that has invalidated the sf_buf's virtual address from its TLB
	 * since the last used mapping need not invalidate again.
	 */
#ifdef SMP
	if ((opte & (PG_V | PG_A)) == (PG_V | PG_A))
		CPU_ZERO(&sf->cpumask);
#else
	if ((opte & (PG_V | PG_A)) == (PG_V | PG_A))
		pmap_invalidate_page_int(kernel_pmap, sf->kva);
#endif
}

static void
__CONCAT(PMTYPE, cp_slow0_map)(vm_offset_t kaddr, int plen, vm_page_t *ma)
{
	pt_entry_t *pte;
	int i;

	for (i = 0, pte = vtopte(kaddr); i < plen; i++, pte++) {
		*pte = PG_V | PG_RW | PG_A | PG_M | VM_PAGE_TO_PHYS(ma[i]) |
		    pmap_cache_bits(kernel_pmap, pmap_page_get_memattr(ma[i]),
		    FALSE);
		invlpg(kaddr + ptoa(i));
	}
}

static u_int
__CONCAT(PMTYPE, get_kcr3)(void)
{

#ifdef PMAP_PAE_COMP
	return ((u_int)IdlePDPT);
#else
	return ((u_int)IdlePTD);
#endif
}

static u_int
__CONCAT(PMTYPE, get_cr3)(pmap_t pmap)
{

#ifdef PMAP_PAE_COMP
	return ((u_int)vtophys(pmap->pm_pdpt));
#else
	return ((u_int)vtophys(pmap->pm_pdir));
#endif
}

static caddr_t
__CONCAT(PMTYPE, cmap3)(vm_paddr_t pa, u_int pte_bits)
{
	pt_entry_t *pte;

	pte = CMAP3;
	*pte = pa | pte_bits;
	invltlb();
	return (CADDR3);
}

static void
__CONCAT(PMTYPE, basemem_setup)(u_int basemem)
{
	pt_entry_t *pte;
	int i;

	/*
	 * Map pages between basemem and ISA_HOLE_START, if any, r/w into
	 * the vm86 page table so that vm86 can scribble on them using
	 * the vm86 map too.  XXX: why 2 ways for this and only 1 way for
	 * page 0, at least as initialized here?
	 */
	pte = (pt_entry_t *)vm86paddr;
	/* basemem is in KB; one PTE maps a 4KB page, 160 PTEs cover 640KB. */
	for (i = basemem / 4; i < 160; i++)
		pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U;
}

struct bios16_pmap_handle {
	pt_entry_t	*pte;
	pd_entry_t	*ptd;
	pt_entry_t	orig_ptd;
};

static void *
__CONCAT(PMTYPE, bios16_enter)(void)
{
	struct bios16_pmap_handle *h;

	/*
	 * No page table, so create one and install it.
	 */
	h = malloc(sizeof(struct bios16_pmap_handle), M_TEMP, M_WAITOK);
	h->pte = (pt_entry_t *)malloc(PAGE_SIZE, M_TEMP, M_WAITOK);
	h->ptd = IdlePTD;
	*h->pte = vm86phystk | PG_RW | PG_V;
	h->orig_ptd = *h->ptd;
	*h->ptd = vtophys(h->pte) | PG_RW | PG_V;
	pmap_invalidate_all_int(kernel_pmap);	/* XXX insurance for now */
	return (h);
}

static void
__CONCAT(PMTYPE, bios16_leave)(void *arg)
{
	struct bios16_pmap_handle *h;

	h = arg;
	*h->ptd = h->orig_ptd;		/* remove page table */
	/*
	 * XXX only needs to be invlpg(0) but that doesn't work on the 386
	 */
	pmap_invalidate_all_int(kernel_pmap);
	free(h->pte, M_TEMP);		/* ... and free it */
}

struct pmap_kernel_map_range {
	vm_offset_t sva;
	pt_entry_t attrs;
	int ptes;
	int pdes;
	int pdpes;
};

static void
sysctl_kmaps_dump(struct sbuf *sb, struct pmap_kernel_map_range *range,
    vm_offset_t eva)
{
	const char *mode;
	int i, pat_idx;

	if (eva <= range->sva)
		return;

	pat_idx = pmap_pat_index(kernel_pmap, range->attrs, true);
	for (i = 0; i < PAT_INDEX_SIZE; i++)
		if (pat_index[i] == pat_idx)
			break;

	switch (i) {
	case PAT_WRITE_BACK:
		mode = "WB";
		break;
	case PAT_WRITE_THROUGH:
		mode = "WT";
		break;
	case PAT_UNCACHEABLE:
		mode = "UC";
		break;
	case PAT_UNCACHED:
		mode = "U-";
		break;
	case PAT_WRITE_PROTECTED:
		mode = "WP";
		break;
	case PAT_WRITE_COMBINING:
		mode = "WC";
		break;
	default:
		printf("%s: unknown PAT mode %#x for range 0x%08x-0x%08x\n",
		    __func__, pat_idx, range->sva, eva);
		mode = "??";
		break;
	}

	sbuf_printf(sb, "0x%08x-0x%08x r%c%c%c%c %s %d %d %d\n",
	    range->sva, eva,
	    (range->attrs & PG_RW) != 0 ? 'w' : '-',
	    (range->attrs & pg_nx) != 0 ? '-' : 'x',
	    (range->attrs & PG_U) != 0 ? 'u' : 's',
	    (range->attrs & PG_G) != 0 ? 'g' : '-',
	    mode, range->pdpes, range->pdes, range->ptes);

	/* Reset to sentinel value. */
	range->sva = 0xffffffff;
}
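
/*
 * For reference, a line emitted by sysctl_kmaps_dump() looks like
 * (hypothetical values):
 *
 *	0x00c00000-0x01400000 rw-sg WB 0 2 0
 *
 * that is, the address range, its r/w/x/u-or-s/g flags, the PAT mode,
 * and the number of PDPEs, PDEs, and PTEs backing the range.
 */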

/*
 * Determine whether the attributes specified by a page table entry match those
 * being tracked by the current range.  This is not quite as simple as a direct
 * flag comparison since some PAT modes have multiple representations.
 */
static bool
sysctl_kmaps_match(struct pmap_kernel_map_range *range, pt_entry_t attrs)
{
	pt_entry_t diff, mask;

	mask = pg_nx | PG_G | PG_RW | PG_U | PG_PDE_CACHE;
	diff = (range->attrs ^ attrs) & mask;
	if (diff == 0)
		return (true);
	if ((diff & ~PG_PDE_PAT) == 0 &&
	    pmap_pat_index(kernel_pmap, range->attrs, true) ==
	    pmap_pat_index(kernel_pmap, attrs, true))
		return (true);
	return (false);
}

static void
sysctl_kmaps_reinit(struct pmap_kernel_map_range *range, vm_offset_t va,
    pt_entry_t attrs)
{

	memset(range, 0, sizeof(*range));
	range->sva = va;
	range->attrs = attrs;
}

/*
 * Given a leaf PTE, derive the mapping's attributes.  If they do not match
 * those of the current run, dump the address range and its attributes, and
 * begin a new run.
 */
static void
sysctl_kmaps_check(struct sbuf *sb, struct pmap_kernel_map_range *range,
    vm_offset_t va, pd_entry_t pde, pt_entry_t pte)
{
	pt_entry_t attrs;

	attrs = pde & (PG_RW | PG_U | pg_nx);

	if ((pde & PG_PS) != 0) {
		attrs |= pde & (PG_G | PG_PDE_CACHE);
	} else if (pte != 0) {
		attrs |= pte & pg_nx;
		attrs &= pg_nx | (pte & (PG_RW | PG_U));
		attrs |= pte & (PG_G | PG_PTE_CACHE);

		/* Canonicalize by always using the PDE PAT bit. */
		if ((attrs & PG_PTE_PAT) != 0)
			attrs ^= PG_PDE_PAT | PG_PTE_PAT;
	}

	if (range->sva > va || !sysctl_kmaps_match(range, attrs)) {
		sysctl_kmaps_dump(sb, range, va);
		sysctl_kmaps_reinit(range, va, attrs);
	}
}
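
/*
 * Note on the canonicalization above: in a 4KB PTE the PAT selector is
 * bit 7 (PG_PTE_PAT), but in a PG_PS page-directory entry bit 7 is PG_PS
 * itself, so the selector moves to bit 12 (PG_PDE_PAT).  The XOR clears
 * PG_PTE_PAT and sets PG_PDE_PAT, so attribute sets gathered from PTEs
 * and PDEs compare equal in sysctl_kmaps_match().
 */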

static int
__CONCAT(PMTYPE, sysctl_kmaps)(SYSCTL_HANDLER_ARGS)
{
	struct pmap_kernel_map_range range;
	struct sbuf sbuf, *sb;
	pd_entry_t pde;
	pt_entry_t *pt, pte;
	vm_offset_t sva;
	int error;
	u_int i, k;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sb = &sbuf;
	sbuf_new_for_sysctl(sb, NULL, PAGE_SIZE, req);

	/* Sentinel value. */
	range.sva = 0xffffffff;

	/*
	 * Iterate over the kernel page tables without holding the
	 * kernel pmap lock.  Kernel page table pages are never freed,
	 * so at worst we will observe inconsistencies in the output.
	 */
	for (sva = 0, i = 0; i < NPTEPG * NPGPTD * NPDEPG;) {
		if (i == 0)
			sbuf_printf(sb, "\nLow PDE:\n");
		else if (i == LOWPTDI * NPTEPG)
			sbuf_printf(sb, "Low PDE dup:\n");
		else if (i == PTDPTDI * NPTEPG)
			sbuf_printf(sb, "Recursive map:\n");
		else if (i == KERNPTDI * NPTEPG)
			sbuf_printf(sb, "Kernel base:\n");
		else if (i == TRPTDI * NPTEPG)
			sbuf_printf(sb, "Trampoline:\n");
		pde = IdlePTD[sva >> PDRSHIFT];
		if ((pde & PG_V) == 0) {
			sva = rounddown2(sva, NBPDR);
			sysctl_kmaps_dump(sb, &range, sva);
			sva += NBPDR;
			i += NPTEPG;
			continue;
		}
		if ((pde & PG_PS) != 0) {
			sysctl_kmaps_check(sb, &range, sva, pde, 0);
			range.pdes++;
			sva += NBPDR;
			i += NPTEPG;
			continue;
		}
		for (pt = vtopte(sva), k = 0; k < NPTEPG; i++, k++, pt++,
		    sva += PAGE_SIZE) {
			pte = *pt;
			if ((pte & PG_V) == 0) {
				sysctl_kmaps_dump(sb, &range, sva);
				continue;
			}
			sysctl_kmaps_check(sb, &range, sva, pde, pte);
			range.ptes++;
		}
	}

	error = sbuf_finish(sb);
	sbuf_delete(sb);
	return (error);
}

#define	PMM(a)						\
	.pm_##a = __CONCAT(PMTYPE, a),

struct pmap_methods __CONCAT(PMTYPE, methods) = {
	PMM(ksetrw)
	PMM(remap_lower)
	PMM(remap_lowptdi)
	PMM(align_superpage)
	PMM(quick_enter_page)
	PMM(quick_remove_page)
	PMM(trm_alloc)
	PMM(trm_free)
	PMM(get_map_low)
	PMM(get_vm_maxuser_address)
	PMM(kextract)
	PMM(pg_frame)
	PMM(sf_buf_map)
	PMM(cp_slow0_map)
	PMM(get_kcr3)
	PMM(get_cr3)
	PMM(cmap3)
	PMM(basemem_setup)
	PMM(set_nx)
	PMM(bios16_enter)
	PMM(bios16_leave)
	PMM(bootstrap)
	PMM(is_valid_memattr)
	PMM(cache_bits)
	PMM(ps_enabled)
	PMM(pinit0)
	PMM(pinit)
	PMM(activate)
	PMM(activate_boot)
	PMM(advise)
	PMM(clear_modify)
	PMM(change_attr)
	PMM(mincore)
	PMM(copy)
	PMM(copy_page)
	PMM(copy_pages)
	PMM(zero_page)
	PMM(zero_page_area)
	PMM(enter)
	PMM(enter_object)
	PMM(enter_quick)
	PMM(kenter_temporary)
	PMM(object_init_pt)
	PMM(unwire)
	PMM(page_exists_quick)
	PMM(page_wired_mappings)
	PMM(page_is_mapped)
	PMM(remove_pages)
	PMM(is_modified)
	PMM(is_prefaultable)
	PMM(is_referenced)
	PMM(remove_write)
	PMM(ts_referenced)
	PMM(mapdev_attr)
	PMM(unmapdev)
	PMM(page_set_memattr)
	PMM(extract)
	PMM(extract_and_hold)
	PMM(map)
	PMM(qenter)
	PMM(qremove)
	PMM(release)
	PMM(remove)
	PMM(protect)
	PMM(remove_all)
	PMM(init)
	PMM(init_pat)
	PMM(growkernel)
	PMM(invalidate_page)
	PMM(invalidate_range)
	PMM(invalidate_all)
	PMM(invalidate_cache)
	PMM(flush_page)
	PMM(kenter)
	PMM(kremove)
	PMM(sysctl_kmaps)
};
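
/*
 * For reference, PMM() depends on PMTYPE, which is defined by the
 * translation unit that includes this file.  Assuming PMTYPE expands to
 * pmap_nopae_, for example, "PMM(activate)" becomes
 *
 *	.pm_activate = pmap_nopae_activate,
 *
 * so the same source provides both the PAE and non-PAE method tables.
 */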