1 /*- 2 * SPDX-License-Identifier: BSD-4-Clause 3 * 4 * Copyright (c) 1991 Regents of the University of California. 5 * All rights reserved. 6 * Copyright (c) 1994 John S. Dyson 7 * All rights reserved. 8 * Copyright (c) 1994 David Greenman 9 * All rights reserved. 10 * Copyright (c) 2005-2010 Alan L. Cox <alc@cs.rice.edu> 11 * All rights reserved. 12 * 13 * This code is derived from software contributed to Berkeley by 14 * the Systems Programming Group of the University of Utah Computer 15 * Science Department and William Jolitz of UUNET Technologies Inc. 16 * 17 * Redistribution and use in source and binary forms, with or without 18 * modification, are permitted provided that the following conditions 19 * are met: 20 * 1. Redistributions of source code must retain the above copyright 21 * notice, this list of conditions and the following disclaimer. 22 * 2. Redistributions in binary form must reproduce the above copyright 23 * notice, this list of conditions and the following disclaimer in the 24 * documentation and/or other materials provided with the distribution. 25 * 3. All advertising materials mentioning features or use of this software 26 * must display the following acknowledgement: 27 * This product includes software developed by the University of 28 * California, Berkeley and its contributors. 29 * 4. Neither the name of the University nor the names of its contributors 30 * may be used to endorse or promote products derived from this software 31 * without specific prior written permission. 32 * 33 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 34 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 35 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 36 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 37 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 38 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 39 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 40 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 41 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 42 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 43 * SUCH DAMAGE. 44 * 45 * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91 46 */ 47 /*- 48 * Copyright (c) 2003 Networks Associates Technology, Inc. 49 * All rights reserved. 50 * Copyright (c) 2018 The FreeBSD Foundation 51 * All rights reserved. 52 * 53 * This software was developed for the FreeBSD Project by Jake Burkholder, 54 * Safeport Network Services, and Network Associates Laboratories, the 55 * Security Research Division of Network Associates, Inc. under 56 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA 57 * CHATS research program. 58 * 59 * Portions of this software were developed by 60 * Konstantin Belousov <kib@FreeBSD.org> under sponsorship from 61 * the FreeBSD Foundation. 62 * 63 * Redistribution and use in source and binary forms, with or without 64 * modification, are permitted provided that the following conditions 65 * are met: 66 * 1. Redistributions of source code must retain the above copyright 67 * notice, this list of conditions and the following disclaimer. 68 * 2. Redistributions in binary form must reproduce the above copyright 69 * notice, this list of conditions and the following disclaimer in the 70 * documentation and/or other materials provided with the distribution. 71 * 72 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 73 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 74 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 75 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 76 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 77 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 78 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 79 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 80 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 81 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 82 * SUCH DAMAGE. 83 */ 84 85 #include <sys/cdefs.h> 86 __FBSDID("$FreeBSD$"); 87 88 /* 89 * Manages physical address maps. 90 * 91 * Since the information managed by this module is 92 * also stored by the logical address mapping module, 93 * this module may throw away valid virtual-to-physical 94 * mappings at almost any time. However, invalidations 95 * of virtual-to-physical mappings must be done as 96 * requested. 97 * 98 * In order to cope with hardware architectures which 99 * make virtual-to-physical map invalidates expensive, 100 * this module may delay invalidate or reduced protection 101 * operations until such time as they are actually 102 * necessary. This module is given full information as 103 * to which processors are currently using which maps, 104 * and to when physical maps must be made correct. 105 */ 106 107 #include "opt_apic.h" 108 #include "opt_cpu.h" 109 #include "opt_pmap.h" 110 #include "opt_smp.h" 111 #include "opt_vm.h" 112 113 #include <sys/param.h> 114 #include <sys/systm.h> 115 #include <sys/kernel.h> 116 #include <sys/ktr.h> 117 #include <sys/lock.h> 118 #include <sys/malloc.h> 119 #include <sys/mman.h> 120 #include <sys/msgbuf.h> 121 #include <sys/mutex.h> 122 #include <sys/proc.h> 123 #include <sys/rwlock.h> 124 #include <sys/sbuf.h> 125 #include <sys/sf_buf.h> 126 #include <sys/sx.h> 127 #include <sys/vmmeter.h> 128 #include <sys/sched.h> 129 #include <sys/sysctl.h> 130 #include <sys/smp.h> 131 #include <sys/vmem.h> 132 133 #include <vm/vm.h> 134 #include <vm/vm_param.h> 135 #include <vm/vm_kern.h> 136 #include <vm/vm_page.h> 137 #include <vm/vm_map.h> 138 #include <vm/vm_object.h> 139 #include <vm/vm_extern.h> 140 #include <vm/vm_pageout.h> 141 #include <vm/vm_pager.h> 142 #include <vm/vm_phys.h> 143 #include <vm/vm_radix.h> 144 #include <vm/vm_reserv.h> 145 #include <vm/uma.h> 146 147 #ifdef DEV_APIC 148 #include <sys/bus.h> 149 #include <machine/intr_machdep.h> 150 #include <x86/apicvar.h> 151 #endif 152 #include <x86/ifunc.h> 153 #include <machine/bootinfo.h> 154 #include <machine/cpu.h> 155 #include <machine/cputypes.h> 156 #include <machine/md_var.h> 157 #include <machine/pcb.h> 158 #include <machine/specialreg.h> 159 #ifdef SMP 160 #include <machine/smp.h> 161 #endif 162 #include <machine/pmap_base.h> 163 164 #if !defined(DIAGNOSTIC) 165 #ifdef __GNUC_GNU_INLINE__ 166 #define PMAP_INLINE __attribute__((__gnu_inline__)) inline 167 #else 168 #define PMAP_INLINE extern inline 169 #endif 170 #else 171 #define PMAP_INLINE 172 #endif 173 174 #ifdef PV_STATS 175 #define PV_STAT(x) do { x ; } while (0) 176 #else 177 #define PV_STAT(x) do { } while (0) 178 #endif 179 180 #define pa_index(pa) ((pa) >> PDRSHIFT) 181 #define pa_to_pvh(pa) (&pv_table[pa_index(pa)]) 182 183 /* 184 * PTmap is recursive pagemap at top of virtual address space. 185 * Within PTmap, the page directory can be found (third indirection). 186 */ 187 #define PTmap ((pt_entry_t *)(PTDPTDI << PDRSHIFT)) 188 #define PTD ((pd_entry_t *)((PTDPTDI << PDRSHIFT) + (PTDPTDI * PAGE_SIZE))) 189 #define PTDpde ((pd_entry_t *)((PTDPTDI << PDRSHIFT) + (PTDPTDI * PAGE_SIZE) + \ 190 (PTDPTDI * PDESIZE))) 191 192 /* 193 * Translate a virtual address to the kernel virtual address of its page table 194 * entry (PTE). This can be used recursively. If the address of a PTE as 195 * previously returned by this macro is itself given as the argument, then the 196 * address of the page directory entry (PDE) that maps the PTE will be 197 * returned. 198 * 199 * This macro may be used before pmap_bootstrap() is called. 200 */ 201 #define vtopte(va) (PTmap + i386_btop(va)) 202 203 /* 204 * Get PDEs and PTEs for user/kernel address space 205 */ 206 #define pmap_pde(m, v) (&((m)->pm_pdir[(vm_offset_t)(v) >> PDRSHIFT])) 207 #define pdir_pde(m, v) (m[(vm_offset_t)(v) >> PDRSHIFT]) 208 209 #define pmap_pde_v(pte) ((*(int *)pte & PG_V) != 0) 210 #define pmap_pte_w(pte) ((*(int *)pte & PG_W) != 0) 211 #define pmap_pte_m(pte) ((*(int *)pte & PG_M) != 0) 212 #define pmap_pte_u(pte) ((*(int *)pte & PG_A) != 0) 213 #define pmap_pte_v(pte) ((*(int *)pte & PG_V) != 0) 214 215 #define pmap_pte_set_w(pte, v) ((v) ? atomic_set_int((u_int *)(pte), PG_W) : \ 216 atomic_clear_int((u_int *)(pte), PG_W)) 217 #define pmap_pte_set_prot(pte, v) ((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v))) 218 219 static int pgeflag = 0; /* PG_G or-in */ 220 static int pseflag = 0; /* PG_PS or-in */ 221 222 static int nkpt = NKPT; 223 224 #ifdef PMAP_PAE_COMP 225 pt_entry_t pg_nx; 226 static uma_zone_t pdptzone; 227 #else 228 #define pg_nx 0 229 #endif 230 231 _Static_assert(VM_MAXUSER_ADDRESS == VADDR(TRPTDI, 0), "VM_MAXUSER_ADDRESS"); 232 _Static_assert(VM_MAX_KERNEL_ADDRESS <= VADDR(PTDPTDI, 0), 233 "VM_MAX_KERNEL_ADDRESS"); 234 _Static_assert(PMAP_MAP_LOW == VADDR(LOWPTDI, 0), "PMAP_MAP_LOW"); 235 _Static_assert(KERNLOAD == (KERNPTDI << PDRSHIFT), "KERNLOAD"); 236 237 extern int pat_works; 238 extern int pg_ps_enabled; 239 240 extern int elf32_nxstack; 241 242 #define PAT_INDEX_SIZE 8 243 static int pat_index[PAT_INDEX_SIZE]; /* cache mode to PAT index conversion */ 244 245 /* 246 * pmap_mapdev support pre initialization (i.e. console) 247 */ 248 #define PMAP_PREINIT_MAPPING_COUNT 8 249 static struct pmap_preinit_mapping { 250 vm_paddr_t pa; 251 vm_offset_t va; 252 vm_size_t sz; 253 int mode; 254 } pmap_preinit_mapping[PMAP_PREINIT_MAPPING_COUNT]; 255 static int pmap_initialized; 256 257 static struct rwlock_padalign pvh_global_lock; 258 259 /* 260 * Data for the pv entry allocation mechanism 261 */ 262 static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks); 263 extern int pv_entry_max, pv_entry_count; 264 static int pv_entry_high_water = 0; 265 static struct md_page *pv_table; 266 extern int shpgperproc; 267 268 static struct pv_chunk *pv_chunkbase; /* KVA block for pv_chunks */ 269 static int pv_maxchunks; /* How many chunks we have KVA for */ 270 static vm_offset_t pv_vafree; /* freelist stored in the PTE */ 271 272 /* 273 * All those kernel PT submaps that BSD is so fond of 274 */ 275 static pt_entry_t *CMAP3; 276 static pd_entry_t *KPTD; 277 static caddr_t CADDR3; 278 279 /* 280 * Crashdump maps. 281 */ 282 static caddr_t crashdumpmap; 283 284 static pt_entry_t *PMAP1 = NULL, *PMAP2, *PMAP3; 285 static pt_entry_t *PADDR1 = NULL, *PADDR2, *PADDR3; 286 #ifdef SMP 287 static int PMAP1cpu, PMAP3cpu; 288 extern int PMAP1changedcpu; 289 #endif 290 extern int PMAP1changed; 291 extern int PMAP1unchanged; 292 static struct mtx PMAP2mutex; 293 294 /* 295 * Internal flags for pmap_enter()'s helper functions. 296 */ 297 #define PMAP_ENTER_NORECLAIM 0x1000000 /* Don't reclaim PV entries. */ 298 #define PMAP_ENTER_NOREPLACE 0x2000000 /* Don't replace mappings. */ 299 300 static void free_pv_chunk(struct pv_chunk *pc); 301 static void free_pv_entry(pmap_t pmap, pv_entry_t pv); 302 static pv_entry_t get_pv_entry(pmap_t pmap, boolean_t try); 303 static void pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa); 304 static bool pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, pd_entry_t pde, 305 u_int flags); 306 #if VM_NRESERVLEVEL > 0 307 static void pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa); 308 #endif 309 static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va); 310 static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, 311 vm_offset_t va); 312 static int pmap_pvh_wired_mappings(struct md_page *pvh, int count); 313 314 static void pmap_abort_ptp(pmap_t pmap, vm_offset_t va, vm_page_t mpte); 315 static boolean_t pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va); 316 static bool pmap_enter_4mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, 317 vm_prot_t prot); 318 static int pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde, 319 u_int flags, vm_page_t m); 320 static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, 321 vm_page_t m, vm_prot_t prot, vm_page_t mpte); 322 static int pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte, bool promoted); 323 static void pmap_invalidate_pde_page(pmap_t pmap, vm_offset_t va, 324 pd_entry_t pde); 325 static void pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte); 326 static boolean_t pmap_is_modified_pvh(struct md_page *pvh); 327 static boolean_t pmap_is_referenced_pvh(struct md_page *pvh); 328 static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode); 329 static void pmap_kenter_pde(vm_offset_t va, pd_entry_t newpde); 330 static void pmap_pde_attr(pd_entry_t *pde, int cache_bits); 331 #if VM_NRESERVLEVEL > 0 332 static void pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va); 333 #endif 334 static boolean_t pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, 335 vm_prot_t prot); 336 static void pmap_pte_attr(pt_entry_t *pte, int cache_bits); 337 static void pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva, 338 struct spglist *free); 339 static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva, 340 struct spglist *free); 341 static vm_page_t pmap_remove_pt_page(pmap_t pmap, vm_offset_t va); 342 static void pmap_remove_page(pmap_t pmap, vm_offset_t va, struct spglist *free); 343 static bool pmap_remove_ptes(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, 344 struct spglist *free); 345 static void pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va); 346 static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m); 347 static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, 348 vm_page_t m); 349 static void pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, 350 pd_entry_t newpde); 351 static void pmap_update_pde_invalidate(vm_offset_t va, pd_entry_t newpde); 352 353 static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, u_int flags); 354 355 static vm_page_t _pmap_allocpte(pmap_t pmap, u_int ptepindex, u_int flags); 356 static void _pmap_unwire_ptp(pmap_t pmap, vm_page_t m, struct spglist *free); 357 static pt_entry_t *pmap_pte_quick(pmap_t pmap, vm_offset_t va); 358 static void pmap_pte_release(pt_entry_t *pte); 359 static int pmap_unuse_pt(pmap_t, vm_offset_t, struct spglist *); 360 #ifdef PMAP_PAE_COMP 361 static void *pmap_pdpt_allocf(uma_zone_t zone, vm_size_t bytes, int domain, 362 uint8_t *flags, int wait); 363 #endif 364 static void pmap_init_trm(void); 365 static void pmap_invalidate_all_int(pmap_t pmap); 366 367 static __inline void pagezero(void *page); 368 369 CTASSERT(1 << PDESHIFT == sizeof(pd_entry_t)); 370 CTASSERT(1 << PTESHIFT == sizeof(pt_entry_t)); 371 372 extern char _end[]; 373 extern u_long physfree; /* phys addr of next free page */ 374 extern u_long vm86phystk;/* PA of vm86/bios stack */ 375 extern u_long vm86paddr;/* address of vm86 region */ 376 extern int vm86pa; /* phys addr of vm86 region */ 377 extern u_long KERNend; /* phys addr end of kernel (just after bss) */ 378 #ifdef PMAP_PAE_COMP 379 pd_entry_t *IdlePTD_pae; /* phys addr of kernel PTD */ 380 pdpt_entry_t *IdlePDPT; /* phys addr of kernel PDPT */ 381 pt_entry_t *KPTmap_pae; /* address of kernel page tables */ 382 #define IdlePTD IdlePTD_pae 383 #define KPTmap KPTmap_pae 384 #else 385 pd_entry_t *IdlePTD_nopae; 386 pt_entry_t *KPTmap_nopae; 387 #define IdlePTD IdlePTD_nopae 388 #define KPTmap KPTmap_nopae 389 #endif 390 extern u_long KPTphys; /* phys addr of kernel page tables */ 391 extern u_long tramp_idleptd; 392 393 static u_long 394 allocpages(u_int cnt, u_long *physfree) 395 { 396 u_long res; 397 398 res = *physfree; 399 *physfree += PAGE_SIZE * cnt; 400 bzero((void *)res, PAGE_SIZE * cnt); 401 return (res); 402 } 403 404 static void 405 pmap_cold_map(u_long pa, u_long va, u_long cnt) 406 { 407 pt_entry_t *pt; 408 409 for (pt = (pt_entry_t *)KPTphys + atop(va); cnt > 0; 410 cnt--, pt++, va += PAGE_SIZE, pa += PAGE_SIZE) 411 *pt = pa | PG_V | PG_RW | PG_A | PG_M; 412 } 413 414 static void 415 pmap_cold_mapident(u_long pa, u_long cnt) 416 { 417 418 pmap_cold_map(pa, pa, cnt); 419 } 420 421 _Static_assert(LOWPTDI * 2 * NBPDR == KERNBASE, 422 "Broken double-map of zero PTD"); 423 424 static void 425 __CONCAT(PMTYPE, remap_lower)(bool enable) 426 { 427 int i; 428 429 for (i = 0; i < LOWPTDI; i++) 430 IdlePTD[i] = enable ? IdlePTD[LOWPTDI + i] : 0; 431 load_cr3(rcr3()); /* invalidate TLB */ 432 } 433 434 /* 435 * Called from locore.s before paging is enabled. Sets up the first 436 * kernel page table. Since kernel is mapped with PA == VA, this code 437 * does not require relocations. 438 */ 439 void 440 __CONCAT(PMTYPE, cold)(void) 441 { 442 pt_entry_t *pt; 443 u_long a; 444 u_int cr3, ncr4; 445 446 physfree = (u_long)&_end; 447 if (bootinfo.bi_esymtab != 0) 448 physfree = bootinfo.bi_esymtab; 449 if (bootinfo.bi_kernend != 0) 450 physfree = bootinfo.bi_kernend; 451 physfree = roundup2(physfree, NBPDR); 452 KERNend = physfree; 453 454 /* Allocate Kernel Page Tables */ 455 KPTphys = allocpages(NKPT, &physfree); 456 KPTmap = (pt_entry_t *)KPTphys; 457 458 /* Allocate Page Table Directory */ 459 #ifdef PMAP_PAE_COMP 460 /* XXX only need 32 bytes (easier for now) */ 461 IdlePDPT = (pdpt_entry_t *)allocpages(1, &physfree); 462 #endif 463 IdlePTD = (pd_entry_t *)allocpages(NPGPTD, &physfree); 464 465 /* 466 * Allocate KSTACK. Leave a guard page between IdlePTD and 467 * proc0kstack, to control stack overflow for thread0 and 468 * prevent corruption of the page table. We leak the guard 469 * physical memory due to 1:1 mappings. 470 */ 471 allocpages(1, &physfree); 472 proc0kstack = allocpages(TD0_KSTACK_PAGES, &physfree); 473 474 /* vm86/bios stack */ 475 vm86phystk = allocpages(1, &physfree); 476 477 /* pgtable + ext + IOPAGES */ 478 vm86paddr = vm86pa = allocpages(3, &physfree); 479 480 /* Install page tables into PTD. Page table page 1 is wasted. */ 481 for (a = 0; a < NKPT; a++) 482 IdlePTD[a] = (KPTphys + ptoa(a)) | PG_V | PG_RW | PG_A | PG_M; 483 484 #ifdef PMAP_PAE_COMP 485 /* PAE install PTD pointers into PDPT */ 486 for (a = 0; a < NPGPTD; a++) 487 IdlePDPT[a] = ((u_int)IdlePTD + ptoa(a)) | PG_V; 488 #endif 489 490 /* 491 * Install recursive mapping for kernel page tables into 492 * itself. 493 */ 494 for (a = 0; a < NPGPTD; a++) 495 IdlePTD[PTDPTDI + a] = ((u_int)IdlePTD + ptoa(a)) | PG_V | 496 PG_RW; 497 498 /* 499 * Initialize page table pages mapping physical address zero 500 * through the (physical) end of the kernel. Many of these 501 * pages must be reserved, and we reserve them all and map 502 * them linearly for convenience. We do this even if we've 503 * enabled PSE above; we'll just switch the corresponding 504 * kernel PDEs before we turn on paging. 505 * 506 * This and all other page table entries allow read and write 507 * access for various reasons. Kernel mappings never have any 508 * access restrictions. 509 */ 510 pmap_cold_mapident(0, atop(NBPDR) * LOWPTDI); 511 pmap_cold_map(0, NBPDR * LOWPTDI, atop(NBPDR) * LOWPTDI); 512 pmap_cold_mapident(KERNBASE, atop(KERNend - KERNBASE)); 513 514 /* Map page table directory */ 515 #ifdef PMAP_PAE_COMP 516 pmap_cold_mapident((u_long)IdlePDPT, 1); 517 #endif 518 pmap_cold_mapident((u_long)IdlePTD, NPGPTD); 519 520 /* Map early KPTmap. It is really pmap_cold_mapident. */ 521 pmap_cold_map(KPTphys, (u_long)KPTmap, NKPT); 522 523 /* Map proc0kstack */ 524 pmap_cold_mapident(proc0kstack, TD0_KSTACK_PAGES); 525 /* ISA hole already mapped */ 526 527 pmap_cold_mapident(vm86phystk, 1); 528 pmap_cold_mapident(vm86pa, 3); 529 530 /* Map page 0 into the vm86 page table */ 531 *(pt_entry_t *)vm86pa = 0 | PG_RW | PG_U | PG_A | PG_M | PG_V; 532 533 /* ...likewise for the ISA hole for vm86 */ 534 for (pt = (pt_entry_t *)vm86pa + atop(ISA_HOLE_START), a = 0; 535 a < atop(ISA_HOLE_LENGTH); a++, pt++) 536 *pt = (ISA_HOLE_START + ptoa(a)) | PG_RW | PG_U | PG_A | 537 PG_M | PG_V; 538 539 /* Enable PSE, PGE, VME, and PAE if configured. */ 540 ncr4 = 0; 541 if ((cpu_feature & CPUID_PSE) != 0) { 542 ncr4 |= CR4_PSE; 543 pseflag = PG_PS; 544 /* 545 * Superpage mapping of the kernel text. Existing 4k 546 * page table pages are wasted. 547 */ 548 for (a = KERNBASE; a < KERNend; a += NBPDR) 549 IdlePTD[a >> PDRSHIFT] = a | PG_PS | PG_A | PG_M | 550 PG_RW | PG_V; 551 } 552 if ((cpu_feature & CPUID_PGE) != 0) { 553 ncr4 |= CR4_PGE; 554 pgeflag = PG_G; 555 } 556 ncr4 |= (cpu_feature & CPUID_VME) != 0 ? CR4_VME : 0; 557 #ifdef PMAP_PAE_COMP 558 ncr4 |= CR4_PAE; 559 #endif 560 if (ncr4 != 0) 561 load_cr4(rcr4() | ncr4); 562 563 /* Now enable paging */ 564 #ifdef PMAP_PAE_COMP 565 cr3 = (u_int)IdlePDPT; 566 if ((cpu_feature & CPUID_PAT) == 0) 567 wbinvd(); 568 #else 569 cr3 = (u_int)IdlePTD; 570 #endif 571 tramp_idleptd = cr3; 572 load_cr3(cr3); 573 load_cr0(rcr0() | CR0_PG); 574 575 /* 576 * Now running relocated at KERNBASE where the system is 577 * linked to run. 578 */ 579 580 /* 581 * Remove the lowest part of the double mapping of low memory 582 * to get some null pointer checks. 583 */ 584 __CONCAT(PMTYPE, remap_lower)(false); 585 586 kernel_vm_end = /* 0 + */ NKPT * NBPDR; 587 #ifdef PMAP_PAE_COMP 588 i386_pmap_VM_NFREEORDER = VM_NFREEORDER_PAE; 589 i386_pmap_VM_LEVEL_0_ORDER = VM_LEVEL_0_ORDER_PAE; 590 i386_pmap_PDRSHIFT = PDRSHIFT_PAE; 591 #else 592 i386_pmap_VM_NFREEORDER = VM_NFREEORDER_NOPAE; 593 i386_pmap_VM_LEVEL_0_ORDER = VM_LEVEL_0_ORDER_NOPAE; 594 i386_pmap_PDRSHIFT = PDRSHIFT_NOPAE; 595 #endif 596 } 597 598 static void 599 __CONCAT(PMTYPE, set_nx)(void) 600 { 601 602 #ifdef PMAP_PAE_COMP 603 if ((amd_feature & AMDID_NX) == 0) 604 return; 605 pg_nx = PG_NX; 606 elf32_nxstack = 1; 607 /* EFER.EFER_NXE is set in initializecpu(). */ 608 #endif 609 } 610 611 /* 612 * Bootstrap the system enough to run with virtual memory. 613 * 614 * On the i386 this is called after pmap_cold() created initial 615 * kernel page table and enabled paging, and just syncs the pmap 616 * module with what has already been done. 617 */ 618 static void 619 __CONCAT(PMTYPE, bootstrap)(vm_paddr_t firstaddr) 620 { 621 vm_offset_t va; 622 pt_entry_t *pte, *unused __unused; 623 struct pcpu *pc; 624 u_long res; 625 int i; 626 627 res = atop(firstaddr - (vm_paddr_t)KERNLOAD); 628 629 /* 630 * Add a physical memory segment (vm_phys_seg) corresponding to the 631 * preallocated kernel page table pages so that vm_page structures 632 * representing these pages will be created. The vm_page structures 633 * are required for promotion of the corresponding kernel virtual 634 * addresses to superpage mappings. 635 */ 636 vm_phys_early_add_seg(KPTphys, KPTphys + ptoa(nkpt)); 637 638 /* 639 * Initialize the first available kernel virtual address. 640 * However, using "firstaddr" may waste a few pages of the 641 * kernel virtual address space, because pmap_cold() may not 642 * have mapped every physical page that it allocated. 643 * Preferably, pmap_cold() would provide a first unused 644 * virtual address in addition to "firstaddr". 645 */ 646 virtual_avail = (vm_offset_t)firstaddr; 647 virtual_end = VM_MAX_KERNEL_ADDRESS; 648 649 /* 650 * Initialize the kernel pmap (which is statically allocated). 651 * Count bootstrap data as being resident in case any of this data is 652 * later unmapped (using pmap_remove()) and freed. 653 */ 654 PMAP_LOCK_INIT(kernel_pmap); 655 kernel_pmap->pm_pdir = IdlePTD; 656 #ifdef PMAP_PAE_COMP 657 kernel_pmap->pm_pdpt = IdlePDPT; 658 #endif 659 CPU_FILL(&kernel_pmap->pm_active); /* don't allow deactivation */ 660 kernel_pmap->pm_stats.resident_count = res; 661 TAILQ_INIT(&kernel_pmap->pm_pvchunk); 662 vm_radix_init(&kernel_pmap->pm_root); 663 664 /* 665 * Initialize the global pv list lock. 666 */ 667 rw_init(&pvh_global_lock, "pmap pv global"); 668 669 /* 670 * Reserve some special page table entries/VA space for temporary 671 * mapping of pages. 672 */ 673 #define SYSMAP(c, p, v, n) \ 674 v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n); 675 676 va = virtual_avail; 677 pte = vtopte(va); 678 679 /* 680 * Initialize temporary map objects on the current CPU for use 681 * during early boot. 682 * CMAP1/CMAP2 are used for zeroing and copying pages. 683 * CMAP3 is used for the boot-time memory test. 684 */ 685 pc = get_pcpu(); 686 mtx_init(&pc->pc_cmap_lock, "SYSMAPS", NULL, MTX_DEF); 687 SYSMAP(caddr_t, pc->pc_cmap_pte1, pc->pc_cmap_addr1, 1) 688 SYSMAP(caddr_t, pc->pc_cmap_pte2, pc->pc_cmap_addr2, 1) 689 SYSMAP(vm_offset_t, pte, pc->pc_qmap_addr, 1) 690 691 SYSMAP(caddr_t, CMAP3, CADDR3, 1); 692 693 /* 694 * Crashdump maps. 695 */ 696 SYSMAP(caddr_t, unused, crashdumpmap, MAXDUMPPGS) 697 698 /* 699 * ptvmmap is used for reading arbitrary physical pages via /dev/mem. 700 */ 701 SYSMAP(caddr_t, unused, ptvmmap, 1) 702 703 /* 704 * msgbufp is used to map the system message buffer. 705 */ 706 SYSMAP(struct msgbuf *, unused, msgbufp, atop(round_page(msgbufsize))) 707 708 /* 709 * KPTmap is used by pmap_kextract(). 710 * 711 * KPTmap is first initialized by pmap_cold(). However, that initial 712 * KPTmap can only support NKPT page table pages. Here, a larger 713 * KPTmap is created that can support KVA_PAGES page table pages. 714 */ 715 SYSMAP(pt_entry_t *, KPTD, KPTmap, KVA_PAGES) 716 717 for (i = 0; i < NKPT; i++) 718 KPTD[i] = (KPTphys + ptoa(i)) | PG_RW | PG_V; 719 720 /* 721 * PADDR1 and PADDR2 are used by pmap_pte_quick() and pmap_pte(), 722 * respectively. 723 */ 724 SYSMAP(pt_entry_t *, PMAP1, PADDR1, 1) 725 SYSMAP(pt_entry_t *, PMAP2, PADDR2, 1) 726 SYSMAP(pt_entry_t *, PMAP3, PADDR3, 1) 727 728 mtx_init(&PMAP2mutex, "PMAP2", NULL, MTX_DEF); 729 730 virtual_avail = va; 731 732 /* 733 * Initialize the PAT MSR if present. 734 * pmap_init_pat() clears and sets CR4_PGE, which, as a 735 * side-effect, invalidates stale PG_G TLB entries that might 736 * have been created in our pre-boot environment. We assume 737 * that PAT support implies PGE and in reverse, PGE presence 738 * comes with PAT. Both features were added for Pentium Pro. 739 */ 740 pmap_init_pat(); 741 } 742 743 static void 744 pmap_init_reserved_pages(void) 745 { 746 struct pcpu *pc; 747 vm_offset_t pages; 748 int i; 749 750 #ifdef PMAP_PAE_COMP 751 if (!pae_mode) 752 return; 753 #else 754 if (pae_mode) 755 return; 756 #endif 757 CPU_FOREACH(i) { 758 pc = pcpu_find(i); 759 mtx_init(&pc->pc_copyout_mlock, "cpmlk", NULL, MTX_DEF | 760 MTX_NEW); 761 pc->pc_copyout_maddr = kva_alloc(ptoa(2)); 762 if (pc->pc_copyout_maddr == 0) 763 panic("unable to allocate non-sleepable copyout KVA"); 764 sx_init(&pc->pc_copyout_slock, "cpslk"); 765 pc->pc_copyout_saddr = kva_alloc(ptoa(2)); 766 if (pc->pc_copyout_saddr == 0) 767 panic("unable to allocate sleepable copyout KVA"); 768 pc->pc_pmap_eh_va = kva_alloc(ptoa(1)); 769 if (pc->pc_pmap_eh_va == 0) 770 panic("unable to allocate pmap_extract_and_hold KVA"); 771 pc->pc_pmap_eh_ptep = (char *)vtopte(pc->pc_pmap_eh_va); 772 773 /* 774 * Skip if the mappings have already been initialized, 775 * i.e. this is the BSP. 776 */ 777 if (pc->pc_cmap_addr1 != 0) 778 continue; 779 780 mtx_init(&pc->pc_cmap_lock, "SYSMAPS", NULL, MTX_DEF); 781 pages = kva_alloc(PAGE_SIZE * 3); 782 if (pages == 0) 783 panic("unable to allocate CMAP KVA"); 784 pc->pc_cmap_pte1 = vtopte(pages); 785 pc->pc_cmap_pte2 = vtopte(pages + PAGE_SIZE); 786 pc->pc_cmap_addr1 = (caddr_t)pages; 787 pc->pc_cmap_addr2 = (caddr_t)(pages + PAGE_SIZE); 788 pc->pc_qmap_addr = pages + ptoa(2); 789 } 790 } 791 792 SYSINIT(rpages_init, SI_SUB_CPU, SI_ORDER_ANY, pmap_init_reserved_pages, NULL); 793 794 /* 795 * Setup the PAT MSR. 796 */ 797 static void 798 __CONCAT(PMTYPE, init_pat)(void) 799 { 800 int pat_table[PAT_INDEX_SIZE]; 801 uint64_t pat_msr; 802 u_long cr0, cr4; 803 int i; 804 805 /* Set default PAT index table. */ 806 for (i = 0; i < PAT_INDEX_SIZE; i++) 807 pat_table[i] = -1; 808 pat_table[PAT_WRITE_BACK] = 0; 809 pat_table[PAT_WRITE_THROUGH] = 1; 810 pat_table[PAT_UNCACHEABLE] = 3; 811 pat_table[PAT_WRITE_COMBINING] = 3; 812 pat_table[PAT_WRITE_PROTECTED] = 3; 813 pat_table[PAT_UNCACHED] = 3; 814 815 /* 816 * Bail if this CPU doesn't implement PAT. 817 * We assume that PAT support implies PGE. 818 */ 819 if ((cpu_feature & CPUID_PAT) == 0) { 820 for (i = 0; i < PAT_INDEX_SIZE; i++) 821 pat_index[i] = pat_table[i]; 822 pat_works = 0; 823 return; 824 } 825 826 /* 827 * Due to some Intel errata, we can only safely use the lower 4 828 * PAT entries. 829 * 830 * Intel Pentium III Processor Specification Update 831 * Errata E.27 (Upper Four PAT Entries Not Usable With Mode B 832 * or Mode C Paging) 833 * 834 * Intel Pentium IV Processor Specification Update 835 * Errata N46 (PAT Index MSB May Be Calculated Incorrectly) 836 */ 837 if (cpu_vendor_id == CPU_VENDOR_INTEL && 838 !(CPUID_TO_FAMILY(cpu_id) == 6 && CPUID_TO_MODEL(cpu_id) >= 0xe)) 839 pat_works = 0; 840 841 /* Initialize default PAT entries. */ 842 pat_msr = PAT_VALUE(0, PAT_WRITE_BACK) | 843 PAT_VALUE(1, PAT_WRITE_THROUGH) | 844 PAT_VALUE(2, PAT_UNCACHED) | 845 PAT_VALUE(3, PAT_UNCACHEABLE) | 846 PAT_VALUE(4, PAT_WRITE_BACK) | 847 PAT_VALUE(5, PAT_WRITE_THROUGH) | 848 PAT_VALUE(6, PAT_UNCACHED) | 849 PAT_VALUE(7, PAT_UNCACHEABLE); 850 851 if (pat_works) { 852 /* 853 * Leave the indices 0-3 at the default of WB, WT, UC-, and UC. 854 * Program 5 and 6 as WP and WC. 855 * Leave 4 and 7 as WB and UC. 856 */ 857 pat_msr &= ~(PAT_MASK(5) | PAT_MASK(6)); 858 pat_msr |= PAT_VALUE(5, PAT_WRITE_PROTECTED) | 859 PAT_VALUE(6, PAT_WRITE_COMBINING); 860 pat_table[PAT_UNCACHED] = 2; 861 pat_table[PAT_WRITE_PROTECTED] = 5; 862 pat_table[PAT_WRITE_COMBINING] = 6; 863 } else { 864 /* 865 * Just replace PAT Index 2 with WC instead of UC-. 866 */ 867 pat_msr &= ~PAT_MASK(2); 868 pat_msr |= PAT_VALUE(2, PAT_WRITE_COMBINING); 869 pat_table[PAT_WRITE_COMBINING] = 2; 870 } 871 872 /* Disable PGE. */ 873 cr4 = rcr4(); 874 load_cr4(cr4 & ~CR4_PGE); 875 876 /* Disable caches (CD = 1, NW = 0). */ 877 cr0 = rcr0(); 878 load_cr0((cr0 & ~CR0_NW) | CR0_CD); 879 880 /* Flushes caches and TLBs. */ 881 wbinvd(); 882 invltlb(); 883 884 /* Update PAT and index table. */ 885 wrmsr(MSR_PAT, pat_msr); 886 for (i = 0; i < PAT_INDEX_SIZE; i++) 887 pat_index[i] = pat_table[i]; 888 889 /* Flush caches and TLBs again. */ 890 wbinvd(); 891 invltlb(); 892 893 /* Restore caches and PGE. */ 894 load_cr0(cr0); 895 load_cr4(cr4); 896 } 897 898 #ifdef PMAP_PAE_COMP 899 static void * 900 pmap_pdpt_allocf(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *flags, 901 int wait) 902 { 903 904 /* Inform UMA that this allocator uses kernel_map/object. */ 905 *flags = UMA_SLAB_KERNEL; 906 return ((void *)kmem_alloc_contig_domainset(DOMAINSET_FIXED(domain), 907 bytes, wait, 0x0ULL, 0xffffffffULL, 1, 0, VM_MEMATTR_DEFAULT)); 908 } 909 #endif 910 911 /* 912 * Abuse the pte nodes for unmapped kva to thread a kva freelist through. 913 * Requirements: 914 * - Must deal with pages in order to ensure that none of the PG_* bits 915 * are ever set, PG_V in particular. 916 * - Assumes we can write to ptes without pte_store() atomic ops, even 917 * on PAE systems. This should be ok. 918 * - Assumes nothing will ever test these addresses for 0 to indicate 919 * no mapping instead of correctly checking PG_V. 920 * - Assumes a vm_offset_t will fit in a pte (true for i386). 921 * Because PG_V is never set, there can be no mappings to invalidate. 922 */ 923 static vm_offset_t 924 pmap_ptelist_alloc(vm_offset_t *head) 925 { 926 pt_entry_t *pte; 927 vm_offset_t va; 928 929 va = *head; 930 if (va == 0) 931 panic("pmap_ptelist_alloc: exhausted ptelist KVA"); 932 pte = vtopte(va); 933 *head = *pte; 934 if (*head & PG_V) 935 panic("pmap_ptelist_alloc: va with PG_V set!"); 936 *pte = 0; 937 return (va); 938 } 939 940 static void 941 pmap_ptelist_free(vm_offset_t *head, vm_offset_t va) 942 { 943 pt_entry_t *pte; 944 945 if (va & PG_V) 946 panic("pmap_ptelist_free: freeing va with PG_V set!"); 947 pte = vtopte(va); 948 *pte = *head; /* virtual! PG_V is 0 though */ 949 *head = va; 950 } 951 952 static void 953 pmap_ptelist_init(vm_offset_t *head, void *base, int npages) 954 { 955 int i; 956 vm_offset_t va; 957 958 *head = 0; 959 for (i = npages - 1; i >= 0; i--) { 960 va = (vm_offset_t)base + i * PAGE_SIZE; 961 pmap_ptelist_free(head, va); 962 } 963 } 964 965 /* 966 * Initialize the pmap module. 967 * Called by vm_init, to initialize any structures that the pmap 968 * system needs to map virtual memory. 969 */ 970 static void 971 __CONCAT(PMTYPE, init)(void) 972 { 973 struct pmap_preinit_mapping *ppim; 974 vm_page_t mpte; 975 vm_size_t s; 976 int i, pv_npg; 977 978 /* 979 * Initialize the vm page array entries for the kernel pmap's 980 * page table pages. 981 */ 982 PMAP_LOCK(kernel_pmap); 983 for (i = 0; i < NKPT; i++) { 984 mpte = PHYS_TO_VM_PAGE(KPTphys + ptoa(i)); 985 KASSERT(mpte >= vm_page_array && 986 mpte < &vm_page_array[vm_page_array_size], 987 ("pmap_init: page table page is out of range")); 988 mpte->pindex = i + KPTDI; 989 mpte->phys_addr = KPTphys + ptoa(i); 990 mpte->ref_count = 1; 991 992 /* 993 * Collect the page table pages that were replaced by a 2/4MB 994 * page. They are filled with equivalent 4KB page mappings. 995 */ 996 if (pseflag != 0 && 997 KERNBASE <= i << PDRSHIFT && i << PDRSHIFT < KERNend && 998 pmap_insert_pt_page(kernel_pmap, mpte, true)) 999 panic("pmap_init: pmap_insert_pt_page failed"); 1000 } 1001 PMAP_UNLOCK(kernel_pmap); 1002 vm_wire_add(NKPT); 1003 1004 /* 1005 * Initialize the address space (zone) for the pv entries. Set a 1006 * high water mark so that the system can recover from excessive 1007 * numbers of pv entries. 1008 */ 1009 TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc); 1010 pv_entry_max = shpgperproc * maxproc + vm_cnt.v_page_count; 1011 TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max); 1012 pv_entry_max = roundup(pv_entry_max, _NPCPV); 1013 pv_entry_high_water = 9 * (pv_entry_max / 10); 1014 1015 /* 1016 * If the kernel is running on a virtual machine, then it must assume 1017 * that MCA is enabled by the hypervisor. Moreover, the kernel must 1018 * be prepared for the hypervisor changing the vendor and family that 1019 * are reported by CPUID. Consequently, the workaround for AMD Family 1020 * 10h Erratum 383 is enabled if the processor's feature set does not 1021 * include at least one feature that is only supported by older Intel 1022 * or newer AMD processors. 1023 */ 1024 if (vm_guest != VM_GUEST_NO && (cpu_feature & CPUID_SS) == 0 && 1025 (cpu_feature2 & (CPUID2_SSSE3 | CPUID2_SSE41 | CPUID2_AESNI | 1026 CPUID2_AVX | CPUID2_XSAVE)) == 0 && (amd_feature2 & (AMDID2_XOP | 1027 AMDID2_FMA4)) == 0) 1028 workaround_erratum383 = 1; 1029 1030 /* 1031 * Are large page mappings supported and enabled? 1032 */ 1033 TUNABLE_INT_FETCH("vm.pmap.pg_ps_enabled", &pg_ps_enabled); 1034 if (pseflag == 0) 1035 pg_ps_enabled = 0; 1036 else if (pg_ps_enabled) { 1037 KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0, 1038 ("pmap_init: can't assign to pagesizes[1]")); 1039 pagesizes[1] = NBPDR; 1040 } 1041 1042 /* 1043 * Calculate the size of the pv head table for superpages. 1044 * Handle the possibility that "vm_phys_segs[...].end" is zero. 1045 */ 1046 pv_npg = trunc_4mpage(vm_phys_segs[vm_phys_nsegs - 1].end - 1047 PAGE_SIZE) / NBPDR + 1; 1048 1049 /* 1050 * Allocate memory for the pv head table for superpages. 1051 */ 1052 s = (vm_size_t)(pv_npg * sizeof(struct md_page)); 1053 s = round_page(s); 1054 pv_table = kmem_malloc(s, M_WAITOK | M_ZERO); 1055 for (i = 0; i < pv_npg; i++) 1056 TAILQ_INIT(&pv_table[i].pv_list); 1057 1058 pv_maxchunks = MAX(pv_entry_max / _NPCPV, maxproc); 1059 pv_chunkbase = (struct pv_chunk *)kva_alloc(PAGE_SIZE * pv_maxchunks); 1060 if (pv_chunkbase == NULL) 1061 panic("pmap_init: not enough kvm for pv chunks"); 1062 pmap_ptelist_init(&pv_vafree, pv_chunkbase, pv_maxchunks); 1063 #ifdef PMAP_PAE_COMP 1064 pdptzone = uma_zcreate("PDPT", NPGPTD * sizeof(pdpt_entry_t), NULL, 1065 NULL, NULL, NULL, (NPGPTD * sizeof(pdpt_entry_t)) - 1, 1066 UMA_ZONE_CONTIG | UMA_ZONE_VM | UMA_ZONE_NOFREE); 1067 uma_zone_set_allocf(pdptzone, pmap_pdpt_allocf); 1068 #endif 1069 1070 pmap_initialized = 1; 1071 pmap_init_trm(); 1072 1073 if (!bootverbose) 1074 return; 1075 for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) { 1076 ppim = pmap_preinit_mapping + i; 1077 if (ppim->va == 0) 1078 continue; 1079 printf("PPIM %u: PA=%#jx, VA=%#x, size=%#x, mode=%#x\n", i, 1080 (uintmax_t)ppim->pa, ppim->va, ppim->sz, ppim->mode); 1081 } 1082 1083 } 1084 1085 extern u_long pmap_pde_demotions; 1086 extern u_long pmap_pde_mappings; 1087 extern u_long pmap_pde_p_failures; 1088 extern u_long pmap_pde_promotions; 1089 1090 /*************************************************** 1091 * Low level helper routines..... 1092 ***************************************************/ 1093 1094 static boolean_t 1095 __CONCAT(PMTYPE, is_valid_memattr)(pmap_t pmap __unused, vm_memattr_t mode) 1096 { 1097 1098 return (mode >= 0 && mode < PAT_INDEX_SIZE && 1099 pat_index[(int)mode] >= 0); 1100 } 1101 1102 /* 1103 * Determine the appropriate bits to set in a PTE or PDE for a specified 1104 * caching mode. 1105 */ 1106 static int 1107 __CONCAT(PMTYPE, cache_bits)(pmap_t pmap, int mode, boolean_t is_pde) 1108 { 1109 int cache_bits, pat_flag, pat_idx; 1110 1111 if (!pmap_is_valid_memattr(pmap, mode)) 1112 panic("Unknown caching mode %d\n", mode); 1113 1114 /* The PAT bit is different for PTE's and PDE's. */ 1115 pat_flag = is_pde ? PG_PDE_PAT : PG_PTE_PAT; 1116 1117 /* Map the caching mode to a PAT index. */ 1118 pat_idx = pat_index[mode]; 1119 1120 /* Map the 3-bit index value into the PAT, PCD, and PWT bits. */ 1121 cache_bits = 0; 1122 if (pat_idx & 0x4) 1123 cache_bits |= pat_flag; 1124 if (pat_idx & 0x2) 1125 cache_bits |= PG_NC_PCD; 1126 if (pat_idx & 0x1) 1127 cache_bits |= PG_NC_PWT; 1128 return (cache_bits); 1129 } 1130 1131 static int 1132 pmap_pat_index(pmap_t pmap, pt_entry_t pte, bool is_pde) 1133 { 1134 int pat_flag, pat_idx; 1135 1136 if ((cpu_feature & CPUID_PAT) == 0) 1137 return (0); 1138 1139 pat_idx = 0; 1140 /* The PAT bit is different for PTE's and PDE's. */ 1141 pat_flag = is_pde ? PG_PDE_PAT : PG_PTE_PAT; 1142 1143 if ((pte & pat_flag) != 0) 1144 pat_idx |= 0x4; 1145 if ((pte & PG_NC_PCD) != 0) 1146 pat_idx |= 0x2; 1147 if ((pte & PG_NC_PWT) != 0) 1148 pat_idx |= 0x1; 1149 1150 /* See pmap_init_pat(). */ 1151 if (pat_works) { 1152 if (pat_idx == 4) 1153 pat_idx = 0; 1154 if (pat_idx == 7) 1155 pat_idx = 3; 1156 } else { 1157 /* XXXKIB */ 1158 } 1159 1160 return (pat_idx); 1161 } 1162 1163 static bool 1164 __CONCAT(PMTYPE, ps_enabled)(pmap_t pmap __unused) 1165 { 1166 1167 return (pg_ps_enabled); 1168 } 1169 1170 /* 1171 * The caller is responsible for maintaining TLB consistency. 1172 */ 1173 static void 1174 pmap_kenter_pde(vm_offset_t va, pd_entry_t newpde) 1175 { 1176 pd_entry_t *pde; 1177 1178 pde = pmap_pde(kernel_pmap, va); 1179 pde_store(pde, newpde); 1180 } 1181 1182 /* 1183 * After changing the page size for the specified virtual address in the page 1184 * table, flush the corresponding entries from the processor's TLB. Only the 1185 * calling processor's TLB is affected. 1186 * 1187 * The calling thread must be pinned to a processor. 1188 */ 1189 static void 1190 pmap_update_pde_invalidate(vm_offset_t va, pd_entry_t newpde) 1191 { 1192 1193 if ((newpde & PG_PS) == 0) 1194 /* Demotion: flush a specific 2MB page mapping. */ 1195 invlpg(va); 1196 else /* if ((newpde & PG_G) == 0) */ 1197 /* 1198 * Promotion: flush every 4KB page mapping from the TLB 1199 * because there are too many to flush individually. 1200 */ 1201 invltlb(); 1202 } 1203 1204 #ifdef SMP 1205 1206 static void 1207 pmap_curcpu_cb_dummy(pmap_t pmap __unused, vm_offset_t addr1 __unused, 1208 vm_offset_t addr2 __unused) 1209 { 1210 } 1211 1212 /* 1213 * For SMP, these functions have to use the IPI mechanism for coherence. 1214 * 1215 * N.B.: Before calling any of the following TLB invalidation functions, 1216 * the calling processor must ensure that all stores updating a non- 1217 * kernel page table are globally performed. Otherwise, another 1218 * processor could cache an old, pre-update entry without being 1219 * invalidated. This can happen one of two ways: (1) The pmap becomes 1220 * active on another processor after its pm_active field is checked by 1221 * one of the following functions but before a store updating the page 1222 * table is globally performed. (2) The pmap becomes active on another 1223 * processor before its pm_active field is checked but due to 1224 * speculative loads one of the following functions stills reads the 1225 * pmap as inactive on the other processor. 1226 * 1227 * The kernel page table is exempt because its pm_active field is 1228 * immutable. The kernel page table is always active on every 1229 * processor. 1230 */ 1231 static void 1232 pmap_invalidate_page_int(pmap_t pmap, vm_offset_t va) 1233 { 1234 cpuset_t *mask, other_cpus; 1235 u_int cpuid; 1236 1237 sched_pin(); 1238 if (pmap == kernel_pmap) { 1239 invlpg(va); 1240 mask = &all_cpus; 1241 } else if (!CPU_CMP(&pmap->pm_active, &all_cpus)) { 1242 mask = &all_cpus; 1243 } else { 1244 cpuid = PCPU_GET(cpuid); 1245 other_cpus = all_cpus; 1246 CPU_CLR(cpuid, &other_cpus); 1247 CPU_AND(&other_cpus, &other_cpus, &pmap->pm_active); 1248 mask = &other_cpus; 1249 } 1250 smp_masked_invlpg(*mask, va, pmap, pmap_curcpu_cb_dummy); 1251 sched_unpin(); 1252 } 1253 1254 /* 4k PTEs -- Chosen to exceed the total size of Broadwell L2 TLB */ 1255 #define PMAP_INVLPG_THRESHOLD (4 * 1024 * PAGE_SIZE) 1256 1257 static void 1258 pmap_invalidate_range_int(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 1259 { 1260 cpuset_t *mask, other_cpus; 1261 vm_offset_t addr; 1262 u_int cpuid; 1263 1264 if (eva - sva >= PMAP_INVLPG_THRESHOLD) { 1265 pmap_invalidate_all_int(pmap); 1266 return; 1267 } 1268 1269 sched_pin(); 1270 if (pmap == kernel_pmap) { 1271 for (addr = sva; addr < eva; addr += PAGE_SIZE) 1272 invlpg(addr); 1273 mask = &all_cpus; 1274 } else if (!CPU_CMP(&pmap->pm_active, &all_cpus)) { 1275 mask = &all_cpus; 1276 } else { 1277 cpuid = PCPU_GET(cpuid); 1278 other_cpus = all_cpus; 1279 CPU_CLR(cpuid, &other_cpus); 1280 CPU_AND(&other_cpus, &other_cpus, &pmap->pm_active); 1281 mask = &other_cpus; 1282 } 1283 smp_masked_invlpg_range(*mask, sva, eva, pmap, pmap_curcpu_cb_dummy); 1284 sched_unpin(); 1285 } 1286 1287 static void 1288 pmap_invalidate_all_int(pmap_t pmap) 1289 { 1290 cpuset_t *mask, other_cpus; 1291 u_int cpuid; 1292 1293 sched_pin(); 1294 if (pmap == kernel_pmap) { 1295 invltlb(); 1296 mask = &all_cpus; 1297 } else if (!CPU_CMP(&pmap->pm_active, &all_cpus)) { 1298 mask = &all_cpus; 1299 } else { 1300 cpuid = PCPU_GET(cpuid); 1301 other_cpus = all_cpus; 1302 CPU_CLR(cpuid, &other_cpus); 1303 CPU_AND(&other_cpus, &other_cpus, &pmap->pm_active); 1304 mask = &other_cpus; 1305 } 1306 smp_masked_invltlb(*mask, pmap, pmap_curcpu_cb_dummy); 1307 sched_unpin(); 1308 } 1309 1310 static void 1311 pmap_invalidate_cache_curcpu_cb(pmap_t pmap __unused, 1312 vm_offset_t addr1 __unused, vm_offset_t addr2 __unused) 1313 { 1314 wbinvd(); 1315 } 1316 1317 static void 1318 __CONCAT(PMTYPE, invalidate_cache)(void) 1319 { 1320 smp_cache_flush(pmap_invalidate_cache_curcpu_cb); 1321 } 1322 1323 struct pde_action { 1324 cpuset_t invalidate; /* processors that invalidate their TLB */ 1325 vm_offset_t va; 1326 pd_entry_t *pde; 1327 pd_entry_t newpde; 1328 u_int store; /* processor that updates the PDE */ 1329 }; 1330 1331 static void 1332 pmap_update_pde_kernel(void *arg) 1333 { 1334 struct pde_action *act = arg; 1335 pd_entry_t *pde; 1336 1337 if (act->store == PCPU_GET(cpuid)) { 1338 pde = pmap_pde(kernel_pmap, act->va); 1339 pde_store(pde, act->newpde); 1340 } 1341 } 1342 1343 static void 1344 pmap_update_pde_user(void *arg) 1345 { 1346 struct pde_action *act = arg; 1347 1348 if (act->store == PCPU_GET(cpuid)) 1349 pde_store(act->pde, act->newpde); 1350 } 1351 1352 static void 1353 pmap_update_pde_teardown(void *arg) 1354 { 1355 struct pde_action *act = arg; 1356 1357 if (CPU_ISSET(PCPU_GET(cpuid), &act->invalidate)) 1358 pmap_update_pde_invalidate(act->va, act->newpde); 1359 } 1360 1361 /* 1362 * Change the page size for the specified virtual address in a way that 1363 * prevents any possibility of the TLB ever having two entries that map the 1364 * same virtual address using different page sizes. This is the recommended 1365 * workaround for Erratum 383 on AMD Family 10h processors. It prevents a 1366 * machine check exception for a TLB state that is improperly diagnosed as a 1367 * hardware error. 1368 */ 1369 static void 1370 pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde) 1371 { 1372 struct pde_action act; 1373 cpuset_t active, other_cpus; 1374 u_int cpuid; 1375 1376 sched_pin(); 1377 cpuid = PCPU_GET(cpuid); 1378 other_cpus = all_cpus; 1379 CPU_CLR(cpuid, &other_cpus); 1380 if (pmap == kernel_pmap) 1381 active = all_cpus; 1382 else 1383 active = pmap->pm_active; 1384 if (CPU_OVERLAP(&active, &other_cpus)) { 1385 act.store = cpuid; 1386 act.invalidate = active; 1387 act.va = va; 1388 act.pde = pde; 1389 act.newpde = newpde; 1390 CPU_SET(cpuid, &active); 1391 smp_rendezvous_cpus(active, 1392 smp_no_rendezvous_barrier, pmap == kernel_pmap ? 1393 pmap_update_pde_kernel : pmap_update_pde_user, 1394 pmap_update_pde_teardown, &act); 1395 } else { 1396 if (pmap == kernel_pmap) 1397 pmap_kenter_pde(va, newpde); 1398 else 1399 pde_store(pde, newpde); 1400 if (CPU_ISSET(cpuid, &active)) 1401 pmap_update_pde_invalidate(va, newpde); 1402 } 1403 sched_unpin(); 1404 } 1405 #else /* !SMP */ 1406 /* 1407 * Normal, non-SMP, 486+ invalidation functions. 1408 * We inline these within pmap.c for speed. 1409 */ 1410 static void 1411 pmap_invalidate_page_int(pmap_t pmap, vm_offset_t va) 1412 { 1413 1414 if (pmap == kernel_pmap) 1415 invlpg(va); 1416 } 1417 1418 static void 1419 pmap_invalidate_range_int(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 1420 { 1421 vm_offset_t addr; 1422 1423 if (pmap == kernel_pmap) 1424 for (addr = sva; addr < eva; addr += PAGE_SIZE) 1425 invlpg(addr); 1426 } 1427 1428 static void 1429 pmap_invalidate_all_int(pmap_t pmap) 1430 { 1431 1432 if (pmap == kernel_pmap) 1433 invltlb(); 1434 } 1435 1436 static void 1437 __CONCAT(PMTYPE, invalidate_cache)(void) 1438 { 1439 1440 wbinvd(); 1441 } 1442 1443 static void 1444 pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde) 1445 { 1446 1447 if (pmap == kernel_pmap) 1448 pmap_kenter_pde(va, newpde); 1449 else 1450 pde_store(pde, newpde); 1451 if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active)) 1452 pmap_update_pde_invalidate(va, newpde); 1453 } 1454 #endif /* !SMP */ 1455 1456 static void 1457 __CONCAT(PMTYPE, invalidate_page)(pmap_t pmap, vm_offset_t va) 1458 { 1459 1460 pmap_invalidate_page_int(pmap, va); 1461 } 1462 1463 static void 1464 __CONCAT(PMTYPE, invalidate_range)(pmap_t pmap, vm_offset_t sva, 1465 vm_offset_t eva) 1466 { 1467 1468 pmap_invalidate_range_int(pmap, sva, eva); 1469 } 1470 1471 static void 1472 __CONCAT(PMTYPE, invalidate_all)(pmap_t pmap) 1473 { 1474 1475 pmap_invalidate_all_int(pmap); 1476 } 1477 1478 static void 1479 pmap_invalidate_pde_page(pmap_t pmap, vm_offset_t va, pd_entry_t pde) 1480 { 1481 1482 /* 1483 * When the PDE has PG_PROMOTED set, the 2- or 4MB page mapping was 1484 * created by a promotion that did not invalidate the 512 or 1024 4KB 1485 * page mappings that might exist in the TLB. Consequently, at this 1486 * point, the TLB may hold both 4KB and 2- or 4MB page mappings for 1487 * the address range [va, va + NBPDR). Therefore, the entire range 1488 * must be invalidated here. In contrast, when PG_PROMOTED is clear, 1489 * the TLB will not hold any 4KB page mappings for the address range 1490 * [va, va + NBPDR), and so a single INVLPG suffices to invalidate the 1491 * 2- or 4MB page mapping from the TLB. 1492 */ 1493 if ((pde & PG_PROMOTED) != 0) 1494 pmap_invalidate_range_int(pmap, va, va + NBPDR - 1); 1495 else 1496 pmap_invalidate_page_int(pmap, va); 1497 } 1498 1499 /* 1500 * Are we current address space or kernel? 1501 */ 1502 static __inline int 1503 pmap_is_current(pmap_t pmap) 1504 { 1505 1506 return (pmap == kernel_pmap); 1507 } 1508 1509 /* 1510 * If the given pmap is not the current or kernel pmap, the returned pte must 1511 * be released by passing it to pmap_pte_release(). 1512 */ 1513 static pt_entry_t * 1514 __CONCAT(PMTYPE, pte)(pmap_t pmap, vm_offset_t va) 1515 { 1516 pd_entry_t newpf; 1517 pd_entry_t *pde; 1518 1519 pde = pmap_pde(pmap, va); 1520 if (*pde & PG_PS) 1521 return (pde); 1522 if (*pde != 0) { 1523 /* are we current address space or kernel? */ 1524 if (pmap_is_current(pmap)) 1525 return (vtopte(va)); 1526 mtx_lock(&PMAP2mutex); 1527 newpf = *pde & PG_FRAME; 1528 if ((*PMAP2 & PG_FRAME) != newpf) { 1529 *PMAP2 = newpf | PG_RW | PG_V | PG_A | PG_M; 1530 pmap_invalidate_page_int(kernel_pmap, 1531 (vm_offset_t)PADDR2); 1532 } 1533 return (PADDR2 + (i386_btop(va) & (NPTEPG - 1))); 1534 } 1535 return (NULL); 1536 } 1537 1538 /* 1539 * Releases a pte that was obtained from pmap_pte(). Be prepared for the pte 1540 * being NULL. 1541 */ 1542 static __inline void 1543 pmap_pte_release(pt_entry_t *pte) 1544 { 1545 1546 if ((pt_entry_t *)((vm_offset_t)pte & ~PAGE_MASK) == PADDR2) 1547 mtx_unlock(&PMAP2mutex); 1548 } 1549 1550 /* 1551 * NB: The sequence of updating a page table followed by accesses to the 1552 * corresponding pages is subject to the situation described in the "AMD64 1553 * Architecture Programmer's Manual Volume 2: System Programming" rev. 3.23, 1554 * "7.3.1 Special Coherency Considerations". Therefore, issuing the INVLPG 1555 * right after modifying the PTE bits is crucial. 1556 */ 1557 static __inline void 1558 invlcaddr(void *caddr) 1559 { 1560 1561 invlpg((u_int)caddr); 1562 } 1563 1564 /* 1565 * Super fast pmap_pte routine best used when scanning 1566 * the pv lists. This eliminates many coarse-grained 1567 * invltlb calls. Note that many of the pv list 1568 * scans are across different pmaps. It is very wasteful 1569 * to do an entire invltlb for checking a single mapping. 1570 * 1571 * If the given pmap is not the current pmap, pvh_global_lock 1572 * must be held and curthread pinned to a CPU. 1573 */ 1574 static pt_entry_t * 1575 pmap_pte_quick(pmap_t pmap, vm_offset_t va) 1576 { 1577 pd_entry_t newpf; 1578 pd_entry_t *pde; 1579 1580 pde = pmap_pde(pmap, va); 1581 if (*pde & PG_PS) 1582 return (pde); 1583 if (*pde != 0) { 1584 /* are we current address space or kernel? */ 1585 if (pmap_is_current(pmap)) 1586 return (vtopte(va)); 1587 rw_assert(&pvh_global_lock, RA_WLOCKED); 1588 KASSERT(curthread->td_pinned > 0, ("curthread not pinned")); 1589 newpf = *pde & PG_FRAME; 1590 if ((*PMAP1 & PG_FRAME) != newpf) { 1591 *PMAP1 = newpf | PG_RW | PG_V | PG_A | PG_M; 1592 #ifdef SMP 1593 PMAP1cpu = PCPU_GET(cpuid); 1594 #endif 1595 invlcaddr(PADDR1); 1596 PMAP1changed++; 1597 } else 1598 #ifdef SMP 1599 if (PMAP1cpu != PCPU_GET(cpuid)) { 1600 PMAP1cpu = PCPU_GET(cpuid); 1601 invlcaddr(PADDR1); 1602 PMAP1changedcpu++; 1603 } else 1604 #endif 1605 PMAP1unchanged++; 1606 return (PADDR1 + (i386_btop(va) & (NPTEPG - 1))); 1607 } 1608 return (0); 1609 } 1610 1611 static pt_entry_t * 1612 pmap_pte_quick3(pmap_t pmap, vm_offset_t va) 1613 { 1614 pd_entry_t newpf; 1615 pd_entry_t *pde; 1616 1617 pde = pmap_pde(pmap, va); 1618 if (*pde & PG_PS) 1619 return (pde); 1620 if (*pde != 0) { 1621 rw_assert(&pvh_global_lock, RA_WLOCKED); 1622 KASSERT(curthread->td_pinned > 0, ("curthread not pinned")); 1623 newpf = *pde & PG_FRAME; 1624 if ((*PMAP3 & PG_FRAME) != newpf) { 1625 *PMAP3 = newpf | PG_RW | PG_V | PG_A | PG_M; 1626 #ifdef SMP 1627 PMAP3cpu = PCPU_GET(cpuid); 1628 #endif 1629 invlcaddr(PADDR3); 1630 PMAP1changed++; 1631 } else 1632 #ifdef SMP 1633 if (PMAP3cpu != PCPU_GET(cpuid)) { 1634 PMAP3cpu = PCPU_GET(cpuid); 1635 invlcaddr(PADDR3); 1636 PMAP1changedcpu++; 1637 } else 1638 #endif 1639 PMAP1unchanged++; 1640 return (PADDR3 + (i386_btop(va) & (NPTEPG - 1))); 1641 } 1642 return (0); 1643 } 1644 1645 static pt_entry_t 1646 pmap_pte_ufast(pmap_t pmap, vm_offset_t va, pd_entry_t pde) 1647 { 1648 pt_entry_t *eh_ptep, pte, *ptep; 1649 1650 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1651 pde &= PG_FRAME; 1652 critical_enter(); 1653 eh_ptep = (pt_entry_t *)PCPU_GET(pmap_eh_ptep); 1654 if ((*eh_ptep & PG_FRAME) != pde) { 1655 *eh_ptep = pde | PG_RW | PG_V | PG_A | PG_M; 1656 invlcaddr((void *)PCPU_GET(pmap_eh_va)); 1657 } 1658 ptep = (pt_entry_t *)PCPU_GET(pmap_eh_va) + (i386_btop(va) & 1659 (NPTEPG - 1)); 1660 pte = *ptep; 1661 critical_exit(); 1662 return (pte); 1663 } 1664 1665 /* 1666 * Extract from the kernel page table the physical address that is mapped by 1667 * the given virtual address "va". 1668 * 1669 * This function may be used before pmap_bootstrap() is called. 1670 */ 1671 static vm_paddr_t 1672 __CONCAT(PMTYPE, kextract)(vm_offset_t va) 1673 { 1674 vm_paddr_t pa; 1675 1676 if ((pa = pte_load(&PTD[va >> PDRSHIFT])) & PG_PS) { 1677 pa = (pa & PG_PS_FRAME) | (va & PDRMASK); 1678 } else { 1679 /* 1680 * Beware of a concurrent promotion that changes the PDE at 1681 * this point! For example, vtopte() must not be used to 1682 * access the PTE because it would use the new PDE. It is, 1683 * however, safe to use the old PDE because the page table 1684 * page is preserved by the promotion. 1685 */ 1686 pa = KPTmap[i386_btop(va)]; 1687 pa = (pa & PG_FRAME) | (va & PAGE_MASK); 1688 } 1689 return (pa); 1690 } 1691 1692 /* 1693 * Routine: pmap_extract 1694 * Function: 1695 * Extract the physical page address associated 1696 * with the given map/virtual_address pair. 1697 */ 1698 static vm_paddr_t 1699 __CONCAT(PMTYPE, extract)(pmap_t pmap, vm_offset_t va) 1700 { 1701 vm_paddr_t rtval; 1702 pt_entry_t pte; 1703 pd_entry_t pde; 1704 1705 rtval = 0; 1706 PMAP_LOCK(pmap); 1707 pde = pmap->pm_pdir[va >> PDRSHIFT]; 1708 if (pde != 0) { 1709 if ((pde & PG_PS) != 0) 1710 rtval = (pde & PG_PS_FRAME) | (va & PDRMASK); 1711 else { 1712 pte = pmap_pte_ufast(pmap, va, pde); 1713 rtval = (pte & PG_FRAME) | (va & PAGE_MASK); 1714 } 1715 } 1716 PMAP_UNLOCK(pmap); 1717 return (rtval); 1718 } 1719 1720 /* 1721 * Routine: pmap_extract_and_hold 1722 * Function: 1723 * Atomically extract and hold the physical page 1724 * with the given pmap and virtual address pair 1725 * if that mapping permits the given protection. 1726 */ 1727 static vm_page_t 1728 __CONCAT(PMTYPE, extract_and_hold)(pmap_t pmap, vm_offset_t va, vm_prot_t prot) 1729 { 1730 pd_entry_t pde; 1731 pt_entry_t pte; 1732 vm_page_t m; 1733 1734 m = NULL; 1735 PMAP_LOCK(pmap); 1736 pde = *pmap_pde(pmap, va); 1737 if (pde != 0) { 1738 if (pde & PG_PS) { 1739 if ((pde & PG_RW) || (prot & VM_PROT_WRITE) == 0) 1740 m = PHYS_TO_VM_PAGE((pde & PG_PS_FRAME) | 1741 (va & PDRMASK)); 1742 } else { 1743 pte = pmap_pte_ufast(pmap, va, pde); 1744 if (pte != 0 && 1745 ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0)) 1746 m = PHYS_TO_VM_PAGE(pte & PG_FRAME); 1747 } 1748 if (m != NULL && !vm_page_wire_mapped(m)) 1749 m = NULL; 1750 } 1751 PMAP_UNLOCK(pmap); 1752 return (m); 1753 } 1754 1755 /*************************************************** 1756 * Low level mapping routines..... 1757 ***************************************************/ 1758 1759 /* 1760 * Add a wired page to the kva. 1761 * Note: not SMP coherent. 1762 * 1763 * This function may be used before pmap_bootstrap() is called. 1764 */ 1765 static void 1766 __CONCAT(PMTYPE, kenter)(vm_offset_t va, vm_paddr_t pa) 1767 { 1768 pt_entry_t *pte; 1769 1770 pte = vtopte(va); 1771 pte_store(pte, pa | PG_RW | PG_V); 1772 } 1773 1774 static __inline void 1775 pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode) 1776 { 1777 pt_entry_t *pte; 1778 1779 pte = vtopte(va); 1780 pte_store(pte, pa | PG_RW | PG_V | pmap_cache_bits(kernel_pmap, 1781 mode, 0)); 1782 } 1783 1784 /* 1785 * Remove a page from the kernel pagetables. 1786 * Note: not SMP coherent. 1787 * 1788 * This function may be used before pmap_bootstrap() is called. 1789 */ 1790 static void 1791 __CONCAT(PMTYPE, kremove)(vm_offset_t va) 1792 { 1793 pt_entry_t *pte; 1794 1795 pte = vtopte(va); 1796 pte_clear(pte); 1797 } 1798 1799 /* 1800 * Used to map a range of physical addresses into kernel 1801 * virtual address space. 1802 * 1803 * The value passed in '*virt' is a suggested virtual address for 1804 * the mapping. Architectures which can support a direct-mapped 1805 * physical to virtual region can return the appropriate address 1806 * within that region, leaving '*virt' unchanged. Other 1807 * architectures should map the pages starting at '*virt' and 1808 * update '*virt' with the first usable address after the mapped 1809 * region. 1810 */ 1811 static vm_offset_t 1812 __CONCAT(PMTYPE, map)(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, 1813 int prot) 1814 { 1815 vm_offset_t va, sva; 1816 vm_paddr_t superpage_offset; 1817 pd_entry_t newpde; 1818 1819 va = *virt; 1820 /* 1821 * Does the physical address range's size and alignment permit at 1822 * least one superpage mapping to be created? 1823 */ 1824 superpage_offset = start & PDRMASK; 1825 if ((end - start) - ((NBPDR - superpage_offset) & PDRMASK) >= NBPDR) { 1826 /* 1827 * Increase the starting virtual address so that its alignment 1828 * does not preclude the use of superpage mappings. 1829 */ 1830 if ((va & PDRMASK) < superpage_offset) 1831 va = (va & ~PDRMASK) + superpage_offset; 1832 else if ((va & PDRMASK) > superpage_offset) 1833 va = ((va + PDRMASK) & ~PDRMASK) + superpage_offset; 1834 } 1835 sva = va; 1836 while (start < end) { 1837 if ((start & PDRMASK) == 0 && end - start >= NBPDR && 1838 pseflag != 0) { 1839 KASSERT((va & PDRMASK) == 0, 1840 ("pmap_map: misaligned va %#x", va)); 1841 newpde = start | PG_PS | PG_RW | PG_V; 1842 pmap_kenter_pde(va, newpde); 1843 va += NBPDR; 1844 start += NBPDR; 1845 } else { 1846 pmap_kenter(va, start); 1847 va += PAGE_SIZE; 1848 start += PAGE_SIZE; 1849 } 1850 } 1851 pmap_invalidate_range_int(kernel_pmap, sva, va); 1852 *virt = va; 1853 return (sva); 1854 } 1855 1856 /* 1857 * Add a list of wired pages to the kva 1858 * this routine is only used for temporary 1859 * kernel mappings that do not need to have 1860 * page modification or references recorded. 1861 * Note that old mappings are simply written 1862 * over. The page *must* be wired. 1863 * Note: SMP coherent. Uses a ranged shootdown IPI. 1864 */ 1865 static void 1866 __CONCAT(PMTYPE, qenter)(vm_offset_t sva, vm_page_t *ma, int count) 1867 { 1868 pt_entry_t *endpte, oldpte, pa, *pte; 1869 vm_page_t m; 1870 1871 oldpte = 0; 1872 pte = vtopte(sva); 1873 endpte = pte + count; 1874 while (pte < endpte) { 1875 m = *ma++; 1876 pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(kernel_pmap, 1877 m->md.pat_mode, 0); 1878 if ((*pte & (PG_FRAME | PG_PTE_CACHE)) != pa) { 1879 oldpte |= *pte; 1880 pte_store(pte, pa | pg_nx | PG_RW | PG_V); 1881 } 1882 pte++; 1883 } 1884 if (__predict_false((oldpte & PG_V) != 0)) 1885 pmap_invalidate_range_int(kernel_pmap, sva, sva + count * 1886 PAGE_SIZE); 1887 } 1888 1889 /* 1890 * This routine tears out page mappings from the 1891 * kernel -- it is meant only for temporary mappings. 1892 * Note: SMP coherent. Uses a ranged shootdown IPI. 1893 */ 1894 static void 1895 __CONCAT(PMTYPE, qremove)(vm_offset_t sva, int count) 1896 { 1897 vm_offset_t va; 1898 1899 va = sva; 1900 while (count-- > 0) { 1901 pmap_kremove(va); 1902 va += PAGE_SIZE; 1903 } 1904 pmap_invalidate_range_int(kernel_pmap, sva, va); 1905 } 1906 1907 /*************************************************** 1908 * Page table page management routines..... 1909 ***************************************************/ 1910 /* 1911 * Schedule the specified unused page table page to be freed. Specifically, 1912 * add the page to the specified list of pages that will be released to the 1913 * physical memory manager after the TLB has been updated. 1914 */ 1915 static __inline void 1916 pmap_add_delayed_free_list(vm_page_t m, struct spglist *free, 1917 boolean_t set_PG_ZERO) 1918 { 1919 1920 if (set_PG_ZERO) 1921 m->flags |= PG_ZERO; 1922 else 1923 m->flags &= ~PG_ZERO; 1924 SLIST_INSERT_HEAD(free, m, plinks.s.ss); 1925 } 1926 1927 /* 1928 * Inserts the specified page table page into the specified pmap's collection 1929 * of idle page table pages. Each of a pmap's page table pages is responsible 1930 * for mapping a distinct range of virtual addresses. The pmap's collection is 1931 * ordered by this virtual address range. 1932 * 1933 * If "promoted" is false, then the page table page "mpte" must be zero filled. 1934 */ 1935 static __inline int 1936 pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte, bool promoted) 1937 { 1938 1939 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1940 mpte->valid = promoted ? VM_PAGE_BITS_ALL : 0; 1941 return (vm_radix_insert(&pmap->pm_root, mpte)); 1942 } 1943 1944 /* 1945 * Removes the page table page mapping the specified virtual address from the 1946 * specified pmap's collection of idle page table pages, and returns it. 1947 * Otherwise, returns NULL if there is no page table page corresponding to the 1948 * specified virtual address. 1949 */ 1950 static __inline vm_page_t 1951 pmap_remove_pt_page(pmap_t pmap, vm_offset_t va) 1952 { 1953 1954 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1955 return (vm_radix_remove(&pmap->pm_root, va >> PDRSHIFT)); 1956 } 1957 1958 /* 1959 * Decrements a page table page's reference count, which is used to record the 1960 * number of valid page table entries within the page. If the reference count 1961 * drops to zero, then the page table page is unmapped. Returns TRUE if the 1962 * page table page was unmapped and FALSE otherwise. 1963 */ 1964 static inline boolean_t 1965 pmap_unwire_ptp(pmap_t pmap, vm_page_t m, struct spglist *free) 1966 { 1967 1968 --m->ref_count; 1969 if (m->ref_count == 0) { 1970 _pmap_unwire_ptp(pmap, m, free); 1971 return (TRUE); 1972 } else 1973 return (FALSE); 1974 } 1975 1976 static void 1977 _pmap_unwire_ptp(pmap_t pmap, vm_page_t m, struct spglist *free) 1978 { 1979 1980 /* 1981 * unmap the page table page 1982 */ 1983 pmap->pm_pdir[m->pindex] = 0; 1984 --pmap->pm_stats.resident_count; 1985 1986 /* 1987 * There is not need to invalidate the recursive mapping since 1988 * we never instantiate such mapping for the usermode pmaps, 1989 * and never remove page table pages from the kernel pmap. 1990 * Put page on a list so that it is released since all TLB 1991 * shootdown is done. 1992 */ 1993 MPASS(pmap != kernel_pmap); 1994 pmap_add_delayed_free_list(m, free, TRUE); 1995 } 1996 1997 /* 1998 * After removing a page table entry, this routine is used to 1999 * conditionally free the page, and manage the reference count. 2000 */ 2001 static int 2002 pmap_unuse_pt(pmap_t pmap, vm_offset_t va, struct spglist *free) 2003 { 2004 pd_entry_t ptepde; 2005 vm_page_t mpte; 2006 2007 if (pmap == kernel_pmap) 2008 return (0); 2009 ptepde = *pmap_pde(pmap, va); 2010 mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME); 2011 return (pmap_unwire_ptp(pmap, mpte, free)); 2012 } 2013 2014 /* 2015 * Release a page table page reference after a failed attempt to create a 2016 * mapping. 2017 */ 2018 static void 2019 pmap_abort_ptp(pmap_t pmap, vm_offset_t va, vm_page_t mpte) 2020 { 2021 struct spglist free; 2022 2023 SLIST_INIT(&free); 2024 if (pmap_unwire_ptp(pmap, mpte, &free)) { 2025 /* 2026 * Although "va" was never mapped, paging-structure caches 2027 * could nonetheless have entries that refer to the freed 2028 * page table pages. Invalidate those entries. 2029 */ 2030 pmap_invalidate_page_int(pmap, va); 2031 vm_page_free_pages_toq(&free, true); 2032 } 2033 } 2034 2035 /* 2036 * Initialize the pmap for the swapper process. 2037 */ 2038 static void 2039 __CONCAT(PMTYPE, pinit0)(pmap_t pmap) 2040 { 2041 2042 PMAP_LOCK_INIT(pmap); 2043 pmap->pm_pdir = IdlePTD; 2044 #ifdef PMAP_PAE_COMP 2045 pmap->pm_pdpt = IdlePDPT; 2046 #endif 2047 vm_radix_init(&pmap->pm_root); 2048 CPU_ZERO(&pmap->pm_active); 2049 TAILQ_INIT(&pmap->pm_pvchunk); 2050 bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 2051 pmap_activate_boot(pmap); 2052 } 2053 2054 /* 2055 * Initialize a preallocated and zeroed pmap structure, 2056 * such as one in a vmspace structure. 2057 */ 2058 static int 2059 __CONCAT(PMTYPE, pinit)(pmap_t pmap) 2060 { 2061 int i; 2062 2063 /* 2064 * No need to allocate page table space yet but we do need a valid 2065 * page directory table. 2066 */ 2067 if (pmap->pm_pdir == NULL) { 2068 pmap->pm_pdir = (pd_entry_t *)kva_alloc(NBPTD); 2069 if (pmap->pm_pdir == NULL) 2070 return (0); 2071 #ifdef PMAP_PAE_COMP 2072 pmap->pm_pdpt = uma_zalloc(pdptzone, M_WAITOK | M_ZERO); 2073 KASSERT(((vm_offset_t)pmap->pm_pdpt & 2074 ((NPGPTD * sizeof(pdpt_entry_t)) - 1)) == 0, 2075 ("pmap_pinit: pdpt misaligned")); 2076 KASSERT(pmap_kextract((vm_offset_t)pmap->pm_pdpt) < (4ULL<<30), 2077 ("pmap_pinit: pdpt above 4g")); 2078 #endif 2079 vm_radix_init(&pmap->pm_root); 2080 } 2081 KASSERT(vm_radix_is_empty(&pmap->pm_root), 2082 ("pmap_pinit: pmap has reserved page table page(s)")); 2083 2084 /* 2085 * allocate the page directory page(s) 2086 */ 2087 for (i = 0; i < NPGPTD; i++) { 2088 pmap->pm_ptdpg[i] = vm_page_alloc_noobj(VM_ALLOC_WIRED | 2089 VM_ALLOC_ZERO | VM_ALLOC_WAITOK); 2090 #ifdef PMAP_PAE_COMP 2091 pmap->pm_pdpt[i] = VM_PAGE_TO_PHYS(pmap->pm_ptdpg[i]) | PG_V; 2092 #endif 2093 } 2094 2095 pmap_qenter((vm_offset_t)pmap->pm_pdir, pmap->pm_ptdpg, NPGPTD); 2096 #ifdef PMAP_PAE_COMP 2097 if ((cpu_feature & CPUID_PAT) == 0) { 2098 pmap_invalidate_cache_range( 2099 trunc_page((vm_offset_t)pmap->pm_pdpt), 2100 round_page((vm_offset_t)pmap->pm_pdpt + 2101 NPGPTD * sizeof(pdpt_entry_t))); 2102 } 2103 #endif 2104 2105 /* Install the trampoline mapping. */ 2106 pmap->pm_pdir[TRPTDI] = PTD[TRPTDI]; 2107 2108 CPU_ZERO(&pmap->pm_active); 2109 TAILQ_INIT(&pmap->pm_pvchunk); 2110 bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 2111 2112 return (1); 2113 } 2114 2115 /* 2116 * this routine is called if the page table page is not 2117 * mapped correctly. 2118 */ 2119 static vm_page_t 2120 _pmap_allocpte(pmap_t pmap, u_int ptepindex, u_int flags) 2121 { 2122 vm_paddr_t ptepa; 2123 vm_page_t m; 2124 2125 /* 2126 * Allocate a page table page. 2127 */ 2128 if ((m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) { 2129 if ((flags & PMAP_ENTER_NOSLEEP) == 0) { 2130 PMAP_UNLOCK(pmap); 2131 rw_wunlock(&pvh_global_lock); 2132 vm_wait(NULL); 2133 rw_wlock(&pvh_global_lock); 2134 PMAP_LOCK(pmap); 2135 } 2136 2137 /* 2138 * Indicate the need to retry. While waiting, the page table 2139 * page may have been allocated. 2140 */ 2141 return (NULL); 2142 } 2143 m->pindex = ptepindex; 2144 2145 /* 2146 * Map the pagetable page into the process address space, if 2147 * it isn't already there. 2148 */ 2149 2150 pmap->pm_stats.resident_count++; 2151 2152 ptepa = VM_PAGE_TO_PHYS(m); 2153 KASSERT((pmap->pm_pdir[ptepindex] & PG_V) == 0, 2154 ("%s: page directory entry %#jx is valid", 2155 __func__, (uintmax_t)pmap->pm_pdir[ptepindex])); 2156 pmap->pm_pdir[ptepindex] = 2157 (pd_entry_t)(ptepa | PG_U | PG_RW | PG_V | PG_A | PG_M); 2158 2159 return (m); 2160 } 2161 2162 static vm_page_t 2163 pmap_allocpte(pmap_t pmap, vm_offset_t va, u_int flags) 2164 { 2165 u_int ptepindex; 2166 pd_entry_t ptepa; 2167 vm_page_t m; 2168 2169 /* 2170 * Calculate pagetable page index 2171 */ 2172 ptepindex = va >> PDRSHIFT; 2173 retry: 2174 /* 2175 * Get the page directory entry 2176 */ 2177 ptepa = pmap->pm_pdir[ptepindex]; 2178 2179 /* 2180 * This supports switching from a 4MB page to a 2181 * normal 4K page. 2182 */ 2183 if (ptepa & PG_PS) { 2184 (void)pmap_demote_pde(pmap, &pmap->pm_pdir[ptepindex], va); 2185 ptepa = pmap->pm_pdir[ptepindex]; 2186 } 2187 2188 /* 2189 * If the page table page is mapped, we just increment the 2190 * hold count, and activate it. 2191 */ 2192 if (ptepa) { 2193 m = PHYS_TO_VM_PAGE(ptepa & PG_FRAME); 2194 m->ref_count++; 2195 } else { 2196 /* 2197 * Here if the pte page isn't mapped, or if it has 2198 * been deallocated. 2199 */ 2200 m = _pmap_allocpte(pmap, ptepindex, flags); 2201 if (m == NULL && (flags & PMAP_ENTER_NOSLEEP) == 0) 2202 goto retry; 2203 } 2204 return (m); 2205 } 2206 2207 /*************************************************** 2208 * Pmap allocation/deallocation routines. 2209 ***************************************************/ 2210 2211 /* 2212 * Release any resources held by the given physical map. 2213 * Called when a pmap initialized by pmap_pinit is being released. 2214 * Should only be called if the map contains no valid mappings. 2215 */ 2216 static void 2217 __CONCAT(PMTYPE, release)(pmap_t pmap) 2218 { 2219 vm_page_t m; 2220 int i; 2221 2222 KASSERT(pmap->pm_stats.resident_count == 0, 2223 ("pmap_release: pmap resident count %ld != 0", 2224 pmap->pm_stats.resident_count)); 2225 KASSERT(vm_radix_is_empty(&pmap->pm_root), 2226 ("pmap_release: pmap has reserved page table page(s)")); 2227 KASSERT(CPU_EMPTY(&pmap->pm_active), 2228 ("releasing active pmap %p", pmap)); 2229 2230 pmap_qremove((vm_offset_t)pmap->pm_pdir, NPGPTD); 2231 2232 for (i = 0; i < NPGPTD; i++) { 2233 m = pmap->pm_ptdpg[i]; 2234 #ifdef PMAP_PAE_COMP 2235 KASSERT(VM_PAGE_TO_PHYS(m) == (pmap->pm_pdpt[i] & PG_FRAME), 2236 ("pmap_release: got wrong ptd page")); 2237 #endif 2238 vm_page_unwire_noq(m); 2239 vm_page_free(m); 2240 } 2241 } 2242 2243 /* 2244 * grow the number of kernel page table entries, if needed 2245 */ 2246 static void 2247 __CONCAT(PMTYPE, growkernel)(vm_offset_t addr) 2248 { 2249 vm_paddr_t ptppaddr; 2250 vm_page_t nkpg; 2251 pd_entry_t newpdir; 2252 2253 mtx_assert(&kernel_map->system_mtx, MA_OWNED); 2254 addr = roundup2(addr, NBPDR); 2255 if (addr - 1 >= vm_map_max(kernel_map)) 2256 addr = vm_map_max(kernel_map); 2257 while (kernel_vm_end < addr) { 2258 if (pdir_pde(PTD, kernel_vm_end)) { 2259 kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK; 2260 if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) { 2261 kernel_vm_end = vm_map_max(kernel_map); 2262 break; 2263 } 2264 continue; 2265 } 2266 2267 nkpg = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED | 2268 VM_ALLOC_ZERO); 2269 if (nkpg == NULL) 2270 panic("pmap_growkernel: no memory to grow kernel"); 2271 nkpg->pindex = kernel_vm_end >> PDRSHIFT; 2272 nkpt++; 2273 2274 ptppaddr = VM_PAGE_TO_PHYS(nkpg); 2275 newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M); 2276 pdir_pde(KPTD, kernel_vm_end) = newpdir; 2277 2278 pmap_kenter_pde(kernel_vm_end, newpdir); 2279 kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK; 2280 if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) { 2281 kernel_vm_end = vm_map_max(kernel_map); 2282 break; 2283 } 2284 } 2285 } 2286 2287 /*************************************************** 2288 * page management routines. 2289 ***************************************************/ 2290 2291 static const uint32_t pc_freemask[_NPCM] = { 2292 [0 ... _NPCM - 2] = PC_FREEN, 2293 [_NPCM - 1] = PC_FREEL 2294 }; 2295 2296 #ifdef PV_STATS 2297 extern int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail; 2298 extern long pv_entry_frees, pv_entry_allocs; 2299 extern int pv_entry_spare; 2300 #endif 2301 2302 /* 2303 * We are in a serious low memory condition. Resort to 2304 * drastic measures to free some pages so we can allocate 2305 * another pv entry chunk. 2306 */ 2307 static vm_page_t 2308 pmap_pv_reclaim(pmap_t locked_pmap) 2309 { 2310 struct pch newtail; 2311 struct pv_chunk *pc; 2312 struct md_page *pvh; 2313 pd_entry_t *pde; 2314 pmap_t pmap; 2315 pt_entry_t *pte, tpte; 2316 pv_entry_t pv; 2317 vm_offset_t va; 2318 vm_page_t m, m_pc; 2319 struct spglist free; 2320 uint32_t inuse; 2321 int bit, field, freed; 2322 2323 PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED); 2324 pmap = NULL; 2325 m_pc = NULL; 2326 SLIST_INIT(&free); 2327 TAILQ_INIT(&newtail); 2328 while ((pc = TAILQ_FIRST(&pv_chunks)) != NULL && (pv_vafree == 0 || 2329 SLIST_EMPTY(&free))) { 2330 TAILQ_REMOVE(&pv_chunks, pc, pc_lru); 2331 if (pmap != pc->pc_pmap) { 2332 if (pmap != NULL) { 2333 pmap_invalidate_all_int(pmap); 2334 if (pmap != locked_pmap) 2335 PMAP_UNLOCK(pmap); 2336 } 2337 pmap = pc->pc_pmap; 2338 /* Avoid deadlock and lock recursion. */ 2339 if (pmap > locked_pmap) 2340 PMAP_LOCK(pmap); 2341 else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap)) { 2342 pmap = NULL; 2343 TAILQ_INSERT_TAIL(&newtail, pc, pc_lru); 2344 continue; 2345 } 2346 } 2347 2348 /* 2349 * Destroy every non-wired, 4 KB page mapping in the chunk. 2350 */ 2351 freed = 0; 2352 for (field = 0; field < _NPCM; field++) { 2353 for (inuse = ~pc->pc_map[field] & pc_freemask[field]; 2354 inuse != 0; inuse &= ~(1UL << bit)) { 2355 bit = bsfl(inuse); 2356 pv = &pc->pc_pventry[field * 32 + bit]; 2357 va = pv->pv_va; 2358 pde = pmap_pde(pmap, va); 2359 if ((*pde & PG_PS) != 0) 2360 continue; 2361 pte = __CONCAT(PMTYPE, pte)(pmap, va); 2362 tpte = *pte; 2363 if ((tpte & PG_W) == 0) 2364 tpte = pte_load_clear(pte); 2365 pmap_pte_release(pte); 2366 if ((tpte & PG_W) != 0) 2367 continue; 2368 KASSERT(tpte != 0, 2369 ("pmap_pv_reclaim: pmap %p va %x zero pte", 2370 pmap, va)); 2371 if ((tpte & PG_G) != 0) 2372 pmap_invalidate_page_int(pmap, va); 2373 m = PHYS_TO_VM_PAGE(tpte & PG_FRAME); 2374 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 2375 vm_page_dirty(m); 2376 if ((tpte & PG_A) != 0) 2377 vm_page_aflag_set(m, PGA_REFERENCED); 2378 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); 2379 if (TAILQ_EMPTY(&m->md.pv_list) && 2380 (m->flags & PG_FICTITIOUS) == 0) { 2381 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 2382 if (TAILQ_EMPTY(&pvh->pv_list)) { 2383 vm_page_aflag_clear(m, 2384 PGA_WRITEABLE); 2385 } 2386 } 2387 pc->pc_map[field] |= 1UL << bit; 2388 pmap_unuse_pt(pmap, va, &free); 2389 freed++; 2390 } 2391 } 2392 if (freed == 0) { 2393 TAILQ_INSERT_TAIL(&newtail, pc, pc_lru); 2394 continue; 2395 } 2396 /* Every freed mapping is for a 4 KB page. */ 2397 pmap->pm_stats.resident_count -= freed; 2398 PV_STAT(pv_entry_frees += freed); 2399 PV_STAT(pv_entry_spare += freed); 2400 pv_entry_count -= freed; 2401 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2402 for (field = 0; field < _NPCM; field++) 2403 if (pc->pc_map[field] != pc_freemask[field]) { 2404 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, 2405 pc_list); 2406 TAILQ_INSERT_TAIL(&newtail, pc, pc_lru); 2407 2408 /* 2409 * One freed pv entry in locked_pmap is 2410 * sufficient. 2411 */ 2412 if (pmap == locked_pmap) 2413 goto out; 2414 break; 2415 } 2416 if (field == _NPCM) { 2417 PV_STAT(pv_entry_spare -= _NPCPV); 2418 PV_STAT(pc_chunk_count--); 2419 PV_STAT(pc_chunk_frees++); 2420 /* Entire chunk is free; return it. */ 2421 m_pc = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc)); 2422 pmap_qremove((vm_offset_t)pc, 1); 2423 pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc); 2424 break; 2425 } 2426 } 2427 out: 2428 TAILQ_CONCAT(&pv_chunks, &newtail, pc_lru); 2429 if (pmap != NULL) { 2430 pmap_invalidate_all_int(pmap); 2431 if (pmap != locked_pmap) 2432 PMAP_UNLOCK(pmap); 2433 } 2434 if (m_pc == NULL && pv_vafree != 0 && SLIST_EMPTY(&free)) { 2435 m_pc = SLIST_FIRST(&free); 2436 SLIST_REMOVE_HEAD(&free, plinks.s.ss); 2437 /* Recycle a freed page table page. */ 2438 m_pc->ref_count = 1; 2439 } 2440 vm_page_free_pages_toq(&free, true); 2441 return (m_pc); 2442 } 2443 2444 /* 2445 * free the pv_entry back to the free list 2446 */ 2447 static void 2448 free_pv_entry(pmap_t pmap, pv_entry_t pv) 2449 { 2450 struct pv_chunk *pc; 2451 int idx, field, bit; 2452 2453 rw_assert(&pvh_global_lock, RA_WLOCKED); 2454 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2455 PV_STAT(pv_entry_frees++); 2456 PV_STAT(pv_entry_spare++); 2457 pv_entry_count--; 2458 pc = pv_to_chunk(pv); 2459 idx = pv - &pc->pc_pventry[0]; 2460 field = idx / 32; 2461 bit = idx % 32; 2462 pc->pc_map[field] |= 1ul << bit; 2463 for (idx = 0; idx < _NPCM; idx++) 2464 if (pc->pc_map[idx] != pc_freemask[idx]) { 2465 /* 2466 * 98% of the time, pc is already at the head of the 2467 * list. If it isn't already, move it to the head. 2468 */ 2469 if (__predict_false(TAILQ_FIRST(&pmap->pm_pvchunk) != 2470 pc)) { 2471 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2472 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, 2473 pc_list); 2474 } 2475 return; 2476 } 2477 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2478 free_pv_chunk(pc); 2479 } 2480 2481 static void 2482 free_pv_chunk(struct pv_chunk *pc) 2483 { 2484 vm_page_t m; 2485 2486 TAILQ_REMOVE(&pv_chunks, pc, pc_lru); 2487 PV_STAT(pv_entry_spare -= _NPCPV); 2488 PV_STAT(pc_chunk_count--); 2489 PV_STAT(pc_chunk_frees++); 2490 /* entire chunk is free, return it */ 2491 m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc)); 2492 pmap_qremove((vm_offset_t)pc, 1); 2493 vm_page_unwire_noq(m); 2494 vm_page_free(m); 2495 pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc); 2496 } 2497 2498 /* 2499 * get a new pv_entry, allocating a block from the system 2500 * when needed. 2501 */ 2502 static pv_entry_t 2503 get_pv_entry(pmap_t pmap, boolean_t try) 2504 { 2505 static const struct timeval printinterval = { 60, 0 }; 2506 static struct timeval lastprint; 2507 int bit, field; 2508 pv_entry_t pv; 2509 struct pv_chunk *pc; 2510 vm_page_t m; 2511 2512 rw_assert(&pvh_global_lock, RA_WLOCKED); 2513 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2514 PV_STAT(pv_entry_allocs++); 2515 pv_entry_count++; 2516 if (pv_entry_count > pv_entry_high_water) 2517 if (ratecheck(&lastprint, &printinterval)) 2518 printf("Approaching the limit on PV entries, consider " 2519 "increasing either the vm.pmap.shpgperproc or the " 2520 "vm.pmap.pv_entries tunable.\n"); 2521 retry: 2522 pc = TAILQ_FIRST(&pmap->pm_pvchunk); 2523 if (pc != NULL) { 2524 for (field = 0; field < _NPCM; field++) { 2525 if (pc->pc_map[field]) { 2526 bit = bsfl(pc->pc_map[field]); 2527 break; 2528 } 2529 } 2530 if (field < _NPCM) { 2531 pv = &pc->pc_pventry[field * 32 + bit]; 2532 pc->pc_map[field] &= ~(1ul << bit); 2533 /* If this was the last item, move it to tail */ 2534 for (field = 0; field < _NPCM; field++) 2535 if (pc->pc_map[field] != 0) { 2536 PV_STAT(pv_entry_spare--); 2537 return (pv); /* not full, return */ 2538 } 2539 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2540 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list); 2541 PV_STAT(pv_entry_spare--); 2542 return (pv); 2543 } 2544 } 2545 /* 2546 * Access to the ptelist "pv_vafree" is synchronized by the pvh 2547 * global lock. If "pv_vafree" is currently non-empty, it will 2548 * remain non-empty until pmap_ptelist_alloc() completes. 2549 */ 2550 if (pv_vafree == 0 || 2551 (m = vm_page_alloc_noobj(VM_ALLOC_WIRED)) == NULL) { 2552 if (try) { 2553 pv_entry_count--; 2554 PV_STAT(pc_chunk_tryfail++); 2555 return (NULL); 2556 } 2557 m = pmap_pv_reclaim(pmap); 2558 if (m == NULL) 2559 goto retry; 2560 } 2561 PV_STAT(pc_chunk_count++); 2562 PV_STAT(pc_chunk_allocs++); 2563 pc = (struct pv_chunk *)pmap_ptelist_alloc(&pv_vafree); 2564 pmap_qenter((vm_offset_t)pc, &m, 1); 2565 pc->pc_pmap = pmap; 2566 pc->pc_map[0] = pc_freemask[0] & ~1ul; /* preallocated bit 0 */ 2567 for (field = 1; field < _NPCM; field++) 2568 pc->pc_map[field] = pc_freemask[field]; 2569 TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru); 2570 pv = &pc->pc_pventry[0]; 2571 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); 2572 PV_STAT(pv_entry_spare += _NPCPV - 1); 2573 return (pv); 2574 } 2575 2576 static __inline pv_entry_t 2577 pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va) 2578 { 2579 pv_entry_t pv; 2580 2581 rw_assert(&pvh_global_lock, RA_WLOCKED); 2582 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { 2583 if (pmap == PV_PMAP(pv) && va == pv->pv_va) { 2584 TAILQ_REMOVE(&pvh->pv_list, pv, pv_next); 2585 break; 2586 } 2587 } 2588 return (pv); 2589 } 2590 2591 static void 2592 pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa) 2593 { 2594 struct md_page *pvh; 2595 pv_entry_t pv; 2596 vm_offset_t va_last; 2597 vm_page_t m; 2598 2599 rw_assert(&pvh_global_lock, RA_WLOCKED); 2600 KASSERT((pa & PDRMASK) == 0, 2601 ("pmap_pv_demote_pde: pa is not 4mpage aligned")); 2602 2603 /* 2604 * Transfer the 4mpage's pv entry for this mapping to the first 2605 * page's pv list. 2606 */ 2607 pvh = pa_to_pvh(pa); 2608 va = trunc_4mpage(va); 2609 pv = pmap_pvh_remove(pvh, pmap, va); 2610 KASSERT(pv != NULL, ("pmap_pv_demote_pde: pv not found")); 2611 m = PHYS_TO_VM_PAGE(pa); 2612 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); 2613 /* Instantiate the remaining NPTEPG - 1 pv entries. */ 2614 va_last = va + NBPDR - PAGE_SIZE; 2615 do { 2616 m++; 2617 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2618 ("pmap_pv_demote_pde: page %p is not managed", m)); 2619 va += PAGE_SIZE; 2620 pmap_insert_entry(pmap, va, m); 2621 } while (va < va_last); 2622 } 2623 2624 #if VM_NRESERVLEVEL > 0 2625 static void 2626 pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa) 2627 { 2628 struct md_page *pvh; 2629 pv_entry_t pv; 2630 vm_offset_t va_last; 2631 vm_page_t m; 2632 2633 rw_assert(&pvh_global_lock, RA_WLOCKED); 2634 KASSERT((pa & PDRMASK) == 0, 2635 ("pmap_pv_promote_pde: pa is not 4mpage aligned")); 2636 2637 /* 2638 * Transfer the first page's pv entry for this mapping to the 2639 * 4mpage's pv list. Aside from avoiding the cost of a call 2640 * to get_pv_entry(), a transfer avoids the possibility that 2641 * get_pv_entry() calls pmap_collect() and that pmap_collect() 2642 * removes one of the mappings that is being promoted. 2643 */ 2644 m = PHYS_TO_VM_PAGE(pa); 2645 va = trunc_4mpage(va); 2646 pv = pmap_pvh_remove(&m->md, pmap, va); 2647 KASSERT(pv != NULL, ("pmap_pv_promote_pde: pv not found")); 2648 pvh = pa_to_pvh(pa); 2649 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next); 2650 /* Free the remaining NPTEPG - 1 pv entries. */ 2651 va_last = va + NBPDR - PAGE_SIZE; 2652 do { 2653 m++; 2654 va += PAGE_SIZE; 2655 pmap_pvh_free(&m->md, pmap, va); 2656 } while (va < va_last); 2657 } 2658 #endif /* VM_NRESERVLEVEL > 0 */ 2659 2660 static void 2661 pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va) 2662 { 2663 pv_entry_t pv; 2664 2665 pv = pmap_pvh_remove(pvh, pmap, va); 2666 KASSERT(pv != NULL, ("pmap_pvh_free: pv not found")); 2667 free_pv_entry(pmap, pv); 2668 } 2669 2670 static void 2671 pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va) 2672 { 2673 struct md_page *pvh; 2674 2675 rw_assert(&pvh_global_lock, RA_WLOCKED); 2676 pmap_pvh_free(&m->md, pmap, va); 2677 if (TAILQ_EMPTY(&m->md.pv_list) && (m->flags & PG_FICTITIOUS) == 0) { 2678 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 2679 if (TAILQ_EMPTY(&pvh->pv_list)) 2680 vm_page_aflag_clear(m, PGA_WRITEABLE); 2681 } 2682 } 2683 2684 /* 2685 * Create a pv entry for page at pa for 2686 * (pmap, va). 2687 */ 2688 static void 2689 pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m) 2690 { 2691 pv_entry_t pv; 2692 2693 rw_assert(&pvh_global_lock, RA_WLOCKED); 2694 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2695 pv = get_pv_entry(pmap, FALSE); 2696 pv->pv_va = va; 2697 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); 2698 } 2699 2700 /* 2701 * Conditionally create a pv entry. 2702 */ 2703 static boolean_t 2704 pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m) 2705 { 2706 pv_entry_t pv; 2707 2708 rw_assert(&pvh_global_lock, RA_WLOCKED); 2709 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2710 if (pv_entry_count < pv_entry_high_water && 2711 (pv = get_pv_entry(pmap, TRUE)) != NULL) { 2712 pv->pv_va = va; 2713 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); 2714 return (TRUE); 2715 } else 2716 return (FALSE); 2717 } 2718 2719 /* 2720 * Create the pv entries for each of the pages within a superpage. 2721 */ 2722 static bool 2723 pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, pd_entry_t pde, u_int flags) 2724 { 2725 struct md_page *pvh; 2726 pv_entry_t pv; 2727 bool noreclaim; 2728 2729 rw_assert(&pvh_global_lock, RA_WLOCKED); 2730 noreclaim = (flags & PMAP_ENTER_NORECLAIM) != 0; 2731 if ((noreclaim && pv_entry_count >= pv_entry_high_water) || 2732 (pv = get_pv_entry(pmap, noreclaim)) == NULL) 2733 return (false); 2734 pv->pv_va = va; 2735 pvh = pa_to_pvh(pde & PG_PS_FRAME); 2736 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next); 2737 return (true); 2738 } 2739 2740 /* 2741 * Fills a page table page with mappings to consecutive physical pages. 2742 */ 2743 static void 2744 pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte) 2745 { 2746 pt_entry_t *pte; 2747 2748 for (pte = firstpte; pte < firstpte + NPTEPG; pte++) { 2749 *pte = newpte; 2750 newpte += PAGE_SIZE; 2751 } 2752 } 2753 2754 /* 2755 * Tries to demote a 2- or 4MB page mapping. If demotion fails, the 2756 * 2- or 4MB page mapping is invalidated. 2757 */ 2758 static boolean_t 2759 pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va) 2760 { 2761 pd_entry_t newpde, oldpde; 2762 pt_entry_t *firstpte, newpte; 2763 vm_paddr_t mptepa; 2764 vm_page_t mpte; 2765 struct spglist free; 2766 vm_offset_t sva; 2767 2768 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2769 oldpde = *pde; 2770 KASSERT((oldpde & (PG_PS | PG_V)) == (PG_PS | PG_V), 2771 ("pmap_demote_pde: oldpde is missing PG_PS and/or PG_V")); 2772 if ((oldpde & PG_A) == 0 || (mpte = pmap_remove_pt_page(pmap, va)) == 2773 NULL) { 2774 KASSERT((oldpde & PG_W) == 0, 2775 ("pmap_demote_pde: page table page for a wired mapping" 2776 " is missing")); 2777 2778 /* 2779 * Invalidate the 2- or 4MB page mapping and return 2780 * "failure" if the mapping was never accessed or the 2781 * allocation of the new page table page fails. 2782 */ 2783 if ((oldpde & PG_A) == 0 || 2784 (mpte = vm_page_alloc_noobj(VM_ALLOC_WIRED)) == NULL) { 2785 SLIST_INIT(&free); 2786 sva = trunc_4mpage(va); 2787 pmap_remove_pde(pmap, pde, sva, &free); 2788 if ((oldpde & PG_G) == 0) 2789 pmap_invalidate_pde_page(pmap, sva, oldpde); 2790 vm_page_free_pages_toq(&free, true); 2791 CTR2(KTR_PMAP, "pmap_demote_pde: failure for va %#x" 2792 " in pmap %p", va, pmap); 2793 return (FALSE); 2794 } 2795 mpte->pindex = va >> PDRSHIFT; 2796 if (pmap != kernel_pmap) { 2797 mpte->ref_count = NPTEPG; 2798 pmap->pm_stats.resident_count++; 2799 } 2800 } 2801 mptepa = VM_PAGE_TO_PHYS(mpte); 2802 2803 /* 2804 * If the page mapping is in the kernel's address space, then the 2805 * KPTmap can provide access to the page table page. Otherwise, 2806 * temporarily map the page table page (mpte) into the kernel's 2807 * address space at either PADDR1 or PADDR2. 2808 */ 2809 if (pmap == kernel_pmap) 2810 firstpte = &KPTmap[i386_btop(trunc_4mpage(va))]; 2811 else if (curthread->td_pinned > 0 && rw_wowned(&pvh_global_lock)) { 2812 if ((*PMAP1 & PG_FRAME) != mptepa) { 2813 *PMAP1 = mptepa | PG_RW | PG_V | PG_A | PG_M; 2814 #ifdef SMP 2815 PMAP1cpu = PCPU_GET(cpuid); 2816 #endif 2817 invlcaddr(PADDR1); 2818 PMAP1changed++; 2819 } else 2820 #ifdef SMP 2821 if (PMAP1cpu != PCPU_GET(cpuid)) { 2822 PMAP1cpu = PCPU_GET(cpuid); 2823 invlcaddr(PADDR1); 2824 PMAP1changedcpu++; 2825 } else 2826 #endif 2827 PMAP1unchanged++; 2828 firstpte = PADDR1; 2829 } else { 2830 mtx_lock(&PMAP2mutex); 2831 if ((*PMAP2 & PG_FRAME) != mptepa) { 2832 *PMAP2 = mptepa | PG_RW | PG_V | PG_A | PG_M; 2833 pmap_invalidate_page_int(kernel_pmap, 2834 (vm_offset_t)PADDR2); 2835 } 2836 firstpte = PADDR2; 2837 } 2838 newpde = mptepa | PG_M | PG_A | (oldpde & PG_U) | PG_RW | PG_V; 2839 KASSERT((oldpde & PG_A) != 0, 2840 ("pmap_demote_pde: oldpde is missing PG_A")); 2841 KASSERT((oldpde & (PG_M | PG_RW)) != PG_RW, 2842 ("pmap_demote_pde: oldpde is missing PG_M")); 2843 newpte = oldpde & ~PG_PS; 2844 if ((newpte & PG_PDE_PAT) != 0) 2845 newpte ^= PG_PDE_PAT | PG_PTE_PAT; 2846 2847 /* 2848 * If the page table page is not leftover from an earlier promotion, 2849 * initialize it. 2850 */ 2851 if (vm_page_none_valid(mpte)) 2852 pmap_fill_ptp(firstpte, newpte); 2853 2854 KASSERT((*firstpte & PG_FRAME) == (newpte & PG_FRAME), 2855 ("pmap_demote_pde: firstpte and newpte map different physical" 2856 " addresses")); 2857 2858 /* 2859 * If the mapping has changed attributes, update the page table 2860 * entries. 2861 */ 2862 if ((*firstpte & PG_PTE_PROMOTE) != (newpte & PG_PTE_PROMOTE)) 2863 pmap_fill_ptp(firstpte, newpte); 2864 2865 /* 2866 * Demote the mapping. This pmap is locked. The old PDE has 2867 * PG_A set. If the old PDE has PG_RW set, it also has PG_M 2868 * set. Thus, there is no danger of a race with another 2869 * processor changing the setting of PG_A and/or PG_M between 2870 * the read above and the store below. 2871 */ 2872 if (workaround_erratum383) 2873 pmap_update_pde(pmap, va, pde, newpde); 2874 else if (pmap == kernel_pmap) 2875 pmap_kenter_pde(va, newpde); 2876 else 2877 pde_store(pde, newpde); 2878 if (firstpte == PADDR2) 2879 mtx_unlock(&PMAP2mutex); 2880 2881 /* 2882 * Invalidate the recursive mapping of the page table page. 2883 */ 2884 pmap_invalidate_page_int(pmap, (vm_offset_t)vtopte(va)); 2885 2886 /* 2887 * Demote the pv entry. This depends on the earlier demotion 2888 * of the mapping. Specifically, the (re)creation of a per- 2889 * page pv entry might trigger the execution of pmap_collect(), 2890 * which might reclaim a newly (re)created per-page pv entry 2891 * and destroy the associated mapping. In order to destroy 2892 * the mapping, the PDE must have already changed from mapping 2893 * the 2mpage to referencing the page table page. 2894 */ 2895 if ((oldpde & PG_MANAGED) != 0) 2896 pmap_pv_demote_pde(pmap, va, oldpde & PG_PS_FRAME); 2897 2898 pmap_pde_demotions++; 2899 CTR2(KTR_PMAP, "pmap_demote_pde: success for va %#x" 2900 " in pmap %p", va, pmap); 2901 return (TRUE); 2902 } 2903 2904 /* 2905 * Removes a 2- or 4MB page mapping from the kernel pmap. 2906 */ 2907 static void 2908 pmap_remove_kernel_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va) 2909 { 2910 pd_entry_t newpde; 2911 vm_paddr_t mptepa; 2912 vm_page_t mpte; 2913 2914 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2915 mpte = pmap_remove_pt_page(pmap, va); 2916 if (mpte == NULL) 2917 panic("pmap_remove_kernel_pde: Missing pt page."); 2918 2919 mptepa = VM_PAGE_TO_PHYS(mpte); 2920 newpde = mptepa | PG_M | PG_A | PG_RW | PG_V; 2921 2922 /* 2923 * If this page table page was unmapped by a promotion, then it 2924 * contains valid mappings. Zero it to invalidate those mappings. 2925 */ 2926 if (vm_page_any_valid(mpte)) 2927 pagezero((void *)&KPTmap[i386_btop(trunc_4mpage(va))]); 2928 2929 /* 2930 * Remove the mapping. 2931 */ 2932 if (workaround_erratum383) 2933 pmap_update_pde(pmap, va, pde, newpde); 2934 else 2935 pmap_kenter_pde(va, newpde); 2936 2937 /* 2938 * Invalidate the recursive mapping of the page table page. 2939 */ 2940 pmap_invalidate_page_int(pmap, (vm_offset_t)vtopte(va)); 2941 } 2942 2943 /* 2944 * pmap_remove_pde: do the things to unmap a superpage in a process 2945 */ 2946 static void 2947 pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva, 2948 struct spglist *free) 2949 { 2950 struct md_page *pvh; 2951 pd_entry_t oldpde; 2952 vm_offset_t eva, va; 2953 vm_page_t m, mpte; 2954 2955 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2956 KASSERT((sva & PDRMASK) == 0, 2957 ("pmap_remove_pde: sva is not 4mpage aligned")); 2958 oldpde = pte_load_clear(pdq); 2959 if (oldpde & PG_W) 2960 pmap->pm_stats.wired_count -= NBPDR / PAGE_SIZE; 2961 2962 /* 2963 * Machines that don't support invlpg, also don't support 2964 * PG_G. 2965 */ 2966 if ((oldpde & PG_G) != 0) 2967 pmap_invalidate_pde_page(kernel_pmap, sva, oldpde); 2968 2969 pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE; 2970 if (oldpde & PG_MANAGED) { 2971 pvh = pa_to_pvh(oldpde & PG_PS_FRAME); 2972 pmap_pvh_free(pvh, pmap, sva); 2973 eva = sva + NBPDR; 2974 for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME); 2975 va < eva; va += PAGE_SIZE, m++) { 2976 if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW)) 2977 vm_page_dirty(m); 2978 if (oldpde & PG_A) 2979 vm_page_aflag_set(m, PGA_REFERENCED); 2980 if (TAILQ_EMPTY(&m->md.pv_list) && 2981 TAILQ_EMPTY(&pvh->pv_list)) 2982 vm_page_aflag_clear(m, PGA_WRITEABLE); 2983 } 2984 } 2985 if (pmap == kernel_pmap) { 2986 pmap_remove_kernel_pde(pmap, pdq, sva); 2987 } else { 2988 mpte = pmap_remove_pt_page(pmap, sva); 2989 if (mpte != NULL) { 2990 KASSERT(vm_page_all_valid(mpte), 2991 ("pmap_remove_pde: pte page not promoted")); 2992 pmap->pm_stats.resident_count--; 2993 KASSERT(mpte->ref_count == NPTEPG, 2994 ("pmap_remove_pde: pte page ref count error")); 2995 mpte->ref_count = 0; 2996 pmap_add_delayed_free_list(mpte, free, FALSE); 2997 } 2998 } 2999 } 3000 3001 /* 3002 * pmap_remove_pte: do the things to unmap a page in a process 3003 */ 3004 static int 3005 pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va, 3006 struct spglist *free) 3007 { 3008 pt_entry_t oldpte; 3009 vm_page_t m; 3010 3011 rw_assert(&pvh_global_lock, RA_WLOCKED); 3012 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3013 oldpte = pte_load_clear(ptq); 3014 KASSERT(oldpte != 0, 3015 ("pmap_remove_pte: pmap %p va %x zero pte", pmap, va)); 3016 if (oldpte & PG_W) 3017 pmap->pm_stats.wired_count -= 1; 3018 /* 3019 * Machines that don't support invlpg, also don't support 3020 * PG_G. 3021 */ 3022 if (oldpte & PG_G) 3023 pmap_invalidate_page_int(kernel_pmap, va); 3024 pmap->pm_stats.resident_count -= 1; 3025 if (oldpte & PG_MANAGED) { 3026 m = PHYS_TO_VM_PAGE(oldpte & PG_FRAME); 3027 if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 3028 vm_page_dirty(m); 3029 if (oldpte & PG_A) 3030 vm_page_aflag_set(m, PGA_REFERENCED); 3031 pmap_remove_entry(pmap, m, va); 3032 } 3033 return (pmap_unuse_pt(pmap, va, free)); 3034 } 3035 3036 /* 3037 * Remove a single page from a process address space 3038 */ 3039 static void 3040 pmap_remove_page(pmap_t pmap, vm_offset_t va, struct spglist *free) 3041 { 3042 pt_entry_t *pte; 3043 3044 rw_assert(&pvh_global_lock, RA_WLOCKED); 3045 KASSERT(curthread->td_pinned > 0, ("curthread not pinned")); 3046 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3047 if ((pte = pmap_pte_quick(pmap, va)) == NULL || *pte == 0) 3048 return; 3049 pmap_remove_pte(pmap, pte, va, free); 3050 pmap_invalidate_page_int(pmap, va); 3051 } 3052 3053 /* 3054 * Removes the specified range of addresses from the page table page. 3055 */ 3056 static bool 3057 pmap_remove_ptes(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, 3058 struct spglist *free) 3059 { 3060 pt_entry_t *pte; 3061 bool anyvalid; 3062 3063 rw_assert(&pvh_global_lock, RA_WLOCKED); 3064 KASSERT(curthread->td_pinned > 0, ("curthread not pinned")); 3065 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3066 anyvalid = false; 3067 for (pte = pmap_pte_quick(pmap, sva); sva != eva; pte++, 3068 sva += PAGE_SIZE) { 3069 if (*pte == 0) 3070 continue; 3071 3072 /* 3073 * The TLB entry for a PG_G mapping is invalidated by 3074 * pmap_remove_pte(). 3075 */ 3076 if ((*pte & PG_G) == 0) 3077 anyvalid = true; 3078 3079 if (pmap_remove_pte(pmap, pte, sva, free)) 3080 break; 3081 } 3082 return (anyvalid); 3083 } 3084 3085 /* 3086 * Remove the given range of addresses from the specified map. 3087 * 3088 * It is assumed that the start and end are properly 3089 * rounded to the page size. 3090 */ 3091 static void 3092 __CONCAT(PMTYPE, remove)(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 3093 { 3094 vm_offset_t pdnxt; 3095 pd_entry_t ptpaddr; 3096 struct spglist free; 3097 int anyvalid; 3098 3099 /* 3100 * Perform an unsynchronized read. This is, however, safe. 3101 */ 3102 if (pmap->pm_stats.resident_count == 0) 3103 return; 3104 3105 anyvalid = 0; 3106 SLIST_INIT(&free); 3107 3108 rw_wlock(&pvh_global_lock); 3109 sched_pin(); 3110 PMAP_LOCK(pmap); 3111 3112 /* 3113 * special handling of removing one page. a very 3114 * common operation and easy to short circuit some 3115 * code. 3116 */ 3117 if ((sva + PAGE_SIZE == eva) && 3118 ((pmap->pm_pdir[(sva >> PDRSHIFT)] & PG_PS) == 0)) { 3119 pmap_remove_page(pmap, sva, &free); 3120 goto out; 3121 } 3122 3123 for (; sva < eva; sva = pdnxt) { 3124 u_int pdirindex; 3125 3126 /* 3127 * Calculate index for next page table. 3128 */ 3129 pdnxt = (sva + NBPDR) & ~PDRMASK; 3130 if (pdnxt < sva) 3131 pdnxt = eva; 3132 if (pmap->pm_stats.resident_count == 0) 3133 break; 3134 3135 pdirindex = sva >> PDRSHIFT; 3136 ptpaddr = pmap->pm_pdir[pdirindex]; 3137 3138 /* 3139 * Weed out invalid mappings. Note: we assume that the page 3140 * directory table is always allocated, and in kernel virtual. 3141 */ 3142 if (ptpaddr == 0) 3143 continue; 3144 3145 /* 3146 * Check for large page. 3147 */ 3148 if ((ptpaddr & PG_PS) != 0) { 3149 /* 3150 * Are we removing the entire large page? If not, 3151 * demote the mapping and fall through. 3152 */ 3153 if (sva + NBPDR == pdnxt && eva >= pdnxt) { 3154 /* 3155 * The TLB entry for a PG_G mapping is 3156 * invalidated by pmap_remove_pde(). 3157 */ 3158 if ((ptpaddr & PG_G) == 0) 3159 anyvalid = 1; 3160 pmap_remove_pde(pmap, 3161 &pmap->pm_pdir[pdirindex], sva, &free); 3162 continue; 3163 } else if (!pmap_demote_pde(pmap, 3164 &pmap->pm_pdir[pdirindex], sva)) { 3165 /* The large page mapping was destroyed. */ 3166 continue; 3167 } 3168 } 3169 3170 /* 3171 * Limit our scan to either the end of the va represented 3172 * by the current page table page, or to the end of the 3173 * range being removed. 3174 */ 3175 if (pdnxt > eva) 3176 pdnxt = eva; 3177 3178 if (pmap_remove_ptes(pmap, sva, pdnxt, &free)) 3179 anyvalid = 1; 3180 } 3181 out: 3182 sched_unpin(); 3183 if (anyvalid) 3184 pmap_invalidate_all_int(pmap); 3185 rw_wunlock(&pvh_global_lock); 3186 PMAP_UNLOCK(pmap); 3187 vm_page_free_pages_toq(&free, true); 3188 } 3189 3190 /* 3191 * Routine: pmap_remove_all 3192 * Function: 3193 * Removes this physical page from 3194 * all physical maps in which it resides. 3195 * Reflects back modify bits to the pager. 3196 * 3197 * Notes: 3198 * Original versions of this routine were very 3199 * inefficient because they iteratively called 3200 * pmap_remove (slow...) 3201 */ 3202 3203 static void 3204 __CONCAT(PMTYPE, remove_all)(vm_page_t m) 3205 { 3206 struct md_page *pvh; 3207 pv_entry_t pv; 3208 pmap_t pmap; 3209 pt_entry_t *pte, tpte; 3210 pd_entry_t *pde; 3211 vm_offset_t va; 3212 struct spglist free; 3213 3214 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3215 ("pmap_remove_all: page %p is not managed", m)); 3216 SLIST_INIT(&free); 3217 rw_wlock(&pvh_global_lock); 3218 sched_pin(); 3219 if ((m->flags & PG_FICTITIOUS) != 0) 3220 goto small_mappings; 3221 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 3222 while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) { 3223 va = pv->pv_va; 3224 pmap = PV_PMAP(pv); 3225 PMAP_LOCK(pmap); 3226 pde = pmap_pde(pmap, va); 3227 (void)pmap_demote_pde(pmap, pde, va); 3228 PMAP_UNLOCK(pmap); 3229 } 3230 small_mappings: 3231 while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { 3232 pmap = PV_PMAP(pv); 3233 PMAP_LOCK(pmap); 3234 pmap->pm_stats.resident_count--; 3235 pde = pmap_pde(pmap, pv->pv_va); 3236 KASSERT((*pde & PG_PS) == 0, ("pmap_remove_all: found" 3237 " a 4mpage in page %p's pv list", m)); 3238 pte = pmap_pte_quick(pmap, pv->pv_va); 3239 tpte = pte_load_clear(pte); 3240 KASSERT(tpte != 0, ("pmap_remove_all: pmap %p va %x zero pte", 3241 pmap, pv->pv_va)); 3242 if (tpte & PG_W) 3243 pmap->pm_stats.wired_count--; 3244 if (tpte & PG_A) 3245 vm_page_aflag_set(m, PGA_REFERENCED); 3246 3247 /* 3248 * Update the vm_page_t clean and reference bits. 3249 */ 3250 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 3251 vm_page_dirty(m); 3252 pmap_unuse_pt(pmap, pv->pv_va, &free); 3253 pmap_invalidate_page_int(pmap, pv->pv_va); 3254 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); 3255 free_pv_entry(pmap, pv); 3256 PMAP_UNLOCK(pmap); 3257 } 3258 vm_page_aflag_clear(m, PGA_WRITEABLE); 3259 sched_unpin(); 3260 rw_wunlock(&pvh_global_lock); 3261 vm_page_free_pages_toq(&free, true); 3262 } 3263 3264 /* 3265 * pmap_protect_pde: do the things to protect a 4mpage in a process 3266 */ 3267 static boolean_t 3268 pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot) 3269 { 3270 pd_entry_t newpde, oldpde; 3271 vm_page_t m, mt; 3272 boolean_t anychanged; 3273 3274 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3275 KASSERT((sva & PDRMASK) == 0, 3276 ("pmap_protect_pde: sva is not 4mpage aligned")); 3277 anychanged = FALSE; 3278 retry: 3279 oldpde = newpde = *pde; 3280 if ((prot & VM_PROT_WRITE) == 0) { 3281 if ((oldpde & (PG_MANAGED | PG_M | PG_RW)) == 3282 (PG_MANAGED | PG_M | PG_RW)) { 3283 m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME); 3284 for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++) 3285 vm_page_dirty(mt); 3286 } 3287 newpde &= ~(PG_RW | PG_M); 3288 } 3289 #ifdef PMAP_PAE_COMP 3290 if ((prot & VM_PROT_EXECUTE) == 0 && !i386_read_exec) 3291 newpde |= pg_nx; 3292 #endif 3293 if (newpde != oldpde) { 3294 /* 3295 * As an optimization to future operations on this PDE, clear 3296 * PG_PROMOTED. The impending invalidation will remove any 3297 * lingering 4KB page mappings from the TLB. 3298 */ 3299 if (!pde_cmpset(pde, oldpde, newpde & ~PG_PROMOTED)) 3300 goto retry; 3301 if ((oldpde & PG_G) != 0) 3302 pmap_invalidate_pde_page(kernel_pmap, sva, oldpde); 3303 else 3304 anychanged = TRUE; 3305 } 3306 return (anychanged); 3307 } 3308 3309 /* 3310 * Set the physical protection on the 3311 * specified range of this map as requested. 3312 */ 3313 static void 3314 __CONCAT(PMTYPE, protect)(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, 3315 vm_prot_t prot) 3316 { 3317 vm_offset_t pdnxt; 3318 pd_entry_t ptpaddr; 3319 pt_entry_t *pte; 3320 boolean_t anychanged, pv_lists_locked; 3321 3322 KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot)); 3323 if (prot == VM_PROT_NONE) { 3324 pmap_remove(pmap, sva, eva); 3325 return; 3326 } 3327 3328 #ifdef PMAP_PAE_COMP 3329 if ((prot & (VM_PROT_WRITE | VM_PROT_EXECUTE)) == 3330 (VM_PROT_WRITE | VM_PROT_EXECUTE)) 3331 return; 3332 #else 3333 if (prot & VM_PROT_WRITE) 3334 return; 3335 #endif 3336 3337 if (pmap_is_current(pmap)) 3338 pv_lists_locked = FALSE; 3339 else { 3340 pv_lists_locked = TRUE; 3341 resume: 3342 rw_wlock(&pvh_global_lock); 3343 sched_pin(); 3344 } 3345 anychanged = FALSE; 3346 3347 PMAP_LOCK(pmap); 3348 for (; sva < eva; sva = pdnxt) { 3349 pt_entry_t obits, pbits; 3350 u_int pdirindex; 3351 3352 pdnxt = (sva + NBPDR) & ~PDRMASK; 3353 if (pdnxt < sva) 3354 pdnxt = eva; 3355 3356 pdirindex = sva >> PDRSHIFT; 3357 ptpaddr = pmap->pm_pdir[pdirindex]; 3358 3359 /* 3360 * Weed out invalid mappings. Note: we assume that the page 3361 * directory table is always allocated, and in kernel virtual. 3362 */ 3363 if (ptpaddr == 0) 3364 continue; 3365 3366 /* 3367 * Check for large page. 3368 */ 3369 if ((ptpaddr & PG_PS) != 0) { 3370 /* 3371 * Are we protecting the entire large page? If not, 3372 * demote the mapping and fall through. 3373 */ 3374 if (sva + NBPDR == pdnxt && eva >= pdnxt) { 3375 /* 3376 * The TLB entry for a PG_G mapping is 3377 * invalidated by pmap_protect_pde(). 3378 */ 3379 if (pmap_protect_pde(pmap, 3380 &pmap->pm_pdir[pdirindex], sva, prot)) 3381 anychanged = TRUE; 3382 continue; 3383 } else { 3384 if (!pv_lists_locked) { 3385 pv_lists_locked = TRUE; 3386 if (!rw_try_wlock(&pvh_global_lock)) { 3387 if (anychanged) 3388 pmap_invalidate_all_int( 3389 pmap); 3390 PMAP_UNLOCK(pmap); 3391 goto resume; 3392 } 3393 sched_pin(); 3394 } 3395 if (!pmap_demote_pde(pmap, 3396 &pmap->pm_pdir[pdirindex], sva)) { 3397 /* 3398 * The large page mapping was 3399 * destroyed. 3400 */ 3401 continue; 3402 } 3403 } 3404 } 3405 3406 if (pdnxt > eva) 3407 pdnxt = eva; 3408 3409 for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++, 3410 sva += PAGE_SIZE) { 3411 vm_page_t m; 3412 3413 retry: 3414 /* 3415 * Regardless of whether a pte is 32 or 64 bits in 3416 * size, PG_RW, PG_A, and PG_M are among the least 3417 * significant 32 bits. 3418 */ 3419 obits = pbits = *pte; 3420 if ((pbits & PG_V) == 0) 3421 continue; 3422 3423 if ((prot & VM_PROT_WRITE) == 0) { 3424 if ((pbits & (PG_MANAGED | PG_M | PG_RW)) == 3425 (PG_MANAGED | PG_M | PG_RW)) { 3426 m = PHYS_TO_VM_PAGE(pbits & PG_FRAME); 3427 vm_page_dirty(m); 3428 } 3429 pbits &= ~(PG_RW | PG_M); 3430 } 3431 #ifdef PMAP_PAE_COMP 3432 if ((prot & VM_PROT_EXECUTE) == 0 && !i386_read_exec) 3433 pbits |= pg_nx; 3434 #endif 3435 3436 if (pbits != obits) { 3437 #ifdef PMAP_PAE_COMP 3438 if (!atomic_cmpset_64(pte, obits, pbits)) 3439 goto retry; 3440 #else 3441 if (!atomic_cmpset_int((u_int *)pte, obits, 3442 pbits)) 3443 goto retry; 3444 #endif 3445 if (obits & PG_G) 3446 pmap_invalidate_page_int(pmap, sva); 3447 else 3448 anychanged = TRUE; 3449 } 3450 } 3451 } 3452 if (anychanged) 3453 pmap_invalidate_all_int(pmap); 3454 if (pv_lists_locked) { 3455 sched_unpin(); 3456 rw_wunlock(&pvh_global_lock); 3457 } 3458 PMAP_UNLOCK(pmap); 3459 } 3460 3461 #if VM_NRESERVLEVEL > 0 3462 /* 3463 * Tries to promote the 512 or 1024, contiguous 4KB page mappings that are 3464 * within a single page table page (PTP) to a single 2- or 4MB page mapping. 3465 * For promotion to occur, two conditions must be met: (1) the 4KB page 3466 * mappings must map aligned, contiguous physical memory and (2) the 4KB page 3467 * mappings must have identical characteristics. 3468 * 3469 * Managed (PG_MANAGED) mappings within the kernel address space are not 3470 * promoted. The reason is that kernel PDEs are replicated in each pmap but 3471 * pmap_clear_ptes() and pmap_ts_referenced() only read the PDE from the kernel 3472 * pmap. 3473 */ 3474 static void 3475 pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va) 3476 { 3477 pd_entry_t newpde; 3478 pt_entry_t *firstpte, oldpte, pa, *pte; 3479 #ifdef KTR 3480 vm_offset_t oldpteva; 3481 #endif 3482 vm_page_t mpte; 3483 3484 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3485 3486 /* 3487 * Examine the first PTE in the specified PTP. Abort if this PTE is 3488 * either invalid, unused, or does not map the first 4KB physical page 3489 * within a 2- or 4MB page. 3490 */ 3491 firstpte = pmap_pte_quick(pmap, trunc_4mpage(va)); 3492 setpde: 3493 newpde = *firstpte; 3494 if ((newpde & ((PG_FRAME & PDRMASK) | PG_A | PG_V)) != (PG_A | PG_V)) { 3495 pmap_pde_p_failures++; 3496 CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x" 3497 " in pmap %p", va, pmap); 3498 return; 3499 } 3500 if ((*firstpte & PG_MANAGED) != 0 && pmap == kernel_pmap) { 3501 pmap_pde_p_failures++; 3502 CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x" 3503 " in pmap %p", va, pmap); 3504 return; 3505 } 3506 if ((newpde & (PG_M | PG_RW)) == PG_RW) { 3507 /* 3508 * When PG_M is already clear, PG_RW can be cleared without 3509 * a TLB invalidation. 3510 */ 3511 if (!atomic_cmpset_int((u_int *)firstpte, newpde, newpde & 3512 ~PG_RW)) 3513 goto setpde; 3514 newpde &= ~PG_RW; 3515 } 3516 3517 /* 3518 * Examine each of the other PTEs in the specified PTP. Abort if this 3519 * PTE maps an unexpected 4KB physical page or does not have identical 3520 * characteristics to the first PTE. 3521 */ 3522 pa = (newpde & (PG_PS_FRAME | PG_A | PG_V)) + NBPDR - PAGE_SIZE; 3523 for (pte = firstpte + NPTEPG - 1; pte > firstpte; pte--) { 3524 setpte: 3525 oldpte = *pte; 3526 if ((oldpte & (PG_FRAME | PG_A | PG_V)) != pa) { 3527 pmap_pde_p_failures++; 3528 CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x" 3529 " in pmap %p", va, pmap); 3530 return; 3531 } 3532 if ((oldpte & (PG_M | PG_RW)) == PG_RW) { 3533 /* 3534 * When PG_M is already clear, PG_RW can be cleared 3535 * without a TLB invalidation. 3536 */ 3537 if (!atomic_cmpset_int((u_int *)pte, oldpte, 3538 oldpte & ~PG_RW)) 3539 goto setpte; 3540 oldpte &= ~PG_RW; 3541 #ifdef KTR 3542 oldpteva = (oldpte & PG_FRAME & PDRMASK) | 3543 (va & ~PDRMASK); 3544 #endif 3545 CTR2(KTR_PMAP, "pmap_promote_pde: protect for va %#x" 3546 " in pmap %p", oldpteva, pmap); 3547 } 3548 if ((oldpte & PG_PTE_PROMOTE) != (newpde & PG_PTE_PROMOTE)) { 3549 pmap_pde_p_failures++; 3550 CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x" 3551 " in pmap %p", va, pmap); 3552 return; 3553 } 3554 pa -= PAGE_SIZE; 3555 } 3556 3557 /* 3558 * Save the page table page in its current state until the PDE 3559 * mapping the superpage is demoted by pmap_demote_pde() or 3560 * destroyed by pmap_remove_pde(). 3561 */ 3562 mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME); 3563 KASSERT(mpte >= vm_page_array && 3564 mpte < &vm_page_array[vm_page_array_size], 3565 ("pmap_promote_pde: page table page is out of range")); 3566 KASSERT(mpte->pindex == va >> PDRSHIFT, 3567 ("pmap_promote_pde: page table page's pindex is wrong")); 3568 if (pmap_insert_pt_page(pmap, mpte, true)) { 3569 pmap_pde_p_failures++; 3570 CTR2(KTR_PMAP, 3571 "pmap_promote_pde: failure for va %#x in pmap %p", va, 3572 pmap); 3573 return; 3574 } 3575 3576 /* 3577 * Promote the pv entries. 3578 */ 3579 if ((newpde & PG_MANAGED) != 0) 3580 pmap_pv_promote_pde(pmap, va, newpde & PG_PS_FRAME); 3581 3582 /* 3583 * Propagate the PAT index to its proper position. 3584 */ 3585 if ((newpde & PG_PTE_PAT) != 0) 3586 newpde ^= PG_PDE_PAT | PG_PTE_PAT; 3587 3588 /* 3589 * Map the superpage. 3590 */ 3591 if (workaround_erratum383) 3592 pmap_update_pde(pmap, va, pde, PG_PS | newpde); 3593 else if (pmap == kernel_pmap) 3594 pmap_kenter_pde(va, PG_PROMOTED | PG_PS | newpde); 3595 else 3596 pde_store(pde, PG_PROMOTED | PG_PS | newpde); 3597 3598 pmap_pde_promotions++; 3599 CTR2(KTR_PMAP, "pmap_promote_pde: success for va %#x" 3600 " in pmap %p", va, pmap); 3601 } 3602 #endif /* VM_NRESERVLEVEL > 0 */ 3603 3604 /* 3605 * Insert the given physical page (p) at 3606 * the specified virtual address (v) in the 3607 * target physical map with the protection requested. 3608 * 3609 * If specified, the page will be wired down, meaning 3610 * that the related pte can not be reclaimed. 3611 * 3612 * NB: This is the only routine which MAY NOT lazy-evaluate 3613 * or lose information. That is, this routine must actually 3614 * insert this page into the given map NOW. 3615 */ 3616 static int 3617 __CONCAT(PMTYPE, enter)(pmap_t pmap, vm_offset_t va, vm_page_t m, 3618 vm_prot_t prot, u_int flags, int8_t psind) 3619 { 3620 pd_entry_t *pde; 3621 pt_entry_t *pte; 3622 pt_entry_t newpte, origpte; 3623 pv_entry_t pv; 3624 vm_paddr_t opa, pa; 3625 vm_page_t mpte, om; 3626 int rv; 3627 3628 va = trunc_page(va); 3629 KASSERT((pmap == kernel_pmap && va < VM_MAX_KERNEL_ADDRESS) || 3630 (pmap != kernel_pmap && va < VM_MAXUSER_ADDRESS), 3631 ("pmap_enter: toobig k%d %#x", pmap == kernel_pmap, va)); 3632 KASSERT(va < PMAP_TRM_MIN_ADDRESS, 3633 ("pmap_enter: invalid to pmap_enter into trampoline (va: 0x%x)", 3634 va)); 3635 KASSERT(pmap != kernel_pmap || (m->oflags & VPO_UNMANAGED) != 0 || 3636 !VA_IS_CLEANMAP(va), 3637 ("pmap_enter: managed mapping within the clean submap")); 3638 if ((m->oflags & VPO_UNMANAGED) == 0) 3639 VM_PAGE_OBJECT_BUSY_ASSERT(m); 3640 KASSERT((flags & PMAP_ENTER_RESERVED) == 0, 3641 ("pmap_enter: flags %u has reserved bits set", flags)); 3642 pa = VM_PAGE_TO_PHYS(m); 3643 newpte = (pt_entry_t)(pa | PG_A | PG_V); 3644 if ((flags & VM_PROT_WRITE) != 0) 3645 newpte |= PG_M; 3646 if ((prot & VM_PROT_WRITE) != 0) 3647 newpte |= PG_RW; 3648 KASSERT((newpte & (PG_M | PG_RW)) != PG_M, 3649 ("pmap_enter: flags includes VM_PROT_WRITE but prot doesn't")); 3650 #ifdef PMAP_PAE_COMP 3651 if ((prot & VM_PROT_EXECUTE) == 0 && !i386_read_exec) 3652 newpte |= pg_nx; 3653 #endif 3654 if ((flags & PMAP_ENTER_WIRED) != 0) 3655 newpte |= PG_W; 3656 if (pmap != kernel_pmap) 3657 newpte |= PG_U; 3658 newpte |= pmap_cache_bits(pmap, m->md.pat_mode, psind > 0); 3659 if ((m->oflags & VPO_UNMANAGED) == 0) 3660 newpte |= PG_MANAGED; 3661 3662 rw_wlock(&pvh_global_lock); 3663 PMAP_LOCK(pmap); 3664 sched_pin(); 3665 if (psind == 1) { 3666 /* Assert the required virtual and physical alignment. */ 3667 KASSERT((va & PDRMASK) == 0, ("pmap_enter: va unaligned")); 3668 KASSERT(m->psind > 0, ("pmap_enter: m->psind < psind")); 3669 rv = pmap_enter_pde(pmap, va, newpte | PG_PS, flags, m); 3670 goto out; 3671 } 3672 3673 pde = pmap_pde(pmap, va); 3674 if (pmap != kernel_pmap) { 3675 /* 3676 * va is for UVA. 3677 * In the case that a page table page is not resident, 3678 * we are creating it here. pmap_allocpte() handles 3679 * demotion. 3680 */ 3681 mpte = pmap_allocpte(pmap, va, flags); 3682 if (mpte == NULL) { 3683 KASSERT((flags & PMAP_ENTER_NOSLEEP) != 0, 3684 ("pmap_allocpte failed with sleep allowed")); 3685 rv = KERN_RESOURCE_SHORTAGE; 3686 goto out; 3687 } 3688 } else { 3689 /* 3690 * va is for KVA, so pmap_demote_pde() will never fail 3691 * to install a page table page. PG_V is also 3692 * asserted by pmap_demote_pde(). 3693 */ 3694 mpte = NULL; 3695 KASSERT(pde != NULL && (*pde & PG_V) != 0, 3696 ("KVA %#x invalid pde pdir %#jx", va, 3697 (uintmax_t)pmap->pm_pdir[PTDPTDI])); 3698 if ((*pde & PG_PS) != 0) 3699 pmap_demote_pde(pmap, pde, va); 3700 } 3701 pte = pmap_pte_quick(pmap, va); 3702 3703 /* 3704 * Page Directory table entry is not valid, which should not 3705 * happen. We should have either allocated the page table 3706 * page or demoted the existing mapping above. 3707 */ 3708 if (pte == NULL) { 3709 panic("pmap_enter: invalid page directory pdir=%#jx, va=%#x", 3710 (uintmax_t)pmap->pm_pdir[PTDPTDI], va); 3711 } 3712 3713 origpte = *pte; 3714 pv = NULL; 3715 3716 /* 3717 * Is the specified virtual address already mapped? 3718 */ 3719 if ((origpte & PG_V) != 0) { 3720 /* 3721 * Wiring change, just update stats. We don't worry about 3722 * wiring PT pages as they remain resident as long as there 3723 * are valid mappings in them. Hence, if a user page is wired, 3724 * the PT page will be also. 3725 */ 3726 if ((newpte & PG_W) != 0 && (origpte & PG_W) == 0) 3727 pmap->pm_stats.wired_count++; 3728 else if ((newpte & PG_W) == 0 && (origpte & PG_W) != 0) 3729 pmap->pm_stats.wired_count--; 3730 3731 /* 3732 * Remove the extra PT page reference. 3733 */ 3734 if (mpte != NULL) { 3735 mpte->ref_count--; 3736 KASSERT(mpte->ref_count > 0, 3737 ("pmap_enter: missing reference to page table page," 3738 " va: 0x%x", va)); 3739 } 3740 3741 /* 3742 * Has the physical page changed? 3743 */ 3744 opa = origpte & PG_FRAME; 3745 if (opa == pa) { 3746 /* 3747 * No, might be a protection or wiring change. 3748 */ 3749 if ((origpte & PG_MANAGED) != 0 && 3750 (newpte & PG_RW) != 0) 3751 vm_page_aflag_set(m, PGA_WRITEABLE); 3752 if (((origpte ^ newpte) & ~(PG_M | PG_A)) == 0) 3753 goto unchanged; 3754 goto validate; 3755 } 3756 3757 /* 3758 * The physical page has changed. Temporarily invalidate 3759 * the mapping. This ensures that all threads sharing the 3760 * pmap keep a consistent view of the mapping, which is 3761 * necessary for the correct handling of COW faults. It 3762 * also permits reuse of the old mapping's PV entry, 3763 * avoiding an allocation. 3764 * 3765 * For consistency, handle unmanaged mappings the same way. 3766 */ 3767 origpte = pte_load_clear(pte); 3768 KASSERT((origpte & PG_FRAME) == opa, 3769 ("pmap_enter: unexpected pa update for %#x", va)); 3770 if ((origpte & PG_MANAGED) != 0) { 3771 om = PHYS_TO_VM_PAGE(opa); 3772 3773 /* 3774 * The pmap lock is sufficient to synchronize with 3775 * concurrent calls to pmap_page_test_mappings() and 3776 * pmap_ts_referenced(). 3777 */ 3778 if ((origpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 3779 vm_page_dirty(om); 3780 if ((origpte & PG_A) != 0) { 3781 pmap_invalidate_page_int(pmap, va); 3782 vm_page_aflag_set(om, PGA_REFERENCED); 3783 } 3784 pv = pmap_pvh_remove(&om->md, pmap, va); 3785 KASSERT(pv != NULL, 3786 ("pmap_enter: no PV entry for %#x", va)); 3787 if ((newpte & PG_MANAGED) == 0) 3788 free_pv_entry(pmap, pv); 3789 if ((om->a.flags & PGA_WRITEABLE) != 0 && 3790 TAILQ_EMPTY(&om->md.pv_list) && 3791 ((om->flags & PG_FICTITIOUS) != 0 || 3792 TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list))) 3793 vm_page_aflag_clear(om, PGA_WRITEABLE); 3794 } else { 3795 /* 3796 * Since this mapping is unmanaged, assume that PG_A 3797 * is set. 3798 */ 3799 pmap_invalidate_page_int(pmap, va); 3800 } 3801 origpte = 0; 3802 } else { 3803 /* 3804 * Increment the counters. 3805 */ 3806 if ((newpte & PG_W) != 0) 3807 pmap->pm_stats.wired_count++; 3808 pmap->pm_stats.resident_count++; 3809 } 3810 3811 /* 3812 * Enter on the PV list if part of our managed memory. 3813 */ 3814 if ((newpte & PG_MANAGED) != 0) { 3815 if (pv == NULL) { 3816 pv = get_pv_entry(pmap, FALSE); 3817 pv->pv_va = va; 3818 } 3819 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); 3820 if ((newpte & PG_RW) != 0) 3821 vm_page_aflag_set(m, PGA_WRITEABLE); 3822 } 3823 3824 /* 3825 * Update the PTE. 3826 */ 3827 if ((origpte & PG_V) != 0) { 3828 validate: 3829 origpte = pte_load_store(pte, newpte); 3830 KASSERT((origpte & PG_FRAME) == pa, 3831 ("pmap_enter: unexpected pa update for %#x", va)); 3832 if ((newpte & PG_M) == 0 && (origpte & (PG_M | PG_RW)) == 3833 (PG_M | PG_RW)) { 3834 if ((origpte & PG_MANAGED) != 0) 3835 vm_page_dirty(m); 3836 3837 /* 3838 * Although the PTE may still have PG_RW set, TLB 3839 * invalidation may nonetheless be required because 3840 * the PTE no longer has PG_M set. 3841 */ 3842 } 3843 #ifdef PMAP_PAE_COMP 3844 else if ((origpte & PG_NX) != 0 || (newpte & PG_NX) == 0) { 3845 /* 3846 * This PTE change does not require TLB invalidation. 3847 */ 3848 goto unchanged; 3849 } 3850 #endif 3851 if ((origpte & PG_A) != 0) 3852 pmap_invalidate_page_int(pmap, va); 3853 } else 3854 pte_store_zero(pte, newpte); 3855 3856 unchanged: 3857 3858 #if VM_NRESERVLEVEL > 0 3859 /* 3860 * If both the page table page and the reservation are fully 3861 * populated, then attempt promotion. 3862 */ 3863 if ((mpte == NULL || mpte->ref_count == NPTEPG) && 3864 pg_ps_enabled && (m->flags & PG_FICTITIOUS) == 0 && 3865 vm_reserv_level_iffullpop(m) == 0) 3866 pmap_promote_pde(pmap, pde, va); 3867 #endif 3868 3869 rv = KERN_SUCCESS; 3870 out: 3871 sched_unpin(); 3872 rw_wunlock(&pvh_global_lock); 3873 PMAP_UNLOCK(pmap); 3874 return (rv); 3875 } 3876 3877 /* 3878 * Tries to create a read- and/or execute-only 2 or 4 MB page mapping. Returns 3879 * true if successful. Returns false if (1) a mapping already exists at the 3880 * specified virtual address or (2) a PV entry cannot be allocated without 3881 * reclaiming another PV entry. 3882 */ 3883 static bool 3884 pmap_enter_4mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot) 3885 { 3886 pd_entry_t newpde; 3887 3888 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3889 newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(pmap, m->md.pat_mode, 1) | 3890 PG_PS | PG_V; 3891 if ((m->oflags & VPO_UNMANAGED) == 0) 3892 newpde |= PG_MANAGED; 3893 #ifdef PMAP_PAE_COMP 3894 if ((prot & VM_PROT_EXECUTE) == 0 && !i386_read_exec) 3895 newpde |= pg_nx; 3896 #endif 3897 if (pmap != kernel_pmap) 3898 newpde |= PG_U; 3899 return (pmap_enter_pde(pmap, va, newpde, PMAP_ENTER_NOSLEEP | 3900 PMAP_ENTER_NOREPLACE | PMAP_ENTER_NORECLAIM, NULL) == 3901 KERN_SUCCESS); 3902 } 3903 3904 /* 3905 * Returns true if every page table entry in the page table page that maps 3906 * the specified kernel virtual address is zero. 3907 */ 3908 static bool 3909 pmap_every_pte_zero(vm_offset_t va) 3910 { 3911 pt_entry_t *pt_end, *pte; 3912 3913 KASSERT((va & PDRMASK) == 0, ("va is misaligned")); 3914 pte = vtopte(va); 3915 for (pt_end = pte + NPTEPG; pte < pt_end; pte++) { 3916 if (*pte != 0) 3917 return (false); 3918 } 3919 return (true); 3920 } 3921 3922 /* 3923 * Tries to create the specified 2 or 4 MB page mapping. Returns KERN_SUCCESS 3924 * if the mapping was created, and either KERN_FAILURE or 3925 * KERN_RESOURCE_SHORTAGE otherwise. Returns KERN_FAILURE if 3926 * PMAP_ENTER_NOREPLACE was specified and a mapping already exists at the 3927 * specified virtual address. Returns KERN_RESOURCE_SHORTAGE if 3928 * PMAP_ENTER_NORECLAIM was specified and a PV entry allocation failed. 3929 * 3930 * The parameter "m" is only used when creating a managed, writeable mapping. 3931 */ 3932 static int 3933 pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde, u_int flags, 3934 vm_page_t m) 3935 { 3936 struct spglist free; 3937 pd_entry_t oldpde, *pde; 3938 vm_page_t mt; 3939 3940 rw_assert(&pvh_global_lock, RA_WLOCKED); 3941 KASSERT((newpde & (PG_M | PG_RW)) != PG_RW, 3942 ("pmap_enter_pde: newpde is missing PG_M")); 3943 KASSERT(pmap == kernel_pmap || (newpde & PG_W) == 0, 3944 ("pmap_enter_pde: cannot create wired user mapping")); 3945 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3946 pde = pmap_pde(pmap, va); 3947 oldpde = *pde; 3948 if ((oldpde & PG_V) != 0) { 3949 if ((flags & PMAP_ENTER_NOREPLACE) != 0 && (pmap != 3950 kernel_pmap || (oldpde & PG_PS) != 0 || 3951 !pmap_every_pte_zero(va))) { 3952 CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx" 3953 " in pmap %p", va, pmap); 3954 return (KERN_FAILURE); 3955 } 3956 /* Break the existing mapping(s). */ 3957 SLIST_INIT(&free); 3958 if ((oldpde & PG_PS) != 0) { 3959 /* 3960 * If the PDE resulted from a promotion, then a 3961 * reserved PT page could be freed. 3962 */ 3963 (void)pmap_remove_pde(pmap, pde, va, &free); 3964 if ((oldpde & PG_G) == 0) 3965 pmap_invalidate_pde_page(pmap, va, oldpde); 3966 } else { 3967 if (pmap_remove_ptes(pmap, va, va + NBPDR, &free)) 3968 pmap_invalidate_all_int(pmap); 3969 } 3970 if (pmap != kernel_pmap) { 3971 vm_page_free_pages_toq(&free, true); 3972 KASSERT(*pde == 0, ("pmap_enter_pde: non-zero pde %p", 3973 pde)); 3974 } else { 3975 KASSERT(SLIST_EMPTY(&free), 3976 ("pmap_enter_pde: freed kernel page table page")); 3977 3978 /* 3979 * Both pmap_remove_pde() and pmap_remove_ptes() will 3980 * leave the kernel page table page zero filled. 3981 */ 3982 mt = PHYS_TO_VM_PAGE(*pde & PG_FRAME); 3983 if (pmap_insert_pt_page(pmap, mt, false)) 3984 panic("pmap_enter_pde: trie insert failed"); 3985 } 3986 } 3987 if ((newpde & PG_MANAGED) != 0) { 3988 /* 3989 * Abort this mapping if its PV entry could not be created. 3990 */ 3991 if (!pmap_pv_insert_pde(pmap, va, newpde, flags)) { 3992 CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx" 3993 " in pmap %p", va, pmap); 3994 return (KERN_RESOURCE_SHORTAGE); 3995 } 3996 if ((newpde & PG_RW) != 0) { 3997 for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++) 3998 vm_page_aflag_set(mt, PGA_WRITEABLE); 3999 } 4000 } 4001 4002 /* 4003 * Increment counters. 4004 */ 4005 if ((newpde & PG_W) != 0) 4006 pmap->pm_stats.wired_count += NBPDR / PAGE_SIZE; 4007 pmap->pm_stats.resident_count += NBPDR / PAGE_SIZE; 4008 4009 /* 4010 * Map the superpage. (This is not a promoted mapping; there will not 4011 * be any lingering 4KB page mappings in the TLB.) 4012 */ 4013 pde_store(pde, newpde); 4014 4015 pmap_pde_mappings++; 4016 CTR2(KTR_PMAP, "pmap_enter_pde: success for va %#lx in pmap %p", 4017 va, pmap); 4018 return (KERN_SUCCESS); 4019 } 4020 4021 /* 4022 * Maps a sequence of resident pages belonging to the same object. 4023 * The sequence begins with the given page m_start. This page is 4024 * mapped at the given virtual address start. Each subsequent page is 4025 * mapped at a virtual address that is offset from start by the same 4026 * amount as the page is offset from m_start within the object. The 4027 * last page in the sequence is the page with the largest offset from 4028 * m_start that can be mapped at a virtual address less than the given 4029 * virtual address end. Not every virtual page between start and end 4030 * is mapped; only those for which a resident page exists with the 4031 * corresponding offset from m_start are mapped. 4032 */ 4033 static void 4034 __CONCAT(PMTYPE, enter_object)(pmap_t pmap, vm_offset_t start, vm_offset_t end, 4035 vm_page_t m_start, vm_prot_t prot) 4036 { 4037 vm_offset_t va; 4038 vm_page_t m, mpte; 4039 vm_pindex_t diff, psize; 4040 4041 VM_OBJECT_ASSERT_LOCKED(m_start->object); 4042 4043 psize = atop(end - start); 4044 mpte = NULL; 4045 m = m_start; 4046 rw_wlock(&pvh_global_lock); 4047 PMAP_LOCK(pmap); 4048 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 4049 va = start + ptoa(diff); 4050 if ((va & PDRMASK) == 0 && va + NBPDR <= end && 4051 m->psind == 1 && pg_ps_enabled && 4052 pmap_enter_4mpage(pmap, va, m, prot)) 4053 m = &m[NBPDR / PAGE_SIZE - 1]; 4054 else 4055 mpte = pmap_enter_quick_locked(pmap, va, m, prot, 4056 mpte); 4057 m = TAILQ_NEXT(m, listq); 4058 } 4059 rw_wunlock(&pvh_global_lock); 4060 PMAP_UNLOCK(pmap); 4061 } 4062 4063 /* 4064 * this code makes some *MAJOR* assumptions: 4065 * 1. Current pmap & pmap exists. 4066 * 2. Not wired. 4067 * 3. Read access. 4068 * 4. No page table pages. 4069 * but is *MUCH* faster than pmap_enter... 4070 */ 4071 4072 static void 4073 __CONCAT(PMTYPE, enter_quick)(pmap_t pmap, vm_offset_t va, vm_page_t m, 4074 vm_prot_t prot) 4075 { 4076 4077 rw_wlock(&pvh_global_lock); 4078 PMAP_LOCK(pmap); 4079 (void)pmap_enter_quick_locked(pmap, va, m, prot, NULL); 4080 rw_wunlock(&pvh_global_lock); 4081 PMAP_UNLOCK(pmap); 4082 } 4083 4084 static vm_page_t 4085 pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, 4086 vm_prot_t prot, vm_page_t mpte) 4087 { 4088 pt_entry_t newpte, *pte; 4089 4090 KASSERT(pmap != kernel_pmap || !VA_IS_CLEANMAP(va) || 4091 (m->oflags & VPO_UNMANAGED) != 0, 4092 ("pmap_enter_quick_locked: managed mapping within the clean submap")); 4093 rw_assert(&pvh_global_lock, RA_WLOCKED); 4094 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 4095 4096 /* 4097 * In the case that a page table page is not 4098 * resident, we are creating it here. 4099 */ 4100 if (pmap != kernel_pmap) { 4101 u_int ptepindex; 4102 pd_entry_t ptepa; 4103 4104 /* 4105 * Calculate pagetable page index 4106 */ 4107 ptepindex = va >> PDRSHIFT; 4108 if (mpte && (mpte->pindex == ptepindex)) { 4109 mpte->ref_count++; 4110 } else { 4111 /* 4112 * Get the page directory entry 4113 */ 4114 ptepa = pmap->pm_pdir[ptepindex]; 4115 4116 /* 4117 * If the page table page is mapped, we just increment 4118 * the hold count, and activate it. 4119 */ 4120 if (ptepa) { 4121 if (ptepa & PG_PS) 4122 return (NULL); 4123 mpte = PHYS_TO_VM_PAGE(ptepa & PG_FRAME); 4124 mpte->ref_count++; 4125 } else { 4126 mpte = _pmap_allocpte(pmap, ptepindex, 4127 PMAP_ENTER_NOSLEEP); 4128 if (mpte == NULL) 4129 return (mpte); 4130 } 4131 } 4132 } else { 4133 mpte = NULL; 4134 } 4135 4136 sched_pin(); 4137 pte = pmap_pte_quick(pmap, va); 4138 if (*pte) { 4139 if (mpte != NULL) 4140 mpte->ref_count--; 4141 sched_unpin(); 4142 return (NULL); 4143 } 4144 4145 /* 4146 * Enter on the PV list if part of our managed memory. 4147 */ 4148 if ((m->oflags & VPO_UNMANAGED) == 0 && 4149 !pmap_try_insert_pv_entry(pmap, va, m)) { 4150 if (mpte != NULL) 4151 pmap_abort_ptp(pmap, va, mpte); 4152 sched_unpin(); 4153 return (NULL); 4154 } 4155 4156 /* 4157 * Increment counters 4158 */ 4159 pmap->pm_stats.resident_count++; 4160 4161 newpte = VM_PAGE_TO_PHYS(m) | PG_V | 4162 pmap_cache_bits(pmap, m->md.pat_mode, 0); 4163 if ((m->oflags & VPO_UNMANAGED) == 0) 4164 newpte |= PG_MANAGED; 4165 #ifdef PMAP_PAE_COMP 4166 if ((prot & VM_PROT_EXECUTE) == 0 && !i386_read_exec) 4167 newpte |= pg_nx; 4168 #endif 4169 if (pmap != kernel_pmap) 4170 newpte |= PG_U; 4171 pte_store_zero(pte, newpte); 4172 sched_unpin(); 4173 return (mpte); 4174 } 4175 4176 /* 4177 * Make a temporary mapping for a physical address. This is only intended 4178 * to be used for panic dumps. 4179 */ 4180 static void * 4181 __CONCAT(PMTYPE, kenter_temporary)(vm_paddr_t pa, int i) 4182 { 4183 vm_offset_t va; 4184 4185 va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE); 4186 pmap_kenter(va, pa); 4187 invlpg(va); 4188 return ((void *)crashdumpmap); 4189 } 4190 4191 /* 4192 * This code maps large physical mmap regions into the 4193 * processor address space. Note that some shortcuts 4194 * are taken, but the code works. 4195 */ 4196 static void 4197 __CONCAT(PMTYPE, object_init_pt)(pmap_t pmap, vm_offset_t addr, 4198 vm_object_t object, vm_pindex_t pindex, vm_size_t size) 4199 { 4200 pd_entry_t *pde; 4201 vm_paddr_t pa, ptepa; 4202 vm_page_t p; 4203 int pat_mode; 4204 4205 VM_OBJECT_ASSERT_WLOCKED(object); 4206 KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG, 4207 ("pmap_object_init_pt: non-device object")); 4208 if (pg_ps_enabled && 4209 (addr & (NBPDR - 1)) == 0 && (size & (NBPDR - 1)) == 0) { 4210 if (!vm_object_populate(object, pindex, pindex + atop(size))) 4211 return; 4212 p = vm_page_lookup(object, pindex); 4213 KASSERT(vm_page_all_valid(p), 4214 ("pmap_object_init_pt: invalid page %p", p)); 4215 pat_mode = p->md.pat_mode; 4216 4217 /* 4218 * Abort the mapping if the first page is not physically 4219 * aligned to a 2/4MB page boundary. 4220 */ 4221 ptepa = VM_PAGE_TO_PHYS(p); 4222 if (ptepa & (NBPDR - 1)) 4223 return; 4224 4225 /* 4226 * Skip the first page. Abort the mapping if the rest of 4227 * the pages are not physically contiguous or have differing 4228 * memory attributes. 4229 */ 4230 p = TAILQ_NEXT(p, listq); 4231 for (pa = ptepa + PAGE_SIZE; pa < ptepa + size; 4232 pa += PAGE_SIZE) { 4233 KASSERT(vm_page_all_valid(p), 4234 ("pmap_object_init_pt: invalid page %p", p)); 4235 if (pa != VM_PAGE_TO_PHYS(p) || 4236 pat_mode != p->md.pat_mode) 4237 return; 4238 p = TAILQ_NEXT(p, listq); 4239 } 4240 4241 /* 4242 * Map using 2/4MB pages. Since "ptepa" is 2/4M aligned and 4243 * "size" is a multiple of 2/4M, adding the PAT setting to 4244 * "pa" will not affect the termination of this loop. 4245 */ 4246 PMAP_LOCK(pmap); 4247 for (pa = ptepa | pmap_cache_bits(pmap, pat_mode, 1); 4248 pa < ptepa + size; pa += NBPDR) { 4249 pde = pmap_pde(pmap, addr); 4250 if (*pde == 0) { 4251 pde_store(pde, pa | PG_PS | PG_M | PG_A | 4252 PG_U | PG_RW | PG_V); 4253 pmap->pm_stats.resident_count += NBPDR / 4254 PAGE_SIZE; 4255 pmap_pde_mappings++; 4256 } 4257 /* Else continue on if the PDE is already valid. */ 4258 addr += NBPDR; 4259 } 4260 PMAP_UNLOCK(pmap); 4261 } 4262 } 4263 4264 /* 4265 * Clear the wired attribute from the mappings for the specified range of 4266 * addresses in the given pmap. Every valid mapping within that range 4267 * must have the wired attribute set. In contrast, invalid mappings 4268 * cannot have the wired attribute set, so they are ignored. 4269 * 4270 * The wired attribute of the page table entry is not a hardware feature, 4271 * so there is no need to invalidate any TLB entries. 4272 */ 4273 static void 4274 __CONCAT(PMTYPE, unwire)(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 4275 { 4276 vm_offset_t pdnxt; 4277 pd_entry_t *pde; 4278 pt_entry_t *pte; 4279 boolean_t pv_lists_locked; 4280 4281 if (pmap_is_current(pmap)) 4282 pv_lists_locked = FALSE; 4283 else { 4284 pv_lists_locked = TRUE; 4285 resume: 4286 rw_wlock(&pvh_global_lock); 4287 sched_pin(); 4288 } 4289 PMAP_LOCK(pmap); 4290 for (; sva < eva; sva = pdnxt) { 4291 pdnxt = (sva + NBPDR) & ~PDRMASK; 4292 if (pdnxt < sva) 4293 pdnxt = eva; 4294 pde = pmap_pde(pmap, sva); 4295 if ((*pde & PG_V) == 0) 4296 continue; 4297 if ((*pde & PG_PS) != 0) { 4298 if ((*pde & PG_W) == 0) 4299 panic("pmap_unwire: pde %#jx is missing PG_W", 4300 (uintmax_t)*pde); 4301 4302 /* 4303 * Are we unwiring the entire large page? If not, 4304 * demote the mapping and fall through. 4305 */ 4306 if (sva + NBPDR == pdnxt && eva >= pdnxt) { 4307 /* 4308 * Regardless of whether a pde (or pte) is 32 4309 * or 64 bits in size, PG_W is among the least 4310 * significant 32 bits. 4311 */ 4312 atomic_clear_int((u_int *)pde, PG_W); 4313 pmap->pm_stats.wired_count -= NBPDR / 4314 PAGE_SIZE; 4315 continue; 4316 } else { 4317 if (!pv_lists_locked) { 4318 pv_lists_locked = TRUE; 4319 if (!rw_try_wlock(&pvh_global_lock)) { 4320 PMAP_UNLOCK(pmap); 4321 /* Repeat sva. */ 4322 goto resume; 4323 } 4324 sched_pin(); 4325 } 4326 if (!pmap_demote_pde(pmap, pde, sva)) 4327 panic("pmap_unwire: demotion failed"); 4328 } 4329 } 4330 if (pdnxt > eva) 4331 pdnxt = eva; 4332 for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++, 4333 sva += PAGE_SIZE) { 4334 if ((*pte & PG_V) == 0) 4335 continue; 4336 if ((*pte & PG_W) == 0) 4337 panic("pmap_unwire: pte %#jx is missing PG_W", 4338 (uintmax_t)*pte); 4339 4340 /* 4341 * PG_W must be cleared atomically. Although the pmap 4342 * lock synchronizes access to PG_W, another processor 4343 * could be setting PG_M and/or PG_A concurrently. 4344 * 4345 * PG_W is among the least significant 32 bits. 4346 */ 4347 atomic_clear_int((u_int *)pte, PG_W); 4348 pmap->pm_stats.wired_count--; 4349 } 4350 } 4351 if (pv_lists_locked) { 4352 sched_unpin(); 4353 rw_wunlock(&pvh_global_lock); 4354 } 4355 PMAP_UNLOCK(pmap); 4356 } 4357 4358 /* 4359 * Copy the range specified by src_addr/len 4360 * from the source map to the range dst_addr/len 4361 * in the destination map. 4362 * 4363 * This routine is only advisory and need not do anything. Since 4364 * current pmap is always the kernel pmap when executing in 4365 * kernel, and we do not copy from the kernel pmap to a user 4366 * pmap, this optimization is not usable in 4/4G full split i386 4367 * world. 4368 */ 4369 4370 static void 4371 __CONCAT(PMTYPE, copy)(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, 4372 vm_size_t len, vm_offset_t src_addr) 4373 { 4374 pt_entry_t *src_pte, *dst_pte, ptetemp; 4375 pd_entry_t srcptepaddr; 4376 vm_page_t dstmpte, srcmpte; 4377 vm_offset_t addr, end_addr, pdnxt; 4378 u_int ptepindex; 4379 4380 if (dst_addr != src_addr) 4381 return; 4382 4383 end_addr = src_addr + len; 4384 4385 rw_wlock(&pvh_global_lock); 4386 if (dst_pmap < src_pmap) { 4387 PMAP_LOCK(dst_pmap); 4388 PMAP_LOCK(src_pmap); 4389 } else { 4390 PMAP_LOCK(src_pmap); 4391 PMAP_LOCK(dst_pmap); 4392 } 4393 sched_pin(); 4394 for (addr = src_addr; addr < end_addr; addr = pdnxt) { 4395 KASSERT(addr < PMAP_TRM_MIN_ADDRESS, 4396 ("pmap_copy: invalid to pmap_copy the trampoline")); 4397 4398 pdnxt = (addr + NBPDR) & ~PDRMASK; 4399 if (pdnxt < addr) 4400 pdnxt = end_addr; 4401 ptepindex = addr >> PDRSHIFT; 4402 4403 srcptepaddr = src_pmap->pm_pdir[ptepindex]; 4404 if (srcptepaddr == 0) 4405 continue; 4406 4407 if (srcptepaddr & PG_PS) { 4408 if ((addr & PDRMASK) != 0 || addr + NBPDR > end_addr) 4409 continue; 4410 if (dst_pmap->pm_pdir[ptepindex] == 0 && 4411 ((srcptepaddr & PG_MANAGED) == 0 || 4412 pmap_pv_insert_pde(dst_pmap, addr, srcptepaddr, 4413 PMAP_ENTER_NORECLAIM))) { 4414 dst_pmap->pm_pdir[ptepindex] = srcptepaddr & 4415 ~PG_W; 4416 dst_pmap->pm_stats.resident_count += 4417 NBPDR / PAGE_SIZE; 4418 pmap_pde_mappings++; 4419 } 4420 continue; 4421 } 4422 4423 srcmpte = PHYS_TO_VM_PAGE(srcptepaddr & PG_FRAME); 4424 KASSERT(srcmpte->ref_count > 0, 4425 ("pmap_copy: source page table page is unused")); 4426 4427 if (pdnxt > end_addr) 4428 pdnxt = end_addr; 4429 4430 src_pte = pmap_pte_quick3(src_pmap, addr); 4431 while (addr < pdnxt) { 4432 ptetemp = *src_pte; 4433 /* 4434 * we only virtual copy managed pages 4435 */ 4436 if ((ptetemp & PG_MANAGED) != 0) { 4437 dstmpte = pmap_allocpte(dst_pmap, addr, 4438 PMAP_ENTER_NOSLEEP); 4439 if (dstmpte == NULL) 4440 goto out; 4441 dst_pte = pmap_pte_quick(dst_pmap, addr); 4442 if (*dst_pte == 0 && 4443 pmap_try_insert_pv_entry(dst_pmap, addr, 4444 PHYS_TO_VM_PAGE(ptetemp & PG_FRAME))) { 4445 /* 4446 * Clear the wired, modified, and 4447 * accessed (referenced) bits 4448 * during the copy. 4449 */ 4450 *dst_pte = ptetemp & ~(PG_W | PG_M | 4451 PG_A); 4452 dst_pmap->pm_stats.resident_count++; 4453 } else { 4454 pmap_abort_ptp(dst_pmap, addr, dstmpte); 4455 goto out; 4456 } 4457 if (dstmpte->ref_count >= srcmpte->ref_count) 4458 break; 4459 } 4460 addr += PAGE_SIZE; 4461 src_pte++; 4462 } 4463 } 4464 out: 4465 sched_unpin(); 4466 rw_wunlock(&pvh_global_lock); 4467 PMAP_UNLOCK(src_pmap); 4468 PMAP_UNLOCK(dst_pmap); 4469 } 4470 4471 /* 4472 * Zero 1 page of virtual memory mapped from a hardware page by the caller. 4473 */ 4474 static __inline void 4475 pagezero(void *page) 4476 { 4477 #if defined(I686_CPU) 4478 if (cpu_class == CPUCLASS_686) { 4479 if (cpu_feature & CPUID_SSE2) 4480 sse2_pagezero(page); 4481 else 4482 i686_pagezero(page); 4483 } else 4484 #endif 4485 bzero(page, PAGE_SIZE); 4486 } 4487 4488 /* 4489 * Zero the specified hardware page. 4490 */ 4491 static void 4492 __CONCAT(PMTYPE, zero_page)(vm_page_t m) 4493 { 4494 pt_entry_t *cmap_pte2; 4495 struct pcpu *pc; 4496 4497 sched_pin(); 4498 pc = get_pcpu(); 4499 cmap_pte2 = pc->pc_cmap_pte2; 4500 mtx_lock(&pc->pc_cmap_lock); 4501 if (*cmap_pte2) 4502 panic("pmap_zero_page: CMAP2 busy"); 4503 *cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M | 4504 pmap_cache_bits(kernel_pmap, m->md.pat_mode, 0); 4505 invlcaddr(pc->pc_cmap_addr2); 4506 pagezero(pc->pc_cmap_addr2); 4507 *cmap_pte2 = 0; 4508 4509 /* 4510 * Unpin the thread before releasing the lock. Otherwise the thread 4511 * could be rescheduled while still bound to the current CPU, only 4512 * to unpin itself immediately upon resuming execution. 4513 */ 4514 sched_unpin(); 4515 mtx_unlock(&pc->pc_cmap_lock); 4516 } 4517 4518 /* 4519 * Zero an area within a single hardware page. off and size must not 4520 * cover an area beyond a single hardware page. 4521 */ 4522 static void 4523 __CONCAT(PMTYPE, zero_page_area)(vm_page_t m, int off, int size) 4524 { 4525 pt_entry_t *cmap_pte2; 4526 struct pcpu *pc; 4527 4528 sched_pin(); 4529 pc = get_pcpu(); 4530 cmap_pte2 = pc->pc_cmap_pte2; 4531 mtx_lock(&pc->pc_cmap_lock); 4532 if (*cmap_pte2) 4533 panic("pmap_zero_page_area: CMAP2 busy"); 4534 *cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M | 4535 pmap_cache_bits(kernel_pmap, m->md.pat_mode, 0); 4536 invlcaddr(pc->pc_cmap_addr2); 4537 if (off == 0 && size == PAGE_SIZE) 4538 pagezero(pc->pc_cmap_addr2); 4539 else 4540 bzero(pc->pc_cmap_addr2 + off, size); 4541 *cmap_pte2 = 0; 4542 sched_unpin(); 4543 mtx_unlock(&pc->pc_cmap_lock); 4544 } 4545 4546 /* 4547 * Copy 1 specified hardware page to another. 4548 */ 4549 static void 4550 __CONCAT(PMTYPE, copy_page)(vm_page_t src, vm_page_t dst) 4551 { 4552 pt_entry_t *cmap_pte1, *cmap_pte2; 4553 struct pcpu *pc; 4554 4555 sched_pin(); 4556 pc = get_pcpu(); 4557 cmap_pte1 = pc->pc_cmap_pte1; 4558 cmap_pte2 = pc->pc_cmap_pte2; 4559 mtx_lock(&pc->pc_cmap_lock); 4560 if (*cmap_pte1) 4561 panic("pmap_copy_page: CMAP1 busy"); 4562 if (*cmap_pte2) 4563 panic("pmap_copy_page: CMAP2 busy"); 4564 *cmap_pte1 = PG_V | VM_PAGE_TO_PHYS(src) | PG_A | 4565 pmap_cache_bits(kernel_pmap, src->md.pat_mode, 0); 4566 invlcaddr(pc->pc_cmap_addr1); 4567 *cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(dst) | PG_A | PG_M | 4568 pmap_cache_bits(kernel_pmap, dst->md.pat_mode, 0); 4569 invlcaddr(pc->pc_cmap_addr2); 4570 bcopy(pc->pc_cmap_addr1, pc->pc_cmap_addr2, PAGE_SIZE); 4571 *cmap_pte1 = 0; 4572 *cmap_pte2 = 0; 4573 sched_unpin(); 4574 mtx_unlock(&pc->pc_cmap_lock); 4575 } 4576 4577 static void 4578 __CONCAT(PMTYPE, copy_pages)(vm_page_t ma[], vm_offset_t a_offset, 4579 vm_page_t mb[], vm_offset_t b_offset, int xfersize) 4580 { 4581 vm_page_t a_pg, b_pg; 4582 char *a_cp, *b_cp; 4583 vm_offset_t a_pg_offset, b_pg_offset; 4584 pt_entry_t *cmap_pte1, *cmap_pte2; 4585 struct pcpu *pc; 4586 int cnt; 4587 4588 sched_pin(); 4589 pc = get_pcpu(); 4590 cmap_pte1 = pc->pc_cmap_pte1; 4591 cmap_pte2 = pc->pc_cmap_pte2; 4592 mtx_lock(&pc->pc_cmap_lock); 4593 if (*cmap_pte1 != 0) 4594 panic("pmap_copy_pages: CMAP1 busy"); 4595 if (*cmap_pte2 != 0) 4596 panic("pmap_copy_pages: CMAP2 busy"); 4597 while (xfersize > 0) { 4598 a_pg = ma[a_offset >> PAGE_SHIFT]; 4599 a_pg_offset = a_offset & PAGE_MASK; 4600 cnt = min(xfersize, PAGE_SIZE - a_pg_offset); 4601 b_pg = mb[b_offset >> PAGE_SHIFT]; 4602 b_pg_offset = b_offset & PAGE_MASK; 4603 cnt = min(cnt, PAGE_SIZE - b_pg_offset); 4604 *cmap_pte1 = PG_V | VM_PAGE_TO_PHYS(a_pg) | PG_A | 4605 pmap_cache_bits(kernel_pmap, a_pg->md.pat_mode, 0); 4606 invlcaddr(pc->pc_cmap_addr1); 4607 *cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(b_pg) | PG_A | 4608 PG_M | pmap_cache_bits(kernel_pmap, b_pg->md.pat_mode, 0); 4609 invlcaddr(pc->pc_cmap_addr2); 4610 a_cp = pc->pc_cmap_addr1 + a_pg_offset; 4611 b_cp = pc->pc_cmap_addr2 + b_pg_offset; 4612 bcopy(a_cp, b_cp, cnt); 4613 a_offset += cnt; 4614 b_offset += cnt; 4615 xfersize -= cnt; 4616 } 4617 *cmap_pte1 = 0; 4618 *cmap_pte2 = 0; 4619 sched_unpin(); 4620 mtx_unlock(&pc->pc_cmap_lock); 4621 } 4622 4623 /* 4624 * Returns true if the pmap's pv is one of the first 4625 * 16 pvs linked to from this page. This count may 4626 * be changed upwards or downwards in the future; it 4627 * is only necessary that true be returned for a small 4628 * subset of pmaps for proper page aging. 4629 */ 4630 static boolean_t 4631 __CONCAT(PMTYPE, page_exists_quick)(pmap_t pmap, vm_page_t m) 4632 { 4633 struct md_page *pvh; 4634 pv_entry_t pv; 4635 int loops = 0; 4636 boolean_t rv; 4637 4638 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 4639 ("pmap_page_exists_quick: page %p is not managed", m)); 4640 rv = FALSE; 4641 rw_wlock(&pvh_global_lock); 4642 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { 4643 if (PV_PMAP(pv) == pmap) { 4644 rv = TRUE; 4645 break; 4646 } 4647 loops++; 4648 if (loops >= 16) 4649 break; 4650 } 4651 if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) { 4652 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 4653 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { 4654 if (PV_PMAP(pv) == pmap) { 4655 rv = TRUE; 4656 break; 4657 } 4658 loops++; 4659 if (loops >= 16) 4660 break; 4661 } 4662 } 4663 rw_wunlock(&pvh_global_lock); 4664 return (rv); 4665 } 4666 4667 /* 4668 * pmap_page_wired_mappings: 4669 * 4670 * Return the number of managed mappings to the given physical page 4671 * that are wired. 4672 */ 4673 static int 4674 __CONCAT(PMTYPE, page_wired_mappings)(vm_page_t m) 4675 { 4676 int count; 4677 4678 count = 0; 4679 if ((m->oflags & VPO_UNMANAGED) != 0) 4680 return (count); 4681 rw_wlock(&pvh_global_lock); 4682 count = pmap_pvh_wired_mappings(&m->md, count); 4683 if ((m->flags & PG_FICTITIOUS) == 0) { 4684 count = pmap_pvh_wired_mappings(pa_to_pvh(VM_PAGE_TO_PHYS(m)), 4685 count); 4686 } 4687 rw_wunlock(&pvh_global_lock); 4688 return (count); 4689 } 4690 4691 /* 4692 * pmap_pvh_wired_mappings: 4693 * 4694 * Return the updated number "count" of managed mappings that are wired. 4695 */ 4696 static int 4697 pmap_pvh_wired_mappings(struct md_page *pvh, int count) 4698 { 4699 pmap_t pmap; 4700 pt_entry_t *pte; 4701 pv_entry_t pv; 4702 4703 rw_assert(&pvh_global_lock, RA_WLOCKED); 4704 sched_pin(); 4705 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { 4706 pmap = PV_PMAP(pv); 4707 PMAP_LOCK(pmap); 4708 pte = pmap_pte_quick(pmap, pv->pv_va); 4709 if ((*pte & PG_W) != 0) 4710 count++; 4711 PMAP_UNLOCK(pmap); 4712 } 4713 sched_unpin(); 4714 return (count); 4715 } 4716 4717 /* 4718 * Returns TRUE if the given page is mapped individually or as part of 4719 * a 4mpage. Otherwise, returns FALSE. 4720 */ 4721 static boolean_t 4722 __CONCAT(PMTYPE, page_is_mapped)(vm_page_t m) 4723 { 4724 boolean_t rv; 4725 4726 if ((m->oflags & VPO_UNMANAGED) != 0) 4727 return (FALSE); 4728 rw_wlock(&pvh_global_lock); 4729 rv = !TAILQ_EMPTY(&m->md.pv_list) || 4730 ((m->flags & PG_FICTITIOUS) == 0 && 4731 !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list)); 4732 rw_wunlock(&pvh_global_lock); 4733 return (rv); 4734 } 4735 4736 /* 4737 * Remove all pages from specified address space 4738 * this aids process exit speeds. Also, this code 4739 * is special cased for current process only, but 4740 * can have the more generic (and slightly slower) 4741 * mode enabled. This is much faster than pmap_remove 4742 * in the case of running down an entire address space. 4743 */ 4744 static void 4745 __CONCAT(PMTYPE, remove_pages)(pmap_t pmap) 4746 { 4747 pt_entry_t *pte, tpte; 4748 vm_page_t m, mpte, mt; 4749 pv_entry_t pv; 4750 struct md_page *pvh; 4751 struct pv_chunk *pc, *npc; 4752 struct spglist free; 4753 int field, idx; 4754 int32_t bit; 4755 uint32_t inuse, bitmask; 4756 int allfree; 4757 4758 if (pmap != PCPU_GET(curpmap)) { 4759 printf("warning: pmap_remove_pages called with non-current pmap\n"); 4760 return; 4761 } 4762 SLIST_INIT(&free); 4763 rw_wlock(&pvh_global_lock); 4764 PMAP_LOCK(pmap); 4765 sched_pin(); 4766 TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) { 4767 KASSERT(pc->pc_pmap == pmap, ("Wrong pmap %p %p", pmap, 4768 pc->pc_pmap)); 4769 allfree = 1; 4770 for (field = 0; field < _NPCM; field++) { 4771 inuse = ~pc->pc_map[field] & pc_freemask[field]; 4772 while (inuse != 0) { 4773 bit = bsfl(inuse); 4774 bitmask = 1UL << bit; 4775 idx = field * 32 + bit; 4776 pv = &pc->pc_pventry[idx]; 4777 inuse &= ~bitmask; 4778 4779 pte = pmap_pde(pmap, pv->pv_va); 4780 tpte = *pte; 4781 if ((tpte & PG_PS) == 0) { 4782 pte = pmap_pte_quick(pmap, pv->pv_va); 4783 tpte = *pte & ~PG_PTE_PAT; 4784 } 4785 4786 if (tpte == 0) { 4787 printf( 4788 "TPTE at %p IS ZERO @ VA %08x\n", 4789 pte, pv->pv_va); 4790 panic("bad pte"); 4791 } 4792 4793 /* 4794 * We cannot remove wired pages from a process' mapping at this time 4795 */ 4796 if (tpte & PG_W) { 4797 allfree = 0; 4798 continue; 4799 } 4800 4801 m = PHYS_TO_VM_PAGE(tpte & PG_FRAME); 4802 KASSERT(m->phys_addr == (tpte & PG_FRAME), 4803 ("vm_page_t %p phys_addr mismatch %016jx %016jx", 4804 m, (uintmax_t)m->phys_addr, 4805 (uintmax_t)tpte)); 4806 4807 KASSERT((m->flags & PG_FICTITIOUS) != 0 || 4808 m < &vm_page_array[vm_page_array_size], 4809 ("pmap_remove_pages: bad tpte %#jx", 4810 (uintmax_t)tpte)); 4811 4812 pte_clear(pte); 4813 4814 /* 4815 * Update the vm_page_t clean/reference bits. 4816 */ 4817 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) { 4818 if ((tpte & PG_PS) != 0) { 4819 for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++) 4820 vm_page_dirty(mt); 4821 } else 4822 vm_page_dirty(m); 4823 } 4824 4825 /* Mark free */ 4826 PV_STAT(pv_entry_frees++); 4827 PV_STAT(pv_entry_spare++); 4828 pv_entry_count--; 4829 pc->pc_map[field] |= bitmask; 4830 if ((tpte & PG_PS) != 0) { 4831 pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE; 4832 pvh = pa_to_pvh(tpte & PG_PS_FRAME); 4833 TAILQ_REMOVE(&pvh->pv_list, pv, pv_next); 4834 if (TAILQ_EMPTY(&pvh->pv_list)) { 4835 for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++) 4836 if (TAILQ_EMPTY(&mt->md.pv_list)) 4837 vm_page_aflag_clear(mt, PGA_WRITEABLE); 4838 } 4839 mpte = pmap_remove_pt_page(pmap, pv->pv_va); 4840 if (mpte != NULL) { 4841 KASSERT(vm_page_all_valid(mpte), 4842 ("pmap_remove_pages: pte page not promoted")); 4843 pmap->pm_stats.resident_count--; 4844 KASSERT(mpte->ref_count == NPTEPG, 4845 ("pmap_remove_pages: pte page ref count error")); 4846 mpte->ref_count = 0; 4847 pmap_add_delayed_free_list(mpte, &free, FALSE); 4848 } 4849 } else { 4850 pmap->pm_stats.resident_count--; 4851 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); 4852 if (TAILQ_EMPTY(&m->md.pv_list) && 4853 (m->flags & PG_FICTITIOUS) == 0) { 4854 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 4855 if (TAILQ_EMPTY(&pvh->pv_list)) 4856 vm_page_aflag_clear(m, PGA_WRITEABLE); 4857 } 4858 pmap_unuse_pt(pmap, pv->pv_va, &free); 4859 } 4860 } 4861 } 4862 if (allfree) { 4863 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 4864 free_pv_chunk(pc); 4865 } 4866 } 4867 sched_unpin(); 4868 pmap_invalidate_all_int(pmap); 4869 rw_wunlock(&pvh_global_lock); 4870 PMAP_UNLOCK(pmap); 4871 vm_page_free_pages_toq(&free, true); 4872 } 4873 4874 /* 4875 * pmap_is_modified: 4876 * 4877 * Return whether or not the specified physical page was modified 4878 * in any physical maps. 4879 */ 4880 static boolean_t 4881 __CONCAT(PMTYPE, is_modified)(vm_page_t m) 4882 { 4883 boolean_t rv; 4884 4885 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 4886 ("pmap_is_modified: page %p is not managed", m)); 4887 4888 /* 4889 * If the page is not busied then this check is racy. 4890 */ 4891 if (!pmap_page_is_write_mapped(m)) 4892 return (FALSE); 4893 rw_wlock(&pvh_global_lock); 4894 rv = pmap_is_modified_pvh(&m->md) || 4895 ((m->flags & PG_FICTITIOUS) == 0 && 4896 pmap_is_modified_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m)))); 4897 rw_wunlock(&pvh_global_lock); 4898 return (rv); 4899 } 4900 4901 /* 4902 * Returns TRUE if any of the given mappings were used to modify 4903 * physical memory. Otherwise, returns FALSE. Both page and 2mpage 4904 * mappings are supported. 4905 */ 4906 static boolean_t 4907 pmap_is_modified_pvh(struct md_page *pvh) 4908 { 4909 pv_entry_t pv; 4910 pt_entry_t *pte; 4911 pmap_t pmap; 4912 boolean_t rv; 4913 4914 rw_assert(&pvh_global_lock, RA_WLOCKED); 4915 rv = FALSE; 4916 sched_pin(); 4917 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { 4918 pmap = PV_PMAP(pv); 4919 PMAP_LOCK(pmap); 4920 pte = pmap_pte_quick(pmap, pv->pv_va); 4921 rv = (*pte & (PG_M | PG_RW)) == (PG_M | PG_RW); 4922 PMAP_UNLOCK(pmap); 4923 if (rv) 4924 break; 4925 } 4926 sched_unpin(); 4927 return (rv); 4928 } 4929 4930 /* 4931 * pmap_is_prefaultable: 4932 * 4933 * Return whether or not the specified virtual address is elgible 4934 * for prefault. 4935 */ 4936 static boolean_t 4937 __CONCAT(PMTYPE, is_prefaultable)(pmap_t pmap, vm_offset_t addr) 4938 { 4939 pd_entry_t pde; 4940 boolean_t rv; 4941 4942 rv = FALSE; 4943 PMAP_LOCK(pmap); 4944 pde = *pmap_pde(pmap, addr); 4945 if (pde != 0 && (pde & PG_PS) == 0) 4946 rv = pmap_pte_ufast(pmap, addr, pde) == 0; 4947 PMAP_UNLOCK(pmap); 4948 return (rv); 4949 } 4950 4951 /* 4952 * pmap_is_referenced: 4953 * 4954 * Return whether or not the specified physical page was referenced 4955 * in any physical maps. 4956 */ 4957 static boolean_t 4958 __CONCAT(PMTYPE, is_referenced)(vm_page_t m) 4959 { 4960 boolean_t rv; 4961 4962 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 4963 ("pmap_is_referenced: page %p is not managed", m)); 4964 rw_wlock(&pvh_global_lock); 4965 rv = pmap_is_referenced_pvh(&m->md) || 4966 ((m->flags & PG_FICTITIOUS) == 0 && 4967 pmap_is_referenced_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m)))); 4968 rw_wunlock(&pvh_global_lock); 4969 return (rv); 4970 } 4971 4972 /* 4973 * Returns TRUE if any of the given mappings were referenced and FALSE 4974 * otherwise. Both page and 4mpage mappings are supported. 4975 */ 4976 static boolean_t 4977 pmap_is_referenced_pvh(struct md_page *pvh) 4978 { 4979 pv_entry_t pv; 4980 pt_entry_t *pte; 4981 pmap_t pmap; 4982 boolean_t rv; 4983 4984 rw_assert(&pvh_global_lock, RA_WLOCKED); 4985 rv = FALSE; 4986 sched_pin(); 4987 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { 4988 pmap = PV_PMAP(pv); 4989 PMAP_LOCK(pmap); 4990 pte = pmap_pte_quick(pmap, pv->pv_va); 4991 rv = (*pte & (PG_A | PG_V)) == (PG_A | PG_V); 4992 PMAP_UNLOCK(pmap); 4993 if (rv) 4994 break; 4995 } 4996 sched_unpin(); 4997 return (rv); 4998 } 4999 5000 /* 5001 * Clear the write and modified bits in each of the given page's mappings. 5002 */ 5003 static void 5004 __CONCAT(PMTYPE, remove_write)(vm_page_t m) 5005 { 5006 struct md_page *pvh; 5007 pv_entry_t next_pv, pv; 5008 pmap_t pmap; 5009 pd_entry_t *pde; 5010 pt_entry_t oldpte, *pte; 5011 vm_offset_t va; 5012 5013 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 5014 ("pmap_remove_write: page %p is not managed", m)); 5015 vm_page_assert_busied(m); 5016 5017 if (!pmap_page_is_write_mapped(m)) 5018 return; 5019 rw_wlock(&pvh_global_lock); 5020 sched_pin(); 5021 if ((m->flags & PG_FICTITIOUS) != 0) 5022 goto small_mappings; 5023 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 5024 TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) { 5025 va = pv->pv_va; 5026 pmap = PV_PMAP(pv); 5027 PMAP_LOCK(pmap); 5028 pde = pmap_pde(pmap, va); 5029 if ((*pde & PG_RW) != 0) 5030 (void)pmap_demote_pde(pmap, pde, va); 5031 PMAP_UNLOCK(pmap); 5032 } 5033 small_mappings: 5034 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { 5035 pmap = PV_PMAP(pv); 5036 PMAP_LOCK(pmap); 5037 pde = pmap_pde(pmap, pv->pv_va); 5038 KASSERT((*pde & PG_PS) == 0, ("pmap_clear_write: found" 5039 " a 4mpage in page %p's pv list", m)); 5040 pte = pmap_pte_quick(pmap, pv->pv_va); 5041 retry: 5042 oldpte = *pte; 5043 if ((oldpte & PG_RW) != 0) { 5044 /* 5045 * Regardless of whether a pte is 32 or 64 bits 5046 * in size, PG_RW and PG_M are among the least 5047 * significant 32 bits. 5048 */ 5049 if (!atomic_cmpset_int((u_int *)pte, oldpte, 5050 oldpte & ~(PG_RW | PG_M))) 5051 goto retry; 5052 if ((oldpte & PG_M) != 0) 5053 vm_page_dirty(m); 5054 pmap_invalidate_page_int(pmap, pv->pv_va); 5055 } 5056 PMAP_UNLOCK(pmap); 5057 } 5058 vm_page_aflag_clear(m, PGA_WRITEABLE); 5059 sched_unpin(); 5060 rw_wunlock(&pvh_global_lock); 5061 } 5062 5063 /* 5064 * pmap_ts_referenced: 5065 * 5066 * Return a count of reference bits for a page, clearing those bits. 5067 * It is not necessary for every reference bit to be cleared, but it 5068 * is necessary that 0 only be returned when there are truly no 5069 * reference bits set. 5070 * 5071 * As an optimization, update the page's dirty field if a modified bit is 5072 * found while counting reference bits. This opportunistic update can be 5073 * performed at low cost and can eliminate the need for some future calls 5074 * to pmap_is_modified(). However, since this function stops after 5075 * finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some 5076 * dirty pages. Those dirty pages will only be detected by a future call 5077 * to pmap_is_modified(). 5078 */ 5079 static int 5080 __CONCAT(PMTYPE, ts_referenced)(vm_page_t m) 5081 { 5082 struct md_page *pvh; 5083 pv_entry_t pv, pvf; 5084 pmap_t pmap; 5085 pd_entry_t *pde; 5086 pt_entry_t *pte; 5087 vm_paddr_t pa; 5088 int rtval = 0; 5089 5090 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 5091 ("pmap_ts_referenced: page %p is not managed", m)); 5092 pa = VM_PAGE_TO_PHYS(m); 5093 pvh = pa_to_pvh(pa); 5094 rw_wlock(&pvh_global_lock); 5095 sched_pin(); 5096 if ((m->flags & PG_FICTITIOUS) != 0 || 5097 (pvf = TAILQ_FIRST(&pvh->pv_list)) == NULL) 5098 goto small_mappings; 5099 pv = pvf; 5100 do { 5101 pmap = PV_PMAP(pv); 5102 PMAP_LOCK(pmap); 5103 pde = pmap_pde(pmap, pv->pv_va); 5104 if ((*pde & (PG_M | PG_RW)) == (PG_M | PG_RW)) { 5105 /* 5106 * Although "*pde" is mapping a 2/4MB page, because 5107 * this function is called at a 4KB page granularity, 5108 * we only update the 4KB page under test. 5109 */ 5110 vm_page_dirty(m); 5111 } 5112 if ((*pde & PG_A) != 0) { 5113 /* 5114 * Since this reference bit is shared by either 1024 5115 * or 512 4KB pages, it should not be cleared every 5116 * time it is tested. Apply a simple "hash" function 5117 * on the physical page number, the virtual superpage 5118 * number, and the pmap address to select one 4KB page 5119 * out of the 1024 or 512 on which testing the 5120 * reference bit will result in clearing that bit. 5121 * This function is designed to avoid the selection of 5122 * the same 4KB page for every 2- or 4MB page mapping. 5123 * 5124 * On demotion, a mapping that hasn't been referenced 5125 * is simply destroyed. To avoid the possibility of a 5126 * subsequent page fault on a demoted wired mapping, 5127 * always leave its reference bit set. Moreover, 5128 * since the superpage is wired, the current state of 5129 * its reference bit won't affect page replacement. 5130 */ 5131 if ((((pa >> PAGE_SHIFT) ^ (pv->pv_va >> PDRSHIFT) ^ 5132 (uintptr_t)pmap) & (NPTEPG - 1)) == 0 && 5133 (*pde & PG_W) == 0) { 5134 atomic_clear_int((u_int *)pde, PG_A); 5135 pmap_invalidate_page_int(pmap, pv->pv_va); 5136 } 5137 rtval++; 5138 } 5139 PMAP_UNLOCK(pmap); 5140 /* Rotate the PV list if it has more than one entry. */ 5141 if (TAILQ_NEXT(pv, pv_next) != NULL) { 5142 TAILQ_REMOVE(&pvh->pv_list, pv, pv_next); 5143 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next); 5144 } 5145 if (rtval >= PMAP_TS_REFERENCED_MAX) 5146 goto out; 5147 } while ((pv = TAILQ_FIRST(&pvh->pv_list)) != pvf); 5148 small_mappings: 5149 if ((pvf = TAILQ_FIRST(&m->md.pv_list)) == NULL) 5150 goto out; 5151 pv = pvf; 5152 do { 5153 pmap = PV_PMAP(pv); 5154 PMAP_LOCK(pmap); 5155 pde = pmap_pde(pmap, pv->pv_va); 5156 KASSERT((*pde & PG_PS) == 0, 5157 ("pmap_ts_referenced: found a 4mpage in page %p's pv list", 5158 m)); 5159 pte = pmap_pte_quick(pmap, pv->pv_va); 5160 if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 5161 vm_page_dirty(m); 5162 if ((*pte & PG_A) != 0) { 5163 atomic_clear_int((u_int *)pte, PG_A); 5164 pmap_invalidate_page_int(pmap, pv->pv_va); 5165 rtval++; 5166 } 5167 PMAP_UNLOCK(pmap); 5168 /* Rotate the PV list if it has more than one entry. */ 5169 if (TAILQ_NEXT(pv, pv_next) != NULL) { 5170 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); 5171 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); 5172 } 5173 } while ((pv = TAILQ_FIRST(&m->md.pv_list)) != pvf && rtval < 5174 PMAP_TS_REFERENCED_MAX); 5175 out: 5176 sched_unpin(); 5177 rw_wunlock(&pvh_global_lock); 5178 return (rtval); 5179 } 5180 5181 /* 5182 * Apply the given advice to the specified range of addresses within the 5183 * given pmap. Depending on the advice, clear the referenced and/or 5184 * modified flags in each mapping and set the mapped page's dirty field. 5185 */ 5186 static void 5187 __CONCAT(PMTYPE, advise)(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, 5188 int advice) 5189 { 5190 pd_entry_t oldpde, *pde; 5191 pt_entry_t *pte; 5192 vm_offset_t va, pdnxt; 5193 vm_page_t m; 5194 bool anychanged, pv_lists_locked; 5195 5196 if (advice != MADV_DONTNEED && advice != MADV_FREE) 5197 return; 5198 if (pmap_is_current(pmap)) 5199 pv_lists_locked = false; 5200 else { 5201 pv_lists_locked = true; 5202 resume: 5203 rw_wlock(&pvh_global_lock); 5204 sched_pin(); 5205 } 5206 anychanged = false; 5207 PMAP_LOCK(pmap); 5208 for (; sva < eva; sva = pdnxt) { 5209 pdnxt = (sva + NBPDR) & ~PDRMASK; 5210 if (pdnxt < sva) 5211 pdnxt = eva; 5212 pde = pmap_pde(pmap, sva); 5213 oldpde = *pde; 5214 if ((oldpde & PG_V) == 0) 5215 continue; 5216 else if ((oldpde & PG_PS) != 0) { 5217 if ((oldpde & PG_MANAGED) == 0) 5218 continue; 5219 if (!pv_lists_locked) { 5220 pv_lists_locked = true; 5221 if (!rw_try_wlock(&pvh_global_lock)) { 5222 if (anychanged) 5223 pmap_invalidate_all_int(pmap); 5224 PMAP_UNLOCK(pmap); 5225 goto resume; 5226 } 5227 sched_pin(); 5228 } 5229 if (!pmap_demote_pde(pmap, pde, sva)) { 5230 /* 5231 * The large page mapping was destroyed. 5232 */ 5233 continue; 5234 } 5235 5236 /* 5237 * Unless the page mappings are wired, remove the 5238 * mapping to a single page so that a subsequent 5239 * access may repromote. Choosing the last page 5240 * within the address range [sva, min(pdnxt, eva)) 5241 * generally results in more repromotions. Since the 5242 * underlying page table page is fully populated, this 5243 * removal never frees a page table page. 5244 */ 5245 if ((oldpde & PG_W) == 0) { 5246 va = eva; 5247 if (va > pdnxt) 5248 va = pdnxt; 5249 va -= PAGE_SIZE; 5250 KASSERT(va >= sva, 5251 ("pmap_advise: no address gap")); 5252 pte = pmap_pte_quick(pmap, va); 5253 KASSERT((*pte & PG_V) != 0, 5254 ("pmap_advise: invalid PTE")); 5255 pmap_remove_pte(pmap, pte, va, NULL); 5256 anychanged = true; 5257 } 5258 } 5259 if (pdnxt > eva) 5260 pdnxt = eva; 5261 va = pdnxt; 5262 for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++, 5263 sva += PAGE_SIZE) { 5264 if ((*pte & (PG_MANAGED | PG_V)) != (PG_MANAGED | PG_V)) 5265 goto maybe_invlrng; 5266 else if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) { 5267 if (advice == MADV_DONTNEED) { 5268 /* 5269 * Future calls to pmap_is_modified() 5270 * can be avoided by making the page 5271 * dirty now. 5272 */ 5273 m = PHYS_TO_VM_PAGE(*pte & PG_FRAME); 5274 vm_page_dirty(m); 5275 } 5276 atomic_clear_int((u_int *)pte, PG_M | PG_A); 5277 } else if ((*pte & PG_A) != 0) 5278 atomic_clear_int((u_int *)pte, PG_A); 5279 else 5280 goto maybe_invlrng; 5281 if ((*pte & PG_G) != 0) { 5282 if (va == pdnxt) 5283 va = sva; 5284 } else 5285 anychanged = true; 5286 continue; 5287 maybe_invlrng: 5288 if (va != pdnxt) { 5289 pmap_invalidate_range_int(pmap, va, sva); 5290 va = pdnxt; 5291 } 5292 } 5293 if (va != pdnxt) 5294 pmap_invalidate_range_int(pmap, va, sva); 5295 } 5296 if (anychanged) 5297 pmap_invalidate_all_int(pmap); 5298 if (pv_lists_locked) { 5299 sched_unpin(); 5300 rw_wunlock(&pvh_global_lock); 5301 } 5302 PMAP_UNLOCK(pmap); 5303 } 5304 5305 /* 5306 * Clear the modify bits on the specified physical page. 5307 */ 5308 static void 5309 __CONCAT(PMTYPE, clear_modify)(vm_page_t m) 5310 { 5311 struct md_page *pvh; 5312 pv_entry_t next_pv, pv; 5313 pmap_t pmap; 5314 pd_entry_t oldpde, *pde; 5315 pt_entry_t *pte; 5316 vm_offset_t va; 5317 5318 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 5319 ("pmap_clear_modify: page %p is not managed", m)); 5320 vm_page_assert_busied(m); 5321 5322 if (!pmap_page_is_write_mapped(m)) 5323 return; 5324 rw_wlock(&pvh_global_lock); 5325 sched_pin(); 5326 if ((m->flags & PG_FICTITIOUS) != 0) 5327 goto small_mappings; 5328 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 5329 TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) { 5330 va = pv->pv_va; 5331 pmap = PV_PMAP(pv); 5332 PMAP_LOCK(pmap); 5333 pde = pmap_pde(pmap, va); 5334 oldpde = *pde; 5335 /* If oldpde has PG_RW set, then it also has PG_M set. */ 5336 if ((oldpde & PG_RW) != 0 && 5337 pmap_demote_pde(pmap, pde, va) && 5338 (oldpde & PG_W) == 0) { 5339 /* 5340 * Write protect the mapping to a single page so that 5341 * a subsequent write access may repromote. 5342 */ 5343 va += VM_PAGE_TO_PHYS(m) - (oldpde & PG_PS_FRAME); 5344 pte = pmap_pte_quick(pmap, va); 5345 /* 5346 * Regardless of whether a pte is 32 or 64 bits 5347 * in size, PG_RW and PG_M are among the least 5348 * significant 32 bits. 5349 */ 5350 atomic_clear_int((u_int *)pte, PG_M | PG_RW); 5351 vm_page_dirty(m); 5352 pmap_invalidate_page_int(pmap, va); 5353 } 5354 PMAP_UNLOCK(pmap); 5355 } 5356 small_mappings: 5357 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { 5358 pmap = PV_PMAP(pv); 5359 PMAP_LOCK(pmap); 5360 pde = pmap_pde(pmap, pv->pv_va); 5361 KASSERT((*pde & PG_PS) == 0, ("pmap_clear_modify: found" 5362 " a 4mpage in page %p's pv list", m)); 5363 pte = pmap_pte_quick(pmap, pv->pv_va); 5364 if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) { 5365 /* 5366 * Regardless of whether a pte is 32 or 64 bits 5367 * in size, PG_M is among the least significant 5368 * 32 bits. 5369 */ 5370 atomic_clear_int((u_int *)pte, PG_M); 5371 pmap_invalidate_page_int(pmap, pv->pv_va); 5372 } 5373 PMAP_UNLOCK(pmap); 5374 } 5375 sched_unpin(); 5376 rw_wunlock(&pvh_global_lock); 5377 } 5378 5379 /* 5380 * Miscellaneous support routines follow 5381 */ 5382 5383 /* Adjust the cache mode for a 4KB page mapped via a PTE. */ 5384 static __inline void 5385 pmap_pte_attr(pt_entry_t *pte, int cache_bits) 5386 { 5387 u_int opte, npte; 5388 5389 /* 5390 * The cache mode bits are all in the low 32-bits of the 5391 * PTE, so we can just spin on updating the low 32-bits. 5392 */ 5393 do { 5394 opte = *(u_int *)pte; 5395 npte = opte & ~PG_PTE_CACHE; 5396 npte |= cache_bits; 5397 } while (npte != opte && !atomic_cmpset_int((u_int *)pte, opte, npte)); 5398 } 5399 5400 /* Adjust the cache mode for a 2/4MB page mapped via a PDE. */ 5401 static __inline void 5402 pmap_pde_attr(pd_entry_t *pde, int cache_bits) 5403 { 5404 u_int opde, npde; 5405 5406 /* 5407 * The cache mode bits are all in the low 32-bits of the 5408 * PDE, so we can just spin on updating the low 32-bits. 5409 */ 5410 do { 5411 opde = *(u_int *)pde; 5412 npde = opde & ~PG_PDE_CACHE; 5413 npde |= cache_bits; 5414 } while (npde != opde && !atomic_cmpset_int((u_int *)pde, opde, npde)); 5415 } 5416 5417 /* 5418 * Map a set of physical memory pages into the kernel virtual 5419 * address space. Return a pointer to where it is mapped. This 5420 * routine is intended to be used for mapping device memory, 5421 * NOT real memory. 5422 */ 5423 static void * 5424 __CONCAT(PMTYPE, mapdev_attr)(vm_paddr_t pa, vm_size_t size, int mode, 5425 int flags) 5426 { 5427 struct pmap_preinit_mapping *ppim; 5428 vm_offset_t va, offset; 5429 vm_page_t m; 5430 vm_size_t tmpsize; 5431 int i; 5432 5433 offset = pa & PAGE_MASK; 5434 size = round_page(offset + size); 5435 pa = pa & PG_FRAME; 5436 5437 if (pa < PMAP_MAP_LOW && pa + size <= PMAP_MAP_LOW) { 5438 va = pa + PMAP_MAP_LOW; 5439 if ((flags & MAPDEV_SETATTR) == 0) 5440 return ((void *)(va + offset)); 5441 } else if (!pmap_initialized) { 5442 va = 0; 5443 for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) { 5444 ppim = pmap_preinit_mapping + i; 5445 if (ppim->va == 0) { 5446 ppim->pa = pa; 5447 ppim->sz = size; 5448 ppim->mode = mode; 5449 ppim->va = virtual_avail; 5450 virtual_avail += size; 5451 va = ppim->va; 5452 break; 5453 } 5454 } 5455 if (va == 0) 5456 panic("%s: too many preinit mappings", __func__); 5457 } else { 5458 /* 5459 * If we have a preinit mapping, re-use it. 5460 */ 5461 for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) { 5462 ppim = pmap_preinit_mapping + i; 5463 if (ppim->pa == pa && ppim->sz == size && 5464 (ppim->mode == mode || 5465 (flags & MAPDEV_SETATTR) == 0)) 5466 return ((void *)(ppim->va + offset)); 5467 } 5468 va = kva_alloc(size); 5469 if (va == 0) 5470 panic("%s: Couldn't allocate KVA", __func__); 5471 } 5472 for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE) { 5473 if ((flags & MAPDEV_SETATTR) == 0 && pmap_initialized) { 5474 m = PHYS_TO_VM_PAGE(pa); 5475 if (m != NULL && VM_PAGE_TO_PHYS(m) == pa) { 5476 pmap_kenter_attr(va + tmpsize, pa + tmpsize, 5477 m->md.pat_mode); 5478 continue; 5479 } 5480 } 5481 pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode); 5482 } 5483 pmap_invalidate_range_int(kernel_pmap, va, va + tmpsize); 5484 pmap_invalidate_cache_range(va, va + size); 5485 return ((void *)(va + offset)); 5486 } 5487 5488 static void 5489 __CONCAT(PMTYPE, unmapdev)(void *p, vm_size_t size) 5490 { 5491 struct pmap_preinit_mapping *ppim; 5492 vm_offset_t offset, va; 5493 int i; 5494 5495 va = (vm_offset_t)p; 5496 if (va >= PMAP_MAP_LOW && va <= KERNBASE && va + size <= KERNBASE) 5497 return; 5498 offset = va & PAGE_MASK; 5499 size = round_page(offset + size); 5500 va = trunc_page(va); 5501 for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) { 5502 ppim = pmap_preinit_mapping + i; 5503 if (ppim->va == va && ppim->sz == size) { 5504 if (pmap_initialized) 5505 return; 5506 ppim->pa = 0; 5507 ppim->va = 0; 5508 ppim->sz = 0; 5509 ppim->mode = 0; 5510 if (va + size == virtual_avail) 5511 virtual_avail = va; 5512 return; 5513 } 5514 } 5515 if (pmap_initialized) { 5516 pmap_qremove(va, atop(size)); 5517 kva_free(va, size); 5518 } 5519 } 5520 5521 /* 5522 * Sets the memory attribute for the specified page. 5523 */ 5524 static void 5525 __CONCAT(PMTYPE, page_set_memattr)(vm_page_t m, vm_memattr_t ma) 5526 { 5527 5528 m->md.pat_mode = ma; 5529 if ((m->flags & PG_FICTITIOUS) != 0) 5530 return; 5531 5532 /* 5533 * If "m" is a normal page, flush it from the cache. 5534 * See pmap_invalidate_cache_range(). 5535 * 5536 * First, try to find an existing mapping of the page by sf 5537 * buffer. sf_buf_invalidate_cache() modifies mapping and 5538 * flushes the cache. 5539 */ 5540 if (sf_buf_invalidate_cache(m)) 5541 return; 5542 5543 /* 5544 * If page is not mapped by sf buffer, but CPU does not 5545 * support self snoop, map the page transient and do 5546 * invalidation. In the worst case, whole cache is flushed by 5547 * pmap_invalidate_cache_range(). 5548 */ 5549 if ((cpu_feature & CPUID_SS) == 0) 5550 pmap_flush_page(m); 5551 } 5552 5553 static void 5554 __CONCAT(PMTYPE, flush_page)(vm_page_t m) 5555 { 5556 pt_entry_t *cmap_pte2; 5557 struct pcpu *pc; 5558 vm_offset_t sva, eva; 5559 bool useclflushopt; 5560 5561 useclflushopt = (cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0; 5562 if (useclflushopt || (cpu_feature & CPUID_CLFSH) != 0) { 5563 sched_pin(); 5564 pc = get_pcpu(); 5565 cmap_pte2 = pc->pc_cmap_pte2; 5566 mtx_lock(&pc->pc_cmap_lock); 5567 if (*cmap_pte2) 5568 panic("pmap_flush_page: CMAP2 busy"); 5569 *cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | 5570 PG_A | PG_M | pmap_cache_bits(kernel_pmap, m->md.pat_mode, 5571 0); 5572 invlcaddr(pc->pc_cmap_addr2); 5573 sva = (vm_offset_t)pc->pc_cmap_addr2; 5574 eva = sva + PAGE_SIZE; 5575 5576 /* 5577 * Use mfence or sfence despite the ordering implied by 5578 * mtx_{un,}lock() because clflush on non-Intel CPUs 5579 * and clflushopt are not guaranteed to be ordered by 5580 * any other instruction. 5581 */ 5582 if (useclflushopt) 5583 sfence(); 5584 else if (cpu_vendor_id != CPU_VENDOR_INTEL) 5585 mfence(); 5586 for (; sva < eva; sva += cpu_clflush_line_size) { 5587 if (useclflushopt) 5588 clflushopt(sva); 5589 else 5590 clflush(sva); 5591 } 5592 if (useclflushopt) 5593 sfence(); 5594 else if (cpu_vendor_id != CPU_VENDOR_INTEL) 5595 mfence(); 5596 *cmap_pte2 = 0; 5597 sched_unpin(); 5598 mtx_unlock(&pc->pc_cmap_lock); 5599 } else 5600 pmap_invalidate_cache(); 5601 } 5602 5603 /* 5604 * Changes the specified virtual address range's memory type to that given by 5605 * the parameter "mode". The specified virtual address range must be 5606 * completely contained within either the kernel map. 5607 * 5608 * Returns zero if the change completed successfully, and either EINVAL or 5609 * ENOMEM if the change failed. Specifically, EINVAL is returned if some part 5610 * of the virtual address range was not mapped, and ENOMEM is returned if 5611 * there was insufficient memory available to complete the change. 5612 */ 5613 static int 5614 __CONCAT(PMTYPE, change_attr)(vm_offset_t va, vm_size_t size, int mode) 5615 { 5616 vm_offset_t base, offset, tmpva; 5617 pd_entry_t *pde; 5618 pt_entry_t *pte; 5619 int cache_bits_pte, cache_bits_pde; 5620 boolean_t changed; 5621 5622 base = trunc_page(va); 5623 offset = va & PAGE_MASK; 5624 size = round_page(offset + size); 5625 5626 /* 5627 * Only supported on kernel virtual addresses above the recursive map. 5628 */ 5629 if (base < VM_MIN_KERNEL_ADDRESS) 5630 return (EINVAL); 5631 5632 cache_bits_pde = pmap_cache_bits(kernel_pmap, mode, 1); 5633 cache_bits_pte = pmap_cache_bits(kernel_pmap, mode, 0); 5634 changed = FALSE; 5635 5636 /* 5637 * Pages that aren't mapped aren't supported. Also break down 5638 * 2/4MB pages into 4KB pages if required. 5639 */ 5640 PMAP_LOCK(kernel_pmap); 5641 for (tmpva = base; tmpva < base + size; ) { 5642 pde = pmap_pde(kernel_pmap, tmpva); 5643 if (*pde == 0) { 5644 PMAP_UNLOCK(kernel_pmap); 5645 return (EINVAL); 5646 } 5647 if (*pde & PG_PS) { 5648 /* 5649 * If the current 2/4MB page already has 5650 * the required memory type, then we need not 5651 * demote this page. Just increment tmpva to 5652 * the next 2/4MB page frame. 5653 */ 5654 if ((*pde & PG_PDE_CACHE) == cache_bits_pde) { 5655 tmpva = trunc_4mpage(tmpva) + NBPDR; 5656 continue; 5657 } 5658 5659 /* 5660 * If the current offset aligns with a 2/4MB 5661 * page frame and there is at least 2/4MB left 5662 * within the range, then we need not break 5663 * down this page into 4KB pages. 5664 */ 5665 if ((tmpva & PDRMASK) == 0 && 5666 tmpva + PDRMASK < base + size) { 5667 tmpva += NBPDR; 5668 continue; 5669 } 5670 if (!pmap_demote_pde(kernel_pmap, pde, tmpva)) { 5671 PMAP_UNLOCK(kernel_pmap); 5672 return (ENOMEM); 5673 } 5674 } 5675 pte = vtopte(tmpva); 5676 if (*pte == 0) { 5677 PMAP_UNLOCK(kernel_pmap); 5678 return (EINVAL); 5679 } 5680 tmpva += PAGE_SIZE; 5681 } 5682 PMAP_UNLOCK(kernel_pmap); 5683 5684 /* 5685 * Ok, all the pages exist, so run through them updating their 5686 * cache mode if required. 5687 */ 5688 for (tmpva = base; tmpva < base + size; ) { 5689 pde = pmap_pde(kernel_pmap, tmpva); 5690 if (*pde & PG_PS) { 5691 if ((*pde & PG_PDE_CACHE) != cache_bits_pde) { 5692 pmap_pde_attr(pde, cache_bits_pde); 5693 changed = TRUE; 5694 } 5695 tmpva = trunc_4mpage(tmpva) + NBPDR; 5696 } else { 5697 pte = vtopte(tmpva); 5698 if ((*pte & PG_PTE_CACHE) != cache_bits_pte) { 5699 pmap_pte_attr(pte, cache_bits_pte); 5700 changed = TRUE; 5701 } 5702 tmpva += PAGE_SIZE; 5703 } 5704 } 5705 5706 /* 5707 * Flush CPU caches to make sure any data isn't cached that 5708 * shouldn't be, etc. 5709 */ 5710 if (changed) { 5711 pmap_invalidate_range_int(kernel_pmap, base, tmpva); 5712 pmap_invalidate_cache_range(base, tmpva); 5713 } 5714 return (0); 5715 } 5716 5717 /* 5718 * Perform the pmap work for mincore(2). If the page is not both referenced and 5719 * modified by this pmap, returns its physical address so that the caller can 5720 * find other mappings. 5721 */ 5722 static int 5723 __CONCAT(PMTYPE, mincore)(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap) 5724 { 5725 pd_entry_t pde; 5726 pt_entry_t pte; 5727 vm_paddr_t pa; 5728 int val; 5729 5730 PMAP_LOCK(pmap); 5731 pde = *pmap_pde(pmap, addr); 5732 if (pde != 0) { 5733 if ((pde & PG_PS) != 0) { 5734 pte = pde; 5735 /* Compute the physical address of the 4KB page. */ 5736 pa = ((pde & PG_PS_FRAME) | (addr & PDRMASK)) & 5737 PG_FRAME; 5738 val = MINCORE_PSIND(1); 5739 } else { 5740 pte = pmap_pte_ufast(pmap, addr, pde); 5741 pa = pte & PG_FRAME; 5742 val = 0; 5743 } 5744 } else { 5745 pte = 0; 5746 pa = 0; 5747 val = 0; 5748 } 5749 if ((pte & PG_V) != 0) { 5750 val |= MINCORE_INCORE; 5751 if ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 5752 val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER; 5753 if ((pte & PG_A) != 0) 5754 val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER; 5755 } 5756 if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) != 5757 (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && 5758 (pte & (PG_MANAGED | PG_V)) == (PG_MANAGED | PG_V)) { 5759 *pap = pa; 5760 } 5761 PMAP_UNLOCK(pmap); 5762 return (val); 5763 } 5764 5765 static void 5766 __CONCAT(PMTYPE, activate)(struct thread *td) 5767 { 5768 pmap_t pmap, oldpmap; 5769 u_int cpuid; 5770 u_int32_t cr3; 5771 5772 critical_enter(); 5773 pmap = vmspace_pmap(td->td_proc->p_vmspace); 5774 oldpmap = PCPU_GET(curpmap); 5775 cpuid = PCPU_GET(cpuid); 5776 #if defined(SMP) 5777 CPU_CLR_ATOMIC(cpuid, &oldpmap->pm_active); 5778 CPU_SET_ATOMIC(cpuid, &pmap->pm_active); 5779 #else 5780 CPU_CLR(cpuid, &oldpmap->pm_active); 5781 CPU_SET(cpuid, &pmap->pm_active); 5782 #endif 5783 #ifdef PMAP_PAE_COMP 5784 cr3 = vtophys(pmap->pm_pdpt); 5785 #else 5786 cr3 = vtophys(pmap->pm_pdir); 5787 #endif 5788 /* 5789 * pmap_activate is for the current thread on the current cpu 5790 */ 5791 td->td_pcb->pcb_cr3 = cr3; 5792 PCPU_SET(curpmap, pmap); 5793 critical_exit(); 5794 } 5795 5796 static void 5797 __CONCAT(PMTYPE, activate_boot)(pmap_t pmap) 5798 { 5799 u_int cpuid; 5800 5801 cpuid = PCPU_GET(cpuid); 5802 #if defined(SMP) 5803 CPU_SET_ATOMIC(cpuid, &pmap->pm_active); 5804 #else 5805 CPU_SET(cpuid, &pmap->pm_active); 5806 #endif 5807 PCPU_SET(curpmap, pmap); 5808 } 5809 5810 /* 5811 * Increase the starting virtual address of the given mapping if a 5812 * different alignment might result in more superpage mappings. 5813 */ 5814 static void 5815 __CONCAT(PMTYPE, align_superpage)(vm_object_t object, vm_ooffset_t offset, 5816 vm_offset_t *addr, vm_size_t size) 5817 { 5818 vm_offset_t superpage_offset; 5819 5820 if (size < NBPDR) 5821 return; 5822 if (object != NULL && (object->flags & OBJ_COLORED) != 0) 5823 offset += ptoa(object->pg_color); 5824 superpage_offset = offset & PDRMASK; 5825 if (size - ((NBPDR - superpage_offset) & PDRMASK) < NBPDR || 5826 (*addr & PDRMASK) == superpage_offset) 5827 return; 5828 if ((*addr & PDRMASK) < superpage_offset) 5829 *addr = (*addr & ~PDRMASK) + superpage_offset; 5830 else 5831 *addr = ((*addr + PDRMASK) & ~PDRMASK) + superpage_offset; 5832 } 5833 5834 static vm_offset_t 5835 __CONCAT(PMTYPE, quick_enter_page)(vm_page_t m) 5836 { 5837 vm_offset_t qaddr; 5838 pt_entry_t *pte; 5839 5840 critical_enter(); 5841 qaddr = PCPU_GET(qmap_addr); 5842 pte = vtopte(qaddr); 5843 5844 KASSERT(*pte == 0, 5845 ("pmap_quick_enter_page: PTE busy %#jx", (uintmax_t)*pte)); 5846 *pte = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M | 5847 pmap_cache_bits(kernel_pmap, pmap_page_get_memattr(m), 0); 5848 invlpg(qaddr); 5849 5850 return (qaddr); 5851 } 5852 5853 static void 5854 __CONCAT(PMTYPE, quick_remove_page)(vm_offset_t addr) 5855 { 5856 vm_offset_t qaddr; 5857 pt_entry_t *pte; 5858 5859 qaddr = PCPU_GET(qmap_addr); 5860 pte = vtopte(qaddr); 5861 5862 KASSERT(*pte != 0, ("pmap_quick_remove_page: PTE not in use")); 5863 KASSERT(addr == qaddr, ("pmap_quick_remove_page: invalid address")); 5864 5865 *pte = 0; 5866 critical_exit(); 5867 } 5868 5869 static vmem_t *pmap_trm_arena; 5870 static vmem_addr_t pmap_trm_arena_last = PMAP_TRM_MIN_ADDRESS; 5871 static int trm_guard = PAGE_SIZE; 5872 5873 static int 5874 pmap_trm_import(void *unused __unused, vmem_size_t size, int flags, 5875 vmem_addr_t *addrp) 5876 { 5877 vm_page_t m; 5878 vmem_addr_t af, addr, prev_addr; 5879 pt_entry_t *trm_pte; 5880 5881 prev_addr = atomic_load_int(&pmap_trm_arena_last); 5882 size = round_page(size) + trm_guard; 5883 for (;;) { 5884 if (prev_addr + size < prev_addr || prev_addr + size < size || 5885 prev_addr + size > PMAP_TRM_MAX_ADDRESS) 5886 return (ENOMEM); 5887 addr = prev_addr + size; 5888 if (atomic_fcmpset_int(&pmap_trm_arena_last, &prev_addr, addr)) 5889 break; 5890 } 5891 prev_addr += trm_guard; 5892 trm_pte = PTmap + atop(prev_addr); 5893 for (af = prev_addr; af < addr; af += PAGE_SIZE) { 5894 m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_WAITOK); 5895 pte_store(&trm_pte[atop(af - prev_addr)], VM_PAGE_TO_PHYS(m) | 5896 PG_M | PG_A | PG_RW | PG_V | pgeflag | 5897 pmap_cache_bits(kernel_pmap, VM_MEMATTR_DEFAULT, FALSE)); 5898 } 5899 *addrp = prev_addr; 5900 return (0); 5901 } 5902 5903 void 5904 pmap_init_trm(void) 5905 { 5906 vm_page_t pd_m; 5907 5908 TUNABLE_INT_FETCH("machdep.trm_guard", &trm_guard); 5909 if ((trm_guard & PAGE_MASK) != 0) 5910 trm_guard = 0; 5911 pmap_trm_arena = vmem_create("i386trampoline", 0, 0, 1, 0, M_WAITOK); 5912 vmem_set_import(pmap_trm_arena, pmap_trm_import, NULL, NULL, PAGE_SIZE); 5913 pd_m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_WAITOK | 5914 VM_ALLOC_ZERO); 5915 PTD[TRPTDI] = VM_PAGE_TO_PHYS(pd_m) | PG_M | PG_A | PG_RW | PG_V | 5916 pmap_cache_bits(kernel_pmap, VM_MEMATTR_DEFAULT, TRUE); 5917 } 5918 5919 static void * 5920 __CONCAT(PMTYPE, trm_alloc)(size_t size, int flags) 5921 { 5922 vmem_addr_t res; 5923 int error; 5924 5925 MPASS((flags & ~(M_WAITOK | M_NOWAIT | M_ZERO)) == 0); 5926 error = vmem_xalloc(pmap_trm_arena, roundup2(size, 4), sizeof(int), 5927 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX, flags | M_FIRSTFIT, &res); 5928 if (error != 0) 5929 return (NULL); 5930 if ((flags & M_ZERO) != 0) 5931 bzero((void *)res, size); 5932 return ((void *)res); 5933 } 5934 5935 static void 5936 __CONCAT(PMTYPE, trm_free)(void *addr, size_t size) 5937 { 5938 5939 vmem_free(pmap_trm_arena, (uintptr_t)addr, roundup2(size, 4)); 5940 } 5941 5942 static void 5943 __CONCAT(PMTYPE, ksetrw)(vm_offset_t va) 5944 { 5945 5946 *vtopte(va) |= PG_RW; 5947 } 5948 5949 static void 5950 __CONCAT(PMTYPE, remap_lowptdi)(bool enable) 5951 { 5952 5953 PTD[KPTDI] = enable ? PTD[LOWPTDI] : 0; 5954 invltlb_glob(); 5955 } 5956 5957 static vm_offset_t 5958 __CONCAT(PMTYPE, get_map_low)(void) 5959 { 5960 5961 return (PMAP_MAP_LOW); 5962 } 5963 5964 static vm_offset_t 5965 __CONCAT(PMTYPE, get_vm_maxuser_address)(void) 5966 { 5967 5968 return (VM_MAXUSER_ADDRESS); 5969 } 5970 5971 static vm_paddr_t 5972 __CONCAT(PMTYPE, pg_frame)(vm_paddr_t pa) 5973 { 5974 5975 return (pa & PG_FRAME); 5976 } 5977 5978 static void 5979 __CONCAT(PMTYPE, sf_buf_map)(struct sf_buf *sf) 5980 { 5981 pt_entry_t opte, *ptep; 5982 5983 /* 5984 * Update the sf_buf's virtual-to-physical mapping, flushing the 5985 * virtual address from the TLB. Since the reference count for 5986 * the sf_buf's old mapping was zero, that mapping is not 5987 * currently in use. Consequently, there is no need to exchange 5988 * the old and new PTEs atomically, even under PAE. 5989 */ 5990 ptep = vtopte(sf->kva); 5991 opte = *ptep; 5992 *ptep = VM_PAGE_TO_PHYS(sf->m) | PG_RW | PG_V | 5993 pmap_cache_bits(kernel_pmap, sf->m->md.pat_mode, 0); 5994 5995 /* 5996 * Avoid unnecessary TLB invalidations: If the sf_buf's old 5997 * virtual-to-physical mapping was not used, then any processor 5998 * that has invalidated the sf_buf's virtual address from its TLB 5999 * since the last used mapping need not invalidate again. 6000 */ 6001 #ifdef SMP 6002 if ((opte & (PG_V | PG_A)) == (PG_V | PG_A)) 6003 CPU_ZERO(&sf->cpumask); 6004 #else 6005 if ((opte & (PG_V | PG_A)) == (PG_V | PG_A)) 6006 pmap_invalidate_page_int(kernel_pmap, sf->kva); 6007 #endif 6008 } 6009 6010 static void 6011 __CONCAT(PMTYPE, cp_slow0_map)(vm_offset_t kaddr, int plen, vm_page_t *ma) 6012 { 6013 pt_entry_t *pte; 6014 int i; 6015 6016 for (i = 0, pte = vtopte(kaddr); i < plen; i++, pte++) { 6017 *pte = PG_V | PG_RW | PG_A | PG_M | VM_PAGE_TO_PHYS(ma[i]) | 6018 pmap_cache_bits(kernel_pmap, pmap_page_get_memattr(ma[i]), 6019 FALSE); 6020 invlpg(kaddr + ptoa(i)); 6021 } 6022 } 6023 6024 static u_int 6025 __CONCAT(PMTYPE, get_kcr3)(void) 6026 { 6027 6028 #ifdef PMAP_PAE_COMP 6029 return ((u_int)IdlePDPT); 6030 #else 6031 return ((u_int)IdlePTD); 6032 #endif 6033 } 6034 6035 static u_int 6036 __CONCAT(PMTYPE, get_cr3)(pmap_t pmap) 6037 { 6038 6039 #ifdef PMAP_PAE_COMP 6040 return ((u_int)vtophys(pmap->pm_pdpt)); 6041 #else 6042 return ((u_int)vtophys(pmap->pm_pdir)); 6043 #endif 6044 } 6045 6046 static caddr_t 6047 __CONCAT(PMTYPE, cmap3)(vm_paddr_t pa, u_int pte_bits) 6048 { 6049 pt_entry_t *pte; 6050 6051 pte = CMAP3; 6052 *pte = pa | pte_bits; 6053 invltlb(); 6054 return (CADDR3); 6055 } 6056 6057 static void 6058 __CONCAT(PMTYPE, basemem_setup)(u_int basemem) 6059 { 6060 pt_entry_t *pte; 6061 int i; 6062 6063 /* 6064 * Map pages between basemem and ISA_HOLE_START, if any, r/w into 6065 * the vm86 page table so that vm86 can scribble on them using 6066 * the vm86 map too. XXX: why 2 ways for this and only 1 way for 6067 * page 0, at least as initialized here? 6068 */ 6069 pte = (pt_entry_t *)vm86paddr; 6070 for (i = basemem / 4; i < 160; i++) 6071 pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U; 6072 } 6073 6074 struct bios16_pmap_handle { 6075 pt_entry_t *pte; 6076 pd_entry_t *ptd; 6077 pt_entry_t orig_ptd; 6078 }; 6079 6080 static void * 6081 __CONCAT(PMTYPE, bios16_enter)(void) 6082 { 6083 struct bios16_pmap_handle *h; 6084 6085 /* 6086 * no page table, so create one and install it. 6087 */ 6088 h = malloc(sizeof(struct bios16_pmap_handle), M_TEMP, M_WAITOK); 6089 h->pte = (pt_entry_t *)malloc(PAGE_SIZE, M_TEMP, M_WAITOK); 6090 h->ptd = IdlePTD; 6091 *h->pte = vm86phystk | PG_RW | PG_V; 6092 h->orig_ptd = *h->ptd; 6093 *h->ptd = vtophys(h->pte) | PG_RW | PG_V; 6094 pmap_invalidate_all_int(kernel_pmap); /* XXX insurance for now */ 6095 return (h); 6096 } 6097 6098 static void 6099 __CONCAT(PMTYPE, bios16_leave)(void *arg) 6100 { 6101 struct bios16_pmap_handle *h; 6102 6103 h = arg; 6104 *h->ptd = h->orig_ptd; /* remove page table */ 6105 /* 6106 * XXX only needs to be invlpg(0) but that doesn't work on the 386 6107 */ 6108 pmap_invalidate_all_int(kernel_pmap); 6109 free(h->pte, M_TEMP); /* ... and free it */ 6110 } 6111 6112 struct pmap_kernel_map_range { 6113 vm_offset_t sva; 6114 pt_entry_t attrs; 6115 int ptes; 6116 int pdes; 6117 int pdpes; 6118 }; 6119 6120 static void 6121 sysctl_kmaps_dump(struct sbuf *sb, struct pmap_kernel_map_range *range, 6122 vm_offset_t eva) 6123 { 6124 const char *mode; 6125 int i, pat_idx; 6126 6127 if (eva <= range->sva) 6128 return; 6129 6130 pat_idx = pmap_pat_index(kernel_pmap, range->attrs, true); 6131 for (i = 0; i < PAT_INDEX_SIZE; i++) 6132 if (pat_index[i] == pat_idx) 6133 break; 6134 6135 switch (i) { 6136 case PAT_WRITE_BACK: 6137 mode = "WB"; 6138 break; 6139 case PAT_WRITE_THROUGH: 6140 mode = "WT"; 6141 break; 6142 case PAT_UNCACHEABLE: 6143 mode = "UC"; 6144 break; 6145 case PAT_UNCACHED: 6146 mode = "U-"; 6147 break; 6148 case PAT_WRITE_PROTECTED: 6149 mode = "WP"; 6150 break; 6151 case PAT_WRITE_COMBINING: 6152 mode = "WC"; 6153 break; 6154 default: 6155 printf("%s: unknown PAT mode %#x for range 0x%08x-0x%08x\n", 6156 __func__, pat_idx, range->sva, eva); 6157 mode = "??"; 6158 break; 6159 } 6160 6161 sbuf_printf(sb, "0x%08x-0x%08x r%c%c%c%c %s %d %d %d\n", 6162 range->sva, eva, 6163 (range->attrs & PG_RW) != 0 ? 'w' : '-', 6164 (range->attrs & pg_nx) != 0 ? '-' : 'x', 6165 (range->attrs & PG_U) != 0 ? 'u' : 's', 6166 (range->attrs & PG_G) != 0 ? 'g' : '-', 6167 mode, range->pdpes, range->pdes, range->ptes); 6168 6169 /* Reset to sentinel value. */ 6170 range->sva = 0xffffffff; 6171 } 6172 6173 /* 6174 * Determine whether the attributes specified by a page table entry match those 6175 * being tracked by the current range. This is not quite as simple as a direct 6176 * flag comparison since some PAT modes have multiple representations. 6177 */ 6178 static bool 6179 sysctl_kmaps_match(struct pmap_kernel_map_range *range, pt_entry_t attrs) 6180 { 6181 pt_entry_t diff, mask; 6182 6183 mask = pg_nx | PG_G | PG_RW | PG_U | PG_PDE_CACHE; 6184 diff = (range->attrs ^ attrs) & mask; 6185 if (diff == 0) 6186 return (true); 6187 if ((diff & ~PG_PDE_PAT) == 0 && 6188 pmap_pat_index(kernel_pmap, range->attrs, true) == 6189 pmap_pat_index(kernel_pmap, attrs, true)) 6190 return (true); 6191 return (false); 6192 } 6193 6194 static void 6195 sysctl_kmaps_reinit(struct pmap_kernel_map_range *range, vm_offset_t va, 6196 pt_entry_t attrs) 6197 { 6198 6199 memset(range, 0, sizeof(*range)); 6200 range->sva = va; 6201 range->attrs = attrs; 6202 } 6203 6204 /* 6205 * Given a leaf PTE, derive the mapping's attributes. If they do not match 6206 * those of the current run, dump the address range and its attributes, and 6207 * begin a new run. 6208 */ 6209 static void 6210 sysctl_kmaps_check(struct sbuf *sb, struct pmap_kernel_map_range *range, 6211 vm_offset_t va, pd_entry_t pde, pt_entry_t pte) 6212 { 6213 pt_entry_t attrs; 6214 6215 attrs = pde & (PG_RW | PG_U | pg_nx); 6216 6217 if ((pde & PG_PS) != 0) { 6218 attrs |= pde & (PG_G | PG_PDE_CACHE); 6219 } else if (pte != 0) { 6220 attrs |= pte & pg_nx; 6221 attrs &= pg_nx | (pte & (PG_RW | PG_U)); 6222 attrs |= pte & (PG_G | PG_PTE_CACHE); 6223 6224 /* Canonicalize by always using the PDE PAT bit. */ 6225 if ((attrs & PG_PTE_PAT) != 0) 6226 attrs ^= PG_PDE_PAT | PG_PTE_PAT; 6227 } 6228 6229 if (range->sva > va || !sysctl_kmaps_match(range, attrs)) { 6230 sysctl_kmaps_dump(sb, range, va); 6231 sysctl_kmaps_reinit(range, va, attrs); 6232 } 6233 } 6234 6235 static int 6236 __CONCAT(PMTYPE, sysctl_kmaps)(SYSCTL_HANDLER_ARGS) 6237 { 6238 struct pmap_kernel_map_range range; 6239 struct sbuf sbuf, *sb; 6240 pd_entry_t pde; 6241 pt_entry_t *pt, pte; 6242 vm_offset_t sva; 6243 int error; 6244 u_int i, k; 6245 6246 error = sysctl_wire_old_buffer(req, 0); 6247 if (error != 0) 6248 return (error); 6249 sb = &sbuf; 6250 sbuf_new_for_sysctl(sb, NULL, PAGE_SIZE, req); 6251 6252 /* Sentinel value. */ 6253 range.sva = 0xffffffff; 6254 6255 /* 6256 * Iterate over the kernel page tables without holding the 6257 * kernel pmap lock. Kernel page table pages are never freed, 6258 * so at worst we will observe inconsistencies in the output. 6259 */ 6260 for (sva = 0, i = 0; i < NPTEPG * NPGPTD * NPDEPG ;) { 6261 if (i == 0) 6262 sbuf_printf(sb, "\nLow PDE:\n"); 6263 else if (i == LOWPTDI * NPTEPG) 6264 sbuf_printf(sb, "Low PDE dup:\n"); 6265 else if (i == PTDPTDI * NPTEPG) 6266 sbuf_printf(sb, "Recursive map:\n"); 6267 else if (i == KERNPTDI * NPTEPG) 6268 sbuf_printf(sb, "Kernel base:\n"); 6269 else if (i == TRPTDI * NPTEPG) 6270 sbuf_printf(sb, "Trampoline:\n"); 6271 pde = IdlePTD[sva >> PDRSHIFT]; 6272 if ((pde & PG_V) == 0) { 6273 sva = rounddown2(sva, NBPDR); 6274 sysctl_kmaps_dump(sb, &range, sva); 6275 sva += NBPDR; 6276 i += NPTEPG; 6277 continue; 6278 } 6279 if ((pde & PG_PS) != 0) { 6280 sysctl_kmaps_check(sb, &range, sva, pde, 0); 6281 range.pdes++; 6282 sva += NBPDR; 6283 i += NPTEPG; 6284 continue; 6285 } 6286 for (pt = vtopte(sva), k = 0; k < NPTEPG; i++, k++, pt++, 6287 sva += PAGE_SIZE) { 6288 pte = *pt; 6289 if ((pte & PG_V) == 0) { 6290 sysctl_kmaps_dump(sb, &range, sva); 6291 continue; 6292 } 6293 sysctl_kmaps_check(sb, &range, sva, pde, pte); 6294 range.ptes++; 6295 } 6296 } 6297 6298 error = sbuf_finish(sb); 6299 sbuf_delete(sb); 6300 return (error); 6301 } 6302 6303 #define PMM(a) \ 6304 .pm_##a = __CONCAT(PMTYPE, a), 6305 6306 struct pmap_methods __CONCAT(PMTYPE, methods) = { 6307 PMM(ksetrw) 6308 PMM(remap_lower) 6309 PMM(remap_lowptdi) 6310 PMM(align_superpage) 6311 PMM(quick_enter_page) 6312 PMM(quick_remove_page) 6313 PMM(trm_alloc) 6314 PMM(trm_free) 6315 PMM(get_map_low) 6316 PMM(get_vm_maxuser_address) 6317 PMM(kextract) 6318 PMM(pg_frame) 6319 PMM(sf_buf_map) 6320 PMM(cp_slow0_map) 6321 PMM(get_kcr3) 6322 PMM(get_cr3) 6323 PMM(cmap3) 6324 PMM(basemem_setup) 6325 PMM(set_nx) 6326 PMM(bios16_enter) 6327 PMM(bios16_leave) 6328 PMM(bootstrap) 6329 PMM(is_valid_memattr) 6330 PMM(cache_bits) 6331 PMM(ps_enabled) 6332 PMM(pinit0) 6333 PMM(pinit) 6334 PMM(activate) 6335 PMM(activate_boot) 6336 PMM(advise) 6337 PMM(clear_modify) 6338 PMM(change_attr) 6339 PMM(mincore) 6340 PMM(copy) 6341 PMM(copy_page) 6342 PMM(copy_pages) 6343 PMM(zero_page) 6344 PMM(zero_page_area) 6345 PMM(enter) 6346 PMM(enter_object) 6347 PMM(enter_quick) 6348 PMM(kenter_temporary) 6349 PMM(object_init_pt) 6350 PMM(unwire) 6351 PMM(page_exists_quick) 6352 PMM(page_wired_mappings) 6353 PMM(page_is_mapped) 6354 PMM(remove_pages) 6355 PMM(is_modified) 6356 PMM(is_prefaultable) 6357 PMM(is_referenced) 6358 PMM(remove_write) 6359 PMM(ts_referenced) 6360 PMM(mapdev_attr) 6361 PMM(unmapdev) 6362 PMM(page_set_memattr) 6363 PMM(extract) 6364 PMM(extract_and_hold) 6365 PMM(map) 6366 PMM(qenter) 6367 PMM(qremove) 6368 PMM(release) 6369 PMM(remove) 6370 PMM(protect) 6371 PMM(remove_all) 6372 PMM(init) 6373 PMM(init_pat) 6374 PMM(growkernel) 6375 PMM(invalidate_page) 6376 PMM(invalidate_range) 6377 PMM(invalidate_all) 6378 PMM(invalidate_cache) 6379 PMM(flush_page) 6380 PMM(kenter) 6381 PMM(kremove) 6382 PMM(sysctl_kmaps) 6383 }; 6384