1 /*- 2 * SPDX-License-Identifier: BSD-4-Clause 3 * 4 * Copyright (c) 1991 Regents of the University of California. 5 * All rights reserved. 6 * Copyright (c) 1994 John S. Dyson 7 * All rights reserved. 8 * Copyright (c) 1994 David Greenman 9 * All rights reserved. 10 * Copyright (c) 2005-2010 Alan L. Cox <alc@cs.rice.edu> 11 * All rights reserved. 12 * 13 * This code is derived from software contributed to Berkeley by 14 * the Systems Programming Group of the University of Utah Computer 15 * Science Department and William Jolitz of UUNET Technologies Inc. 16 * 17 * Redistribution and use in source and binary forms, with or without 18 * modification, are permitted provided that the following conditions 19 * are met: 20 * 1. Redistributions of source code must retain the above copyright 21 * notice, this list of conditions and the following disclaimer. 22 * 2. Redistributions in binary form must reproduce the above copyright 23 * notice, this list of conditions and the following disclaimer in the 24 * documentation and/or other materials provided with the distribution. 25 * 3. All advertising materials mentioning features or use of this software 26 * must display the following acknowledgement: 27 * This product includes software developed by the University of 28 * California, Berkeley and its contributors. 29 * 4. Neither the name of the University nor the names of its contributors 30 * may be used to endorse or promote products derived from this software 31 * without specific prior written permission. 32 * 33 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 34 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 35 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 36 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 37 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 38 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 39 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 40 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 41 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 42 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 43 * SUCH DAMAGE. 44 * 45 * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91 46 */ 47 /*- 48 * Copyright (c) 2003 Networks Associates Technology, Inc. 49 * All rights reserved. 50 * Copyright (c) 2018 The FreeBSD Foundation 51 * All rights reserved. 52 * 53 * This software was developed for the FreeBSD Project by Jake Burkholder, 54 * Safeport Network Services, and Network Associates Laboratories, the 55 * Security Research Division of Network Associates, Inc. under 56 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA 57 * CHATS research program. 58 * 59 * Portions of this software were developed by 60 * Konstantin Belousov <kib@FreeBSD.org> under sponsorship from 61 * the FreeBSD Foundation. 62 * 63 * Redistribution and use in source and binary forms, with or without 64 * modification, are permitted provided that the following conditions 65 * are met: 66 * 1. Redistributions of source code must retain the above copyright 67 * notice, this list of conditions and the following disclaimer. 68 * 2. Redistributions in binary form must reproduce the above copyright 69 * notice, this list of conditions and the following disclaimer in the 70 * documentation and/or other materials provided with the distribution. 
71 * 72 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 73 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 74 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 75 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 76 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 77 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 78 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 79 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 80 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 81 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 82 * SUCH DAMAGE. 83 */ 84 85 #include <sys/cdefs.h> 86 __FBSDID("$FreeBSD$"); 87 88 /* 89 * Manages physical address maps. 90 * 91 * Since the information managed by this module is 92 * also stored by the logical address mapping module, 93 * this module may throw away valid virtual-to-physical 94 * mappings at almost any time. However, invalidations 95 * of virtual-to-physical mappings must be done as 96 * requested. 97 * 98 * In order to cope with hardware architectures which 99 * make virtual-to-physical map invalidates expensive, 100 * this module may delay invalidate or reduced protection 101 * operations until such time as they are actually 102 * necessary. This module is given full information as 103 * to which processors are currently using which maps, 104 * and to when physical maps must be made correct. 105 */ 106 107 #include "opt_apic.h" 108 #include "opt_cpu.h" 109 #include "opt_pmap.h" 110 #include "opt_smp.h" 111 #include "opt_vm.h" 112 113 #include <sys/param.h> 114 #include <sys/systm.h> 115 #include <sys/kernel.h> 116 #include <sys/ktr.h> 117 #include <sys/lock.h> 118 #include <sys/malloc.h> 119 #include <sys/mman.h> 120 #include <sys/msgbuf.h> 121 #include <sys/mutex.h> 122 #include <sys/proc.h> 123 #include <sys/rwlock.h> 124 #include <sys/sbuf.h> 125 #include <sys/sf_buf.h> 126 #include <sys/sx.h> 127 #include <sys/vmmeter.h> 128 #include <sys/sched.h> 129 #include <sys/sysctl.h> 130 #include <sys/smp.h> 131 #include <sys/vmem.h> 132 133 #include <vm/vm.h> 134 #include <vm/vm_param.h> 135 #include <vm/vm_kern.h> 136 #include <vm/vm_page.h> 137 #include <vm/vm_map.h> 138 #include <vm/vm_object.h> 139 #include <vm/vm_extern.h> 140 #include <vm/vm_pageout.h> 141 #include <vm/vm_pager.h> 142 #include <vm/vm_phys.h> 143 #include <vm/vm_radix.h> 144 #include <vm/vm_reserv.h> 145 #include <vm/uma.h> 146 147 #ifdef DEV_APIC 148 #include <sys/bus.h> 149 #include <machine/intr_machdep.h> 150 #include <x86/apicvar.h> 151 #endif 152 #include <x86/ifunc.h> 153 #include <machine/bootinfo.h> 154 #include <machine/cpu.h> 155 #include <machine/cputypes.h> 156 #include <machine/md_var.h> 157 #include <machine/pcb.h> 158 #include <machine/specialreg.h> 159 #ifdef SMP 160 #include <machine/smp.h> 161 #endif 162 #include <machine/pmap_base.h> 163 164 #if !defined(DIAGNOSTIC) 165 #ifdef __GNUC_GNU_INLINE__ 166 #define PMAP_INLINE __attribute__((__gnu_inline__)) inline 167 #else 168 #define PMAP_INLINE extern inline 169 #endif 170 #else 171 #define PMAP_INLINE 172 #endif 173 174 #ifdef PV_STATS 175 #define PV_STAT(x) do { x ; } while (0) 176 #else 177 #define PV_STAT(x) do { } while (0) 178 #endif 179 180 #define pa_index(pa) ((pa) >> PDRSHIFT) 181 #define pa_to_pvh(pa) (&pv_table[pa_index(pa)]) 182 183 /* 184 * PTmap is 
the recursive pagemap at the top of the virtual address space.
 * Within PTmap, the page directory can be found (third indirection).
 */
#define PTmap	((pt_entry_t *)(PTDPTDI << PDRSHIFT))
#define PTD	((pd_entry_t *)((PTDPTDI << PDRSHIFT) + (PTDPTDI * PAGE_SIZE)))
#define PTDpde	((pd_entry_t *)((PTDPTDI << PDRSHIFT) + (PTDPTDI * PAGE_SIZE) + \
    (PTDPTDI * PDESIZE)))

/*
 * Translate a virtual address to the kernel virtual address of its page table
 * entry (PTE).  This can be used recursively.  If the address of a PTE as
 * previously returned by this macro is itself given as the argument, then the
 * address of the page directory entry (PDE) that maps the PTE will be
 * returned.
 *
 * This macro may be used before pmap_bootstrap() is called.
 */
#define vtopte(va)	(PTmap + i386_btop(va))

/*
 * Get PDEs and PTEs for user/kernel address space
 */
#define pmap_pde(m, v)	(&((m)->pm_pdir[(vm_offset_t)(v) >> PDRSHIFT]))
#define pdir_pde(m, v)	(m[(vm_offset_t)(v) >> PDRSHIFT])

#define pmap_pde_v(pte)		((*(int *)pte & PG_V) != 0)
#define pmap_pte_w(pte)		((*(int *)pte & PG_W) != 0)
#define pmap_pte_m(pte)		((*(int *)pte & PG_M) != 0)
#define pmap_pte_u(pte)		((*(int *)pte & PG_A) != 0)
#define pmap_pte_v(pte)		((*(int *)pte & PG_V) != 0)

#define pmap_pte_set_w(pte, v)	((v) ? atomic_set_int((u_int *)(pte), PG_W) : \
    atomic_clear_int((u_int *)(pte), PG_W))
#define pmap_pte_set_prot(pte, v)	((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v)))

static int pgeflag = 0;		/* PG_G or-in */
static int pseflag = 0;		/* PG_PS or-in */

static int nkpt = NKPT;

#ifdef PMAP_PAE_COMP
pt_entry_t pg_nx;
static uma_zone_t pdptzone;
#else
#define pg_nx	0
#endif

_Static_assert(VM_MAXUSER_ADDRESS == VADDR(TRPTDI, 0), "VM_MAXUSER_ADDRESS");
_Static_assert(VM_MAX_KERNEL_ADDRESS <= VADDR(PTDPTDI, 0),
    "VM_MAX_KERNEL_ADDRESS");
_Static_assert(PMAP_MAP_LOW == VADDR(LOWPTDI, 0), "PMAP_MAP_LOW");
_Static_assert(KERNLOAD == (KERNPTDI << PDRSHIFT), "KERNLOAD");

extern int pat_works;
extern int pg_ps_enabled;

extern int elf32_nxstack;

#define PAT_INDEX_SIZE	8
static int pat_index[PAT_INDEX_SIZE];	/* cache mode to PAT index conversion */

/*
 * Support for pmap_mapdev() calls made before pmap_init() runs
 * (e.g., for the early console).
 */
#define PMAP_PREINIT_MAPPING_COUNT	8
static struct pmap_preinit_mapping {
	vm_paddr_t	pa;
	vm_offset_t	va;
	vm_size_t	sz;
	int		mode;
} pmap_preinit_mapping[PMAP_PREINIT_MAPPING_COUNT];
static int pmap_initialized;

static struct rwlock_padalign pvh_global_lock;

/*
 * Data for the pv entry allocation mechanism
 */
static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
extern int pv_entry_max, pv_entry_count;
static int pv_entry_high_water = 0;
static struct md_page *pv_table;
extern int shpgperproc;

static struct pv_chunk *pv_chunkbase;	/* KVA block for pv_chunks */
static int pv_maxchunks;		/* How many chunks we have KVA for */
static vm_offset_t pv_vafree;		/* freelist stored in the PTE */

/*
 * All those kernel PT submaps that BSD is so fond of
 */
static pt_entry_t *CMAP3;
static pd_entry_t *KPTD;
static caddr_t CADDR3;

/*
 * Crashdump maps.
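 *
 * crashdumpmap (declared just below) provides a small MAXDUMPPGS-page KVA
 * window that the kernel dump code can borrow to map arbitrary physical
 * pages while writing a crash dump.  The sketch below is illustrative
 * only; the real dump path lives outside this file and its exact loop
 * structure is an assumption here:
 *
 *	for (i = 0; i < MAXDUMPPGS && pa < pa_end; i++, pa += PAGE_SIZE)
 *		pmap_kenter((vm_offset_t)crashdumpmap + ptoa(i), pa);
 *	pmap_invalidate_range(kernel_pmap, (vm_offset_t)crashdumpmap,
 *	    (vm_offset_t)crashdumpmap + ptoa(i));
 *	... then write ptoa(i) bytes starting at crashdumpmap to the dump
 *	device ...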
281 */ 282 static caddr_t crashdumpmap; 283 284 static pt_entry_t *PMAP1 = NULL, *PMAP2, *PMAP3; 285 static pt_entry_t *PADDR1 = NULL, *PADDR2, *PADDR3; 286 #ifdef SMP 287 static int PMAP1cpu, PMAP3cpu; 288 extern int PMAP1changedcpu; 289 #endif 290 extern int PMAP1changed; 291 extern int PMAP1unchanged; 292 static struct mtx PMAP2mutex; 293 294 /* 295 * Internal flags for pmap_enter()'s helper functions. 296 */ 297 #define PMAP_ENTER_NORECLAIM 0x1000000 /* Don't reclaim PV entries. */ 298 #define PMAP_ENTER_NOREPLACE 0x2000000 /* Don't replace mappings. */ 299 300 static void free_pv_chunk(struct pv_chunk *pc); 301 static void free_pv_entry(pmap_t pmap, pv_entry_t pv); 302 static pv_entry_t get_pv_entry(pmap_t pmap, boolean_t try); 303 static void pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa); 304 static bool pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, pd_entry_t pde, 305 u_int flags); 306 #if VM_NRESERVLEVEL > 0 307 static void pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa); 308 #endif 309 static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va); 310 static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, 311 vm_offset_t va); 312 static int pmap_pvh_wired_mappings(struct md_page *pvh, int count); 313 314 static void pmap_abort_ptp(pmap_t pmap, vm_offset_t va, vm_page_t mpte); 315 static boolean_t pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va); 316 static bool pmap_enter_4mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, 317 vm_prot_t prot); 318 static int pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde, 319 u_int flags, vm_page_t m); 320 static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, 321 vm_page_t m, vm_prot_t prot, vm_page_t mpte); 322 static int pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte, bool promoted); 323 static void pmap_invalidate_pde_page(pmap_t pmap, vm_offset_t va, 324 pd_entry_t pde); 325 static void pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte); 326 static boolean_t pmap_is_modified_pvh(struct md_page *pvh); 327 static boolean_t pmap_is_referenced_pvh(struct md_page *pvh); 328 static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode); 329 static void pmap_kenter_pde(vm_offset_t va, pd_entry_t newpde); 330 static void pmap_pde_attr(pd_entry_t *pde, int cache_bits); 331 #if VM_NRESERVLEVEL > 0 332 static void pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va); 333 #endif 334 static boolean_t pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, 335 vm_prot_t prot); 336 static void pmap_pte_attr(pt_entry_t *pte, int cache_bits); 337 static void pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva, 338 struct spglist *free); 339 static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva, 340 struct spglist *free); 341 static vm_page_t pmap_remove_pt_page(pmap_t pmap, vm_offset_t va); 342 static void pmap_remove_page(pmap_t pmap, vm_offset_t va, struct spglist *free); 343 static bool pmap_remove_ptes(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, 344 struct spglist *free); 345 static void pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va); 346 static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m); 347 static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, 348 vm_page_t m); 349 static void pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, 350 pd_entry_t newpde); 351 static void pmap_update_pde_invalidate(vm_offset_t va, pd_entry_t newpde); 
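/*
 * For reference: pv_table (above) keeps one md_page per 2/4MB physical
 * superpage frame, and pa_to_pvh() indexes it by physical address.  An
 * illustrative, non-verbatim lookup of the superpage PV list for a managed
 * page "m"; the pv_next linkage name is assumed from the pv_entry
 * definition in the corresponding header:
 *
 *	struct md_page *pvh;
 *	pv_entry_t pv;
 *
 *	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 *	TAILQ_FOREACH(pv, &pvh->pv_list, pv_next)
 *		;	// each pv names one (pmap, va) 2/4MB mapping
 */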
352 353 static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, u_int flags); 354 355 static vm_page_t _pmap_allocpte(pmap_t pmap, u_int ptepindex, u_int flags); 356 static void _pmap_unwire_ptp(pmap_t pmap, vm_page_t m, struct spglist *free); 357 static pt_entry_t *pmap_pte_quick(pmap_t pmap, vm_offset_t va); 358 static void pmap_pte_release(pt_entry_t *pte); 359 static int pmap_unuse_pt(pmap_t, vm_offset_t, struct spglist *); 360 #ifdef PMAP_PAE_COMP 361 static void *pmap_pdpt_allocf(uma_zone_t zone, vm_size_t bytes, int domain, 362 uint8_t *flags, int wait); 363 #endif 364 static void pmap_init_trm(void); 365 static void pmap_invalidate_all_int(pmap_t pmap); 366 367 static __inline void pagezero(void *page); 368 369 CTASSERT(1 << PDESHIFT == sizeof(pd_entry_t)); 370 CTASSERT(1 << PTESHIFT == sizeof(pt_entry_t)); 371 372 extern char _end[]; 373 extern u_long physfree; /* phys addr of next free page */ 374 extern u_long vm86phystk;/* PA of vm86/bios stack */ 375 extern u_long vm86paddr;/* address of vm86 region */ 376 extern int vm86pa; /* phys addr of vm86 region */ 377 extern u_long KERNend; /* phys addr end of kernel (just after bss) */ 378 #ifdef PMAP_PAE_COMP 379 pd_entry_t *IdlePTD_pae; /* phys addr of kernel PTD */ 380 pdpt_entry_t *IdlePDPT; /* phys addr of kernel PDPT */ 381 pt_entry_t *KPTmap_pae; /* address of kernel page tables */ 382 #define IdlePTD IdlePTD_pae 383 #define KPTmap KPTmap_pae 384 #else 385 pd_entry_t *IdlePTD_nopae; 386 pt_entry_t *KPTmap_nopae; 387 #define IdlePTD IdlePTD_nopae 388 #define KPTmap KPTmap_nopae 389 #endif 390 extern u_long KPTphys; /* phys addr of kernel page tables */ 391 extern u_long tramp_idleptd; 392 393 static u_long 394 allocpages(u_int cnt, u_long *physfree) 395 { 396 u_long res; 397 398 res = *physfree; 399 *physfree += PAGE_SIZE * cnt; 400 bzero((void *)res, PAGE_SIZE * cnt); 401 return (res); 402 } 403 404 static void 405 pmap_cold_map(u_long pa, u_long va, u_long cnt) 406 { 407 pt_entry_t *pt; 408 409 for (pt = (pt_entry_t *)KPTphys + atop(va); cnt > 0; 410 cnt--, pt++, va += PAGE_SIZE, pa += PAGE_SIZE) 411 *pt = pa | PG_V | PG_RW | PG_A | PG_M; 412 } 413 414 static void 415 pmap_cold_mapident(u_long pa, u_long cnt) 416 { 417 418 pmap_cold_map(pa, pa, cnt); 419 } 420 421 _Static_assert(LOWPTDI * 2 * NBPDR == KERNBASE, 422 "Broken double-map of zero PTD"); 423 424 static void 425 __CONCAT(PMTYPE, remap_lower)(bool enable) 426 { 427 int i; 428 429 for (i = 0; i < LOWPTDI; i++) 430 IdlePTD[i] = enable ? IdlePTD[LOWPTDI + i] : 0; 431 load_cr3(rcr3()); /* invalidate TLB */ 432 } 433 434 /* 435 * Called from locore.s before paging is enabled. Sets up the first 436 * kernel page table. Since kernel is mapped with PA == VA, this code 437 * does not require relocations. 438 */ 439 void 440 __CONCAT(PMTYPE, cold)(void) 441 { 442 pt_entry_t *pt; 443 u_long a; 444 u_int cr3, ncr4; 445 446 physfree = (u_long)&_end; 447 if (bootinfo.bi_esymtab != 0) 448 physfree = bootinfo.bi_esymtab; 449 if (bootinfo.bi_kernend != 0) 450 physfree = bootinfo.bi_kernend; 451 physfree = roundup2(physfree, NBPDR); 452 KERNend = physfree; 453 454 /* Allocate Kernel Page Tables */ 455 KPTphys = allocpages(NKPT, &physfree); 456 KPTmap = (pt_entry_t *)KPTphys; 457 458 /* Allocate Page Table Directory */ 459 #ifdef PMAP_PAE_COMP 460 /* XXX only need 32 bytes (easier for now) */ 461 IdlePDPT = (pdpt_entry_t *)allocpages(1, &physfree); 462 #endif 463 IdlePTD = (pd_entry_t *)allocpages(NPGPTD, &physfree); 464 465 /* 466 * Allocate KSTACK. 
Leave a guard page between IdlePTD and 467 * proc0kstack, to control stack overflow for thread0 and 468 * prevent corruption of the page table. We leak the guard 469 * physical memory due to 1:1 mappings. 470 */ 471 allocpages(1, &physfree); 472 proc0kstack = allocpages(TD0_KSTACK_PAGES, &physfree); 473 474 /* vm86/bios stack */ 475 vm86phystk = allocpages(1, &physfree); 476 477 /* pgtable + ext + IOPAGES */ 478 vm86paddr = vm86pa = allocpages(3, &physfree); 479 480 /* Install page tables into PTD. Page table page 1 is wasted. */ 481 for (a = 0; a < NKPT; a++) 482 IdlePTD[a] = (KPTphys + ptoa(a)) | PG_V | PG_RW | PG_A | PG_M; 483 484 #ifdef PMAP_PAE_COMP 485 /* PAE install PTD pointers into PDPT */ 486 for (a = 0; a < NPGPTD; a++) 487 IdlePDPT[a] = ((u_int)IdlePTD + ptoa(a)) | PG_V; 488 #endif 489 490 /* 491 * Install recursive mapping for kernel page tables into 492 * itself. 493 */ 494 for (a = 0; a < NPGPTD; a++) 495 IdlePTD[PTDPTDI + a] = ((u_int)IdlePTD + ptoa(a)) | PG_V | 496 PG_RW; 497 498 /* 499 * Initialize page table pages mapping physical address zero 500 * through the (physical) end of the kernel. Many of these 501 * pages must be reserved, and we reserve them all and map 502 * them linearly for convenience. We do this even if we've 503 * enabled PSE above; we'll just switch the corresponding 504 * kernel PDEs before we turn on paging. 505 * 506 * This and all other page table entries allow read and write 507 * access for various reasons. Kernel mappings never have any 508 * access restrictions. 509 */ 510 pmap_cold_mapident(0, atop(NBPDR) * LOWPTDI); 511 pmap_cold_map(0, NBPDR * LOWPTDI, atop(NBPDR) * LOWPTDI); 512 pmap_cold_mapident(KERNBASE, atop(KERNend - KERNBASE)); 513 514 /* Map page table directory */ 515 #ifdef PMAP_PAE_COMP 516 pmap_cold_mapident((u_long)IdlePDPT, 1); 517 #endif 518 pmap_cold_mapident((u_long)IdlePTD, NPGPTD); 519 520 /* Map early KPTmap. It is really pmap_cold_mapident. */ 521 pmap_cold_map(KPTphys, (u_long)KPTmap, NKPT); 522 523 /* Map proc0kstack */ 524 pmap_cold_mapident(proc0kstack, TD0_KSTACK_PAGES); 525 /* ISA hole already mapped */ 526 527 pmap_cold_mapident(vm86phystk, 1); 528 pmap_cold_mapident(vm86pa, 3); 529 530 /* Map page 0 into the vm86 page table */ 531 *(pt_entry_t *)vm86pa = 0 | PG_RW | PG_U | PG_A | PG_M | PG_V; 532 533 /* ...likewise for the ISA hole for vm86 */ 534 for (pt = (pt_entry_t *)vm86pa + atop(ISA_HOLE_START), a = 0; 535 a < atop(ISA_HOLE_LENGTH); a++, pt++) 536 *pt = (ISA_HOLE_START + ptoa(a)) | PG_RW | PG_U | PG_A | 537 PG_M | PG_V; 538 539 /* Enable PSE, PGE, VME, and PAE if configured. */ 540 ncr4 = 0; 541 if ((cpu_feature & CPUID_PSE) != 0) { 542 ncr4 |= CR4_PSE; 543 pseflag = PG_PS; 544 /* 545 * Superpage mapping of the kernel text. Existing 4k 546 * page table pages are wasted. 547 */ 548 for (a = KERNBASE; a < KERNend; a += NBPDR) 549 IdlePTD[a >> PDRSHIFT] = a | PG_PS | PG_A | PG_M | 550 PG_RW | PG_V; 551 } 552 if ((cpu_feature & CPUID_PGE) != 0) { 553 ncr4 |= CR4_PGE; 554 pgeflag = PG_G; 555 } 556 ncr4 |= (cpu_feature & CPUID_VME) != 0 ? CR4_VME : 0; 557 #ifdef PMAP_PAE_COMP 558 ncr4 |= CR4_PAE; 559 #endif 560 if (ncr4 != 0) 561 load_cr4(rcr4() | ncr4); 562 563 /* Now enable paging */ 564 #ifdef PMAP_PAE_COMP 565 cr3 = (u_int)IdlePDPT; 566 if ((cpu_feature & CPUID_PAT) == 0) 567 wbinvd(); 568 #else 569 cr3 = (u_int)IdlePTD; 570 #endif 571 tramp_idleptd = cr3; 572 load_cr3(cr3); 573 load_cr0(rcr0() | CR0_PG); 574 575 /* 576 * Now running relocated at KERNBASE where the system is 577 * linked to run. 
578 */ 579 580 /* 581 * Remove the lowest part of the double mapping of low memory 582 * to get some null pointer checks. 583 */ 584 __CONCAT(PMTYPE, remap_lower)(false); 585 586 kernel_vm_end = /* 0 + */ NKPT * NBPDR; 587 #ifdef PMAP_PAE_COMP 588 i386_pmap_VM_NFREEORDER = VM_NFREEORDER_PAE; 589 i386_pmap_VM_LEVEL_0_ORDER = VM_LEVEL_0_ORDER_PAE; 590 i386_pmap_PDRSHIFT = PDRSHIFT_PAE; 591 #else 592 i386_pmap_VM_NFREEORDER = VM_NFREEORDER_NOPAE; 593 i386_pmap_VM_LEVEL_0_ORDER = VM_LEVEL_0_ORDER_NOPAE; 594 i386_pmap_PDRSHIFT = PDRSHIFT_NOPAE; 595 #endif 596 } 597 598 static void 599 __CONCAT(PMTYPE, set_nx)(void) 600 { 601 602 #ifdef PMAP_PAE_COMP 603 if ((amd_feature & AMDID_NX) == 0) 604 return; 605 pg_nx = PG_NX; 606 elf32_nxstack = 1; 607 /* EFER.EFER_NXE is set in initializecpu(). */ 608 #endif 609 } 610 611 /* 612 * Bootstrap the system enough to run with virtual memory. 613 * 614 * On the i386 this is called after pmap_cold() created initial 615 * kernel page table and enabled paging, and just syncs the pmap 616 * module with what has already been done. 617 */ 618 static void 619 __CONCAT(PMTYPE, bootstrap)(vm_paddr_t firstaddr) 620 { 621 vm_offset_t va; 622 pt_entry_t *pte, *unused; 623 struct pcpu *pc; 624 u_long res; 625 int i; 626 627 res = atop(firstaddr - (vm_paddr_t)KERNLOAD); 628 629 /* 630 * Add a physical memory segment (vm_phys_seg) corresponding to the 631 * preallocated kernel page table pages so that vm_page structures 632 * representing these pages will be created. The vm_page structures 633 * are required for promotion of the corresponding kernel virtual 634 * addresses to superpage mappings. 635 */ 636 vm_phys_early_add_seg(KPTphys, KPTphys + ptoa(nkpt)); 637 638 /* 639 * Initialize the first available kernel virtual address. 640 * However, using "firstaddr" may waste a few pages of the 641 * kernel virtual address space, because pmap_cold() may not 642 * have mapped every physical page that it allocated. 643 * Preferably, pmap_cold() would provide a first unused 644 * virtual address in addition to "firstaddr". 645 */ 646 virtual_avail = (vm_offset_t)firstaddr; 647 virtual_end = VM_MAX_KERNEL_ADDRESS; 648 649 /* 650 * Initialize the kernel pmap (which is statically allocated). 651 * Count bootstrap data as being resident in case any of this data is 652 * later unmapped (using pmap_remove()) and freed. 653 */ 654 PMAP_LOCK_INIT(kernel_pmap); 655 kernel_pmap->pm_pdir = IdlePTD; 656 #ifdef PMAP_PAE_COMP 657 kernel_pmap->pm_pdpt = IdlePDPT; 658 #endif 659 CPU_FILL(&kernel_pmap->pm_active); /* don't allow deactivation */ 660 kernel_pmap->pm_stats.resident_count = res; 661 TAILQ_INIT(&kernel_pmap->pm_pvchunk); 662 663 /* 664 * Initialize the global pv list lock. 665 */ 666 rw_init(&pvh_global_lock, "pmap pv global"); 667 668 /* 669 * Reserve some special page table entries/VA space for temporary 670 * mapping of pages. 671 */ 672 #define SYSMAP(c, p, v, n) \ 673 v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n); 674 675 va = virtual_avail; 676 pte = vtopte(va); 677 678 679 /* 680 * Initialize temporary map objects on the current CPU for use 681 * during early boot. 682 * CMAP1/CMAP2 are used for zeroing and copying pages. 683 * CMAP3 is used for the boot-time memory test. 
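 *
 * As a concrete illustration of the SYSMAP() macro defined above: it
 * simply carves the next "n" pages out of the bootstrap KVA cursor "va"
 * and remembers the matching PTE pointer.  For instance,
 * SYSMAP(caddr_t, CMAP3, CADDR3, 1) expands to roughly
 *
 *	CADDR3 = (caddr_t)va;	va += 1 * PAGE_SIZE;
 *	CMAP3 = pte;		pte += 1;
 *
 * so CADDR3 is the reserved virtual page and CMAP3 points at the PTE that
 * maps it.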
684 */ 685 pc = get_pcpu(); 686 mtx_init(&pc->pc_cmap_lock, "SYSMAPS", NULL, MTX_DEF); 687 SYSMAP(caddr_t, pc->pc_cmap_pte1, pc->pc_cmap_addr1, 1) 688 SYSMAP(caddr_t, pc->pc_cmap_pte2, pc->pc_cmap_addr2, 1) 689 SYSMAP(vm_offset_t, pte, pc->pc_qmap_addr, 1) 690 691 SYSMAP(caddr_t, CMAP3, CADDR3, 1); 692 693 /* 694 * Crashdump maps. 695 */ 696 SYSMAP(caddr_t, unused, crashdumpmap, MAXDUMPPGS) 697 698 /* 699 * ptvmmap is used for reading arbitrary physical pages via /dev/mem. 700 */ 701 SYSMAP(caddr_t, unused, ptvmmap, 1) 702 703 /* 704 * msgbufp is used to map the system message buffer. 705 */ 706 SYSMAP(struct msgbuf *, unused, msgbufp, atop(round_page(msgbufsize))) 707 708 /* 709 * KPTmap is used by pmap_kextract(). 710 * 711 * KPTmap is first initialized by pmap_cold(). However, that initial 712 * KPTmap can only support NKPT page table pages. Here, a larger 713 * KPTmap is created that can support KVA_PAGES page table pages. 714 */ 715 SYSMAP(pt_entry_t *, KPTD, KPTmap, KVA_PAGES) 716 717 for (i = 0; i < NKPT; i++) 718 KPTD[i] = (KPTphys + ptoa(i)) | PG_RW | PG_V; 719 720 /* 721 * PADDR1 and PADDR2 are used by pmap_pte_quick() and pmap_pte(), 722 * respectively. 723 */ 724 SYSMAP(pt_entry_t *, PMAP1, PADDR1, 1) 725 SYSMAP(pt_entry_t *, PMAP2, PADDR2, 1) 726 SYSMAP(pt_entry_t *, PMAP3, PADDR3, 1) 727 728 mtx_init(&PMAP2mutex, "PMAP2", NULL, MTX_DEF); 729 730 virtual_avail = va; 731 732 /* 733 * Initialize the PAT MSR if present. 734 * pmap_init_pat() clears and sets CR4_PGE, which, as a 735 * side-effect, invalidates stale PG_G TLB entries that might 736 * have been created in our pre-boot environment. We assume 737 * that PAT support implies PGE and in reverse, PGE presence 738 * comes with PAT. Both features were added for Pentium Pro. 739 */ 740 pmap_init_pat(); 741 } 742 743 static void 744 pmap_init_reserved_pages(void) 745 { 746 struct pcpu *pc; 747 vm_offset_t pages; 748 int i; 749 750 #ifdef PMAP_PAE_COMP 751 if (!pae_mode) 752 return; 753 #else 754 if (pae_mode) 755 return; 756 #endif 757 CPU_FOREACH(i) { 758 pc = pcpu_find(i); 759 mtx_init(&pc->pc_copyout_mlock, "cpmlk", NULL, MTX_DEF | 760 MTX_NEW); 761 pc->pc_copyout_maddr = kva_alloc(ptoa(2)); 762 if (pc->pc_copyout_maddr == 0) 763 panic("unable to allocate non-sleepable copyout KVA"); 764 sx_init(&pc->pc_copyout_slock, "cpslk"); 765 pc->pc_copyout_saddr = kva_alloc(ptoa(2)); 766 if (pc->pc_copyout_saddr == 0) 767 panic("unable to allocate sleepable copyout KVA"); 768 pc->pc_pmap_eh_va = kva_alloc(ptoa(1)); 769 if (pc->pc_pmap_eh_va == 0) 770 panic("unable to allocate pmap_extract_and_hold KVA"); 771 pc->pc_pmap_eh_ptep = (char *)vtopte(pc->pc_pmap_eh_va); 772 773 /* 774 * Skip if the mappings have already been initialized, 775 * i.e. this is the BSP. 776 */ 777 if (pc->pc_cmap_addr1 != 0) 778 continue; 779 780 mtx_init(&pc->pc_cmap_lock, "SYSMAPS", NULL, MTX_DEF); 781 pages = kva_alloc(PAGE_SIZE * 3); 782 if (pages == 0) 783 panic("unable to allocate CMAP KVA"); 784 pc->pc_cmap_pte1 = vtopte(pages); 785 pc->pc_cmap_pte2 = vtopte(pages + PAGE_SIZE); 786 pc->pc_cmap_addr1 = (caddr_t)pages; 787 pc->pc_cmap_addr2 = (caddr_t)(pages + PAGE_SIZE); 788 pc->pc_qmap_addr = pages + ptoa(2); 789 } 790 } 791 792 SYSINIT(rpages_init, SI_SUB_CPU, SI_ORDER_ANY, pmap_init_reserved_pages, NULL); 793 794 /* 795 * Setup the PAT MSR. 796 */ 797 static void 798 __CONCAT(PMTYPE, init_pat)(void) 799 { 800 int pat_table[PAT_INDEX_SIZE]; 801 uint64_t pat_msr; 802 u_long cr0, cr4; 803 int i; 804 805 /* Set default PAT index table. 
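 * Each slot maps a VM cache mode to one of the eight PAT entries, with -1
 * marking a mode that this CPU cannot express (see pmap_is_valid_memattr()
 * below).  The selected index is later turned into PTE/PDE bits by
 * pmap_cache_bits(); as a quick reference for the 3-bit encoding used
 * there:
 *
 *	index 0 -> (no bits)			index 2 -> PG_NC_PCD
 *	index 3 -> PG_NC_PCD | PG_NC_PWT	index 5 -> PG_PTE_PAT | PG_NC_PWT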
*/ 806 for (i = 0; i < PAT_INDEX_SIZE; i++) 807 pat_table[i] = -1; 808 pat_table[PAT_WRITE_BACK] = 0; 809 pat_table[PAT_WRITE_THROUGH] = 1; 810 pat_table[PAT_UNCACHEABLE] = 3; 811 pat_table[PAT_WRITE_COMBINING] = 3; 812 pat_table[PAT_WRITE_PROTECTED] = 3; 813 pat_table[PAT_UNCACHED] = 3; 814 815 /* 816 * Bail if this CPU doesn't implement PAT. 817 * We assume that PAT support implies PGE. 818 */ 819 if ((cpu_feature & CPUID_PAT) == 0) { 820 for (i = 0; i < PAT_INDEX_SIZE; i++) 821 pat_index[i] = pat_table[i]; 822 pat_works = 0; 823 return; 824 } 825 826 /* 827 * Due to some Intel errata, we can only safely use the lower 4 828 * PAT entries. 829 * 830 * Intel Pentium III Processor Specification Update 831 * Errata E.27 (Upper Four PAT Entries Not Usable With Mode B 832 * or Mode C Paging) 833 * 834 * Intel Pentium IV Processor Specification Update 835 * Errata N46 (PAT Index MSB May Be Calculated Incorrectly) 836 */ 837 if (cpu_vendor_id == CPU_VENDOR_INTEL && 838 !(CPUID_TO_FAMILY(cpu_id) == 6 && CPUID_TO_MODEL(cpu_id) >= 0xe)) 839 pat_works = 0; 840 841 /* Initialize default PAT entries. */ 842 pat_msr = PAT_VALUE(0, PAT_WRITE_BACK) | 843 PAT_VALUE(1, PAT_WRITE_THROUGH) | 844 PAT_VALUE(2, PAT_UNCACHED) | 845 PAT_VALUE(3, PAT_UNCACHEABLE) | 846 PAT_VALUE(4, PAT_WRITE_BACK) | 847 PAT_VALUE(5, PAT_WRITE_THROUGH) | 848 PAT_VALUE(6, PAT_UNCACHED) | 849 PAT_VALUE(7, PAT_UNCACHEABLE); 850 851 if (pat_works) { 852 /* 853 * Leave the indices 0-3 at the default of WB, WT, UC-, and UC. 854 * Program 5 and 6 as WP and WC. 855 * Leave 4 and 7 as WB and UC. 856 */ 857 pat_msr &= ~(PAT_MASK(5) | PAT_MASK(6)); 858 pat_msr |= PAT_VALUE(5, PAT_WRITE_PROTECTED) | 859 PAT_VALUE(6, PAT_WRITE_COMBINING); 860 pat_table[PAT_UNCACHED] = 2; 861 pat_table[PAT_WRITE_PROTECTED] = 5; 862 pat_table[PAT_WRITE_COMBINING] = 6; 863 } else { 864 /* 865 * Just replace PAT Index 2 with WC instead of UC-. 866 */ 867 pat_msr &= ~PAT_MASK(2); 868 pat_msr |= PAT_VALUE(2, PAT_WRITE_COMBINING); 869 pat_table[PAT_WRITE_COMBINING] = 2; 870 } 871 872 /* Disable PGE. */ 873 cr4 = rcr4(); 874 load_cr4(cr4 & ~CR4_PGE); 875 876 /* Disable caches (CD = 1, NW = 0). */ 877 cr0 = rcr0(); 878 load_cr0((cr0 & ~CR0_NW) | CR0_CD); 879 880 /* Flushes caches and TLBs. */ 881 wbinvd(); 882 invltlb(); 883 884 /* Update PAT and index table. */ 885 wrmsr(MSR_PAT, pat_msr); 886 for (i = 0; i < PAT_INDEX_SIZE; i++) 887 pat_index[i] = pat_table[i]; 888 889 /* Flush caches and TLBs again. */ 890 wbinvd(); 891 invltlb(); 892 893 /* Restore caches and PGE. */ 894 load_cr0(cr0); 895 load_cr4(cr4); 896 } 897 898 #ifdef PMAP_PAE_COMP 899 static void * 900 pmap_pdpt_allocf(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *flags, 901 int wait) 902 { 903 904 /* Inform UMA that this allocator uses kernel_map/object. */ 905 *flags = UMA_SLAB_KERNEL; 906 return ((void *)kmem_alloc_contig_domainset(DOMAINSET_FIXED(domain), 907 bytes, wait, 0x0ULL, 0xffffffffULL, 1, 0, VM_MEMATTR_DEFAULT)); 908 } 909 #endif 910 911 /* 912 * Abuse the pte nodes for unmapped kva to thread a kva freelist through. 913 * Requirements: 914 * - Must deal with pages in order to ensure that none of the PG_* bits 915 * are ever set, PG_V in particular. 916 * - Assumes we can write to ptes without pte_store() atomic ops, even 917 * on PAE systems. This should be ok. 918 * - Assumes nothing will ever test these addresses for 0 to indicate 919 * no mapping instead of correctly checking PG_V. 920 * - Assumes a vm_offset_t will fit in a pte (true for i386). 
921 * Because PG_V is never set, there can be no mappings to invalidate. 922 */ 923 static vm_offset_t 924 pmap_ptelist_alloc(vm_offset_t *head) 925 { 926 pt_entry_t *pte; 927 vm_offset_t va; 928 929 va = *head; 930 if (va == 0) 931 panic("pmap_ptelist_alloc: exhausted ptelist KVA"); 932 pte = vtopte(va); 933 *head = *pte; 934 if (*head & PG_V) 935 panic("pmap_ptelist_alloc: va with PG_V set!"); 936 *pte = 0; 937 return (va); 938 } 939 940 static void 941 pmap_ptelist_free(vm_offset_t *head, vm_offset_t va) 942 { 943 pt_entry_t *pte; 944 945 if (va & PG_V) 946 panic("pmap_ptelist_free: freeing va with PG_V set!"); 947 pte = vtopte(va); 948 *pte = *head; /* virtual! PG_V is 0 though */ 949 *head = va; 950 } 951 952 static void 953 pmap_ptelist_init(vm_offset_t *head, void *base, int npages) 954 { 955 int i; 956 vm_offset_t va; 957 958 *head = 0; 959 for (i = npages - 1; i >= 0; i--) { 960 va = (vm_offset_t)base + i * PAGE_SIZE; 961 pmap_ptelist_free(head, va); 962 } 963 } 964 965 966 /* 967 * Initialize the pmap module. 968 * Called by vm_init, to initialize any structures that the pmap 969 * system needs to map virtual memory. 970 */ 971 static void 972 __CONCAT(PMTYPE, init)(void) 973 { 974 struct pmap_preinit_mapping *ppim; 975 vm_page_t mpte; 976 vm_size_t s; 977 int i, pv_npg; 978 979 /* 980 * Initialize the vm page array entries for the kernel pmap's 981 * page table pages. 982 */ 983 PMAP_LOCK(kernel_pmap); 984 for (i = 0; i < NKPT; i++) { 985 mpte = PHYS_TO_VM_PAGE(KPTphys + ptoa(i)); 986 KASSERT(mpte >= vm_page_array && 987 mpte < &vm_page_array[vm_page_array_size], 988 ("pmap_init: page table page is out of range")); 989 mpte->pindex = i + KPTDI; 990 mpte->phys_addr = KPTphys + ptoa(i); 991 mpte->ref_count = 1; 992 993 /* 994 * Collect the page table pages that were replaced by a 2/4MB 995 * page. They are filled with equivalent 4KB page mappings. 996 */ 997 if (pseflag != 0 && 998 KERNBASE <= i << PDRSHIFT && i << PDRSHIFT < KERNend && 999 pmap_insert_pt_page(kernel_pmap, mpte, true)) 1000 panic("pmap_init: pmap_insert_pt_page failed"); 1001 } 1002 PMAP_UNLOCK(kernel_pmap); 1003 vm_wire_add(NKPT); 1004 1005 /* 1006 * Initialize the address space (zone) for the pv entries. Set a 1007 * high water mark so that the system can recover from excessive 1008 * numbers of pv entries. 1009 */ 1010 TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc); 1011 pv_entry_max = shpgperproc * maxproc + vm_cnt.v_page_count; 1012 TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max); 1013 pv_entry_max = roundup(pv_entry_max, _NPCPV); 1014 pv_entry_high_water = 9 * (pv_entry_max / 10); 1015 1016 /* 1017 * If the kernel is running on a virtual machine, then it must assume 1018 * that MCA is enabled by the hypervisor. Moreover, the kernel must 1019 * be prepared for the hypervisor changing the vendor and family that 1020 * are reported by CPUID. Consequently, the workaround for AMD Family 1021 * 10h Erratum 383 is enabled if the processor's feature set does not 1022 * include at least one feature that is only supported by older Intel 1023 * or newer AMD processors. 1024 */ 1025 if (vm_guest != VM_GUEST_NO && (cpu_feature & CPUID_SS) == 0 && 1026 (cpu_feature2 & (CPUID2_SSSE3 | CPUID2_SSE41 | CPUID2_AESNI | 1027 CPUID2_AVX | CPUID2_XSAVE)) == 0 && (amd_feature2 & (AMDID2_XOP | 1028 AMDID2_FMA4)) == 0) 1029 workaround_erratum383 = 1; 1030 1031 /* 1032 * Are large page mappings supported and enabled? 
1033 */ 1034 TUNABLE_INT_FETCH("vm.pmap.pg_ps_enabled", &pg_ps_enabled); 1035 if (pseflag == 0) 1036 pg_ps_enabled = 0; 1037 else if (pg_ps_enabled) { 1038 KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0, 1039 ("pmap_init: can't assign to pagesizes[1]")); 1040 pagesizes[1] = NBPDR; 1041 } 1042 1043 /* 1044 * Calculate the size of the pv head table for superpages. 1045 * Handle the possibility that "vm_phys_segs[...].end" is zero. 1046 */ 1047 pv_npg = trunc_4mpage(vm_phys_segs[vm_phys_nsegs - 1].end - 1048 PAGE_SIZE) / NBPDR + 1; 1049 1050 /* 1051 * Allocate memory for the pv head table for superpages. 1052 */ 1053 s = (vm_size_t)(pv_npg * sizeof(struct md_page)); 1054 s = round_page(s); 1055 pv_table = (struct md_page *)kmem_malloc(s, M_WAITOK | M_ZERO); 1056 for (i = 0; i < pv_npg; i++) 1057 TAILQ_INIT(&pv_table[i].pv_list); 1058 1059 pv_maxchunks = MAX(pv_entry_max / _NPCPV, maxproc); 1060 pv_chunkbase = (struct pv_chunk *)kva_alloc(PAGE_SIZE * pv_maxchunks); 1061 if (pv_chunkbase == NULL) 1062 panic("pmap_init: not enough kvm for pv chunks"); 1063 pmap_ptelist_init(&pv_vafree, pv_chunkbase, pv_maxchunks); 1064 #ifdef PMAP_PAE_COMP 1065 pdptzone = uma_zcreate("PDPT", NPGPTD * sizeof(pdpt_entry_t), NULL, 1066 NULL, NULL, NULL, (NPGPTD * sizeof(pdpt_entry_t)) - 1, 1067 UMA_ZONE_CONTIG | UMA_ZONE_VM | UMA_ZONE_NOFREE); 1068 uma_zone_set_allocf(pdptzone, pmap_pdpt_allocf); 1069 #endif 1070 1071 pmap_initialized = 1; 1072 pmap_init_trm(); 1073 1074 if (!bootverbose) 1075 return; 1076 for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) { 1077 ppim = pmap_preinit_mapping + i; 1078 if (ppim->va == 0) 1079 continue; 1080 printf("PPIM %u: PA=%#jx, VA=%#x, size=%#x, mode=%#x\n", i, 1081 (uintmax_t)ppim->pa, ppim->va, ppim->sz, ppim->mode); 1082 } 1083 1084 } 1085 1086 extern u_long pmap_pde_demotions; 1087 extern u_long pmap_pde_mappings; 1088 extern u_long pmap_pde_p_failures; 1089 extern u_long pmap_pde_promotions; 1090 1091 /*************************************************** 1092 * Low level helper routines..... 1093 ***************************************************/ 1094 1095 static boolean_t 1096 __CONCAT(PMTYPE, is_valid_memattr)(pmap_t pmap __unused, vm_memattr_t mode) 1097 { 1098 1099 return (mode >= 0 && mode < PAT_INDEX_SIZE && 1100 pat_index[(int)mode] >= 0); 1101 } 1102 1103 /* 1104 * Determine the appropriate bits to set in a PTE or PDE for a specified 1105 * caching mode. 1106 */ 1107 static int 1108 __CONCAT(PMTYPE, cache_bits)(pmap_t pmap, int mode, boolean_t is_pde) 1109 { 1110 int cache_bits, pat_flag, pat_idx; 1111 1112 if (!pmap_is_valid_memattr(pmap, mode)) 1113 panic("Unknown caching mode %d\n", mode); 1114 1115 /* The PAT bit is different for PTE's and PDE's. */ 1116 pat_flag = is_pde ? PG_PDE_PAT : PG_PTE_PAT; 1117 1118 /* Map the caching mode to a PAT index. */ 1119 pat_idx = pat_index[mode]; 1120 1121 /* Map the 3-bit index value into the PAT, PCD, and PWT bits. */ 1122 cache_bits = 0; 1123 if (pat_idx & 0x4) 1124 cache_bits |= pat_flag; 1125 if (pat_idx & 0x2) 1126 cache_bits |= PG_NC_PCD; 1127 if (pat_idx & 0x1) 1128 cache_bits |= PG_NC_PWT; 1129 return (cache_bits); 1130 } 1131 1132 static int 1133 pmap_pat_index(pmap_t pmap, pt_entry_t pte, bool is_pde) 1134 { 1135 int pat_flag, pat_idx; 1136 1137 if ((cpu_feature & CPUID_PAT) == 0) 1138 return (0); 1139 1140 pat_idx = 0; 1141 /* The PAT bit is different for PTE's and PDE's. */ 1142 pat_flag = is_pde ? 
    PG_PDE_PAT : PG_PTE_PAT;

	if ((pte & pat_flag) != 0)
		pat_idx |= 0x4;
	if ((pte & PG_NC_PCD) != 0)
		pat_idx |= 0x2;
	if ((pte & PG_NC_PWT) != 0)
		pat_idx |= 0x1;

	/* See pmap_init_pat(). */
	if (pat_works) {
		if (pat_idx == 4)
			pat_idx = 0;
		if (pat_idx == 7)
			pat_idx = 3;
	} else {
		/* XXXKIB */
	}

	return (pat_idx);
}

static bool
__CONCAT(PMTYPE, ps_enabled)(pmap_t pmap __unused)
{

	return (pg_ps_enabled);
}

/*
 * The caller is responsible for maintaining TLB consistency.
 */
static void
pmap_kenter_pde(vm_offset_t va, pd_entry_t newpde)
{
	pd_entry_t *pde;

	pde = pmap_pde(kernel_pmap, va);
	pde_store(pde, newpde);
}

/*
 * After changing the page size for the specified virtual address in the page
 * table, flush the corresponding entries from the processor's TLB.  Only the
 * calling processor's TLB is affected.
 *
 * The calling thread must be pinned to a processor.
 */
static void
pmap_update_pde_invalidate(vm_offset_t va, pd_entry_t newpde)
{

	if ((newpde & PG_PS) == 0)
		/* Demotion: flush a specific 2- or 4MB page mapping. */
		invlpg(va);
	else /* if ((newpde & PG_G) == 0) */
		/*
		 * Promotion: flush every 4KB page mapping from the TLB
		 * because there are too many to flush individually.
		 */
		invltlb();
}

#ifdef SMP

static void
pmap_curcpu_cb_dummy(pmap_t pmap __unused, vm_offset_t addr1 __unused,
    vm_offset_t addr2 __unused)
{
}

/*
 * For SMP, these functions have to use the IPI mechanism for coherence.
 *
 * N.B.: Before calling any of the following TLB invalidation functions,
 * the calling processor must ensure that all stores updating a non-
 * kernel page table are globally performed.  Otherwise, another
 * processor could cache an old, pre-update entry without being
 * invalidated.  This can happen one of two ways: (1) The pmap becomes
 * active on another processor after its pm_active field is checked by
 * one of the following functions but before a store updating the page
 * table is globally performed. (2) The pmap becomes active on another
 * processor before its pm_active field is checked but due to
 * speculative loads one of the following functions still reads the
 * pmap as inactive on the other processor.
 *
 * The kernel page table is exempt because its pm_active field is
 * immutable.  The kernel page table is always active on every
 * processor.
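 *
 * In other words, the expected calling pattern is, as an illustrative
 * sketch rather than a quote of any particular caller:
 *
 *	pte_store(pte, newpte);		// PTE update, globally performed
 *	pmap_invalidate_page(pmap, va);	// IPI-based shootdown (below)
 *
 * with the page table store complete before the invalidation function
 * samples pm_active and sends IPIs.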
1231 */ 1232 static void 1233 pmap_invalidate_page_int(pmap_t pmap, vm_offset_t va) 1234 { 1235 cpuset_t *mask, other_cpus; 1236 u_int cpuid; 1237 1238 sched_pin(); 1239 if (pmap == kernel_pmap) { 1240 invlpg(va); 1241 mask = &all_cpus; 1242 } else if (!CPU_CMP(&pmap->pm_active, &all_cpus)) { 1243 mask = &all_cpus; 1244 } else { 1245 cpuid = PCPU_GET(cpuid); 1246 other_cpus = all_cpus; 1247 CPU_CLR(cpuid, &other_cpus); 1248 CPU_AND(&other_cpus, &pmap->pm_active); 1249 mask = &other_cpus; 1250 } 1251 smp_masked_invlpg(*mask, va, pmap, pmap_curcpu_cb_dummy); 1252 sched_unpin(); 1253 } 1254 1255 /* 4k PTEs -- Chosen to exceed the total size of Broadwell L2 TLB */ 1256 #define PMAP_INVLPG_THRESHOLD (4 * 1024 * PAGE_SIZE) 1257 1258 static void 1259 pmap_invalidate_range_int(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 1260 { 1261 cpuset_t *mask, other_cpus; 1262 vm_offset_t addr; 1263 u_int cpuid; 1264 1265 if (eva - sva >= PMAP_INVLPG_THRESHOLD) { 1266 pmap_invalidate_all_int(pmap); 1267 return; 1268 } 1269 1270 sched_pin(); 1271 if (pmap == kernel_pmap) { 1272 for (addr = sva; addr < eva; addr += PAGE_SIZE) 1273 invlpg(addr); 1274 mask = &all_cpus; 1275 } else if (!CPU_CMP(&pmap->pm_active, &all_cpus)) { 1276 mask = &all_cpus; 1277 } else { 1278 cpuid = PCPU_GET(cpuid); 1279 other_cpus = all_cpus; 1280 CPU_CLR(cpuid, &other_cpus); 1281 CPU_AND(&other_cpus, &pmap->pm_active); 1282 mask = &other_cpus; 1283 } 1284 smp_masked_invlpg_range(*mask, sva, eva, pmap, pmap_curcpu_cb_dummy); 1285 sched_unpin(); 1286 } 1287 1288 static void 1289 pmap_invalidate_all_int(pmap_t pmap) 1290 { 1291 cpuset_t *mask, other_cpus; 1292 u_int cpuid; 1293 1294 sched_pin(); 1295 if (pmap == kernel_pmap) { 1296 invltlb(); 1297 mask = &all_cpus; 1298 } else if (!CPU_CMP(&pmap->pm_active, &all_cpus)) { 1299 mask = &all_cpus; 1300 } else { 1301 cpuid = PCPU_GET(cpuid); 1302 other_cpus = all_cpus; 1303 CPU_CLR(cpuid, &other_cpus); 1304 CPU_AND(&other_cpus, &pmap->pm_active); 1305 mask = &other_cpus; 1306 } 1307 smp_masked_invltlb(*mask, pmap, pmap_curcpu_cb_dummy); 1308 sched_unpin(); 1309 } 1310 1311 static void 1312 pmap_invalidate_cache_curcpu_cb(pmap_t pmap __unused, 1313 vm_offset_t addr1 __unused, vm_offset_t addr2 __unused) 1314 { 1315 wbinvd(); 1316 } 1317 1318 static void 1319 __CONCAT(PMTYPE, invalidate_cache)(void) 1320 { 1321 smp_cache_flush(pmap_invalidate_cache_curcpu_cb); 1322 } 1323 1324 struct pde_action { 1325 cpuset_t invalidate; /* processors that invalidate their TLB */ 1326 vm_offset_t va; 1327 pd_entry_t *pde; 1328 pd_entry_t newpde; 1329 u_int store; /* processor that updates the PDE */ 1330 }; 1331 1332 static void 1333 pmap_update_pde_kernel(void *arg) 1334 { 1335 struct pde_action *act = arg; 1336 pd_entry_t *pde; 1337 1338 if (act->store == PCPU_GET(cpuid)) { 1339 pde = pmap_pde(kernel_pmap, act->va); 1340 pde_store(pde, act->newpde); 1341 } 1342 } 1343 1344 static void 1345 pmap_update_pde_user(void *arg) 1346 { 1347 struct pde_action *act = arg; 1348 1349 if (act->store == PCPU_GET(cpuid)) 1350 pde_store(act->pde, act->newpde); 1351 } 1352 1353 static void 1354 pmap_update_pde_teardown(void *arg) 1355 { 1356 struct pde_action *act = arg; 1357 1358 if (CPU_ISSET(PCPU_GET(cpuid), &act->invalidate)) 1359 pmap_update_pde_invalidate(act->va, act->newpde); 1360 } 1361 1362 /* 1363 * Change the page size for the specified virtual address in a way that 1364 * prevents any possibility of the TLB ever having two entries that map the 1365 * same virtual address using different page sizes. 
This is the recommended 1366 * workaround for Erratum 383 on AMD Family 10h processors. It prevents a 1367 * machine check exception for a TLB state that is improperly diagnosed as a 1368 * hardware error. 1369 */ 1370 static void 1371 pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde) 1372 { 1373 struct pde_action act; 1374 cpuset_t active, other_cpus; 1375 u_int cpuid; 1376 1377 sched_pin(); 1378 cpuid = PCPU_GET(cpuid); 1379 other_cpus = all_cpus; 1380 CPU_CLR(cpuid, &other_cpus); 1381 if (pmap == kernel_pmap) 1382 active = all_cpus; 1383 else 1384 active = pmap->pm_active; 1385 if (CPU_OVERLAP(&active, &other_cpus)) { 1386 act.store = cpuid; 1387 act.invalidate = active; 1388 act.va = va; 1389 act.pde = pde; 1390 act.newpde = newpde; 1391 CPU_SET(cpuid, &active); 1392 smp_rendezvous_cpus(active, 1393 smp_no_rendezvous_barrier, pmap == kernel_pmap ? 1394 pmap_update_pde_kernel : pmap_update_pde_user, 1395 pmap_update_pde_teardown, &act); 1396 } else { 1397 if (pmap == kernel_pmap) 1398 pmap_kenter_pde(va, newpde); 1399 else 1400 pde_store(pde, newpde); 1401 if (CPU_ISSET(cpuid, &active)) 1402 pmap_update_pde_invalidate(va, newpde); 1403 } 1404 sched_unpin(); 1405 } 1406 #else /* !SMP */ 1407 /* 1408 * Normal, non-SMP, 486+ invalidation functions. 1409 * We inline these within pmap.c for speed. 1410 */ 1411 static void 1412 pmap_invalidate_page_int(pmap_t pmap, vm_offset_t va) 1413 { 1414 1415 if (pmap == kernel_pmap) 1416 invlpg(va); 1417 } 1418 1419 static void 1420 pmap_invalidate_range_int(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 1421 { 1422 vm_offset_t addr; 1423 1424 if (pmap == kernel_pmap) 1425 for (addr = sva; addr < eva; addr += PAGE_SIZE) 1426 invlpg(addr); 1427 } 1428 1429 static void 1430 pmap_invalidate_all_int(pmap_t pmap) 1431 { 1432 1433 if (pmap == kernel_pmap) 1434 invltlb(); 1435 } 1436 1437 static void 1438 __CONCAT(PMTYPE, invalidate_cache)(void) 1439 { 1440 1441 wbinvd(); 1442 } 1443 1444 static void 1445 pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde) 1446 { 1447 1448 if (pmap == kernel_pmap) 1449 pmap_kenter_pde(va, newpde); 1450 else 1451 pde_store(pde, newpde); 1452 if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active)) 1453 pmap_update_pde_invalidate(va, newpde); 1454 } 1455 #endif /* !SMP */ 1456 1457 static void 1458 __CONCAT(PMTYPE, invalidate_page)(pmap_t pmap, vm_offset_t va) 1459 { 1460 1461 pmap_invalidate_page_int(pmap, va); 1462 } 1463 1464 static void 1465 __CONCAT(PMTYPE, invalidate_range)(pmap_t pmap, vm_offset_t sva, 1466 vm_offset_t eva) 1467 { 1468 1469 pmap_invalidate_range_int(pmap, sva, eva); 1470 } 1471 1472 static void 1473 __CONCAT(PMTYPE, invalidate_all)(pmap_t pmap) 1474 { 1475 1476 pmap_invalidate_all_int(pmap); 1477 } 1478 1479 static void 1480 pmap_invalidate_pde_page(pmap_t pmap, vm_offset_t va, pd_entry_t pde) 1481 { 1482 1483 /* 1484 * When the PDE has PG_PROMOTED set, the 2- or 4MB page mapping was 1485 * created by a promotion that did not invalidate the 512 or 1024 4KB 1486 * page mappings that might exist in the TLB. Consequently, at this 1487 * point, the TLB may hold both 4KB and 2- or 4MB page mappings for 1488 * the address range [va, va + NBPDR). Therefore, the entire range 1489 * must be invalidated here. In contrast, when PG_PROMOTED is clear, 1490 * the TLB will not hold any 4KB page mappings for the address range 1491 * [va, va + NBPDR), and so a single INVLPG suffices to invalidate the 1492 * 2- or 4MB page mapping from the TLB. 
1493 */ 1494 if ((pde & PG_PROMOTED) != 0) 1495 pmap_invalidate_range_int(pmap, va, va + NBPDR - 1); 1496 else 1497 pmap_invalidate_page_int(pmap, va); 1498 } 1499 1500 /* 1501 * Are we current address space or kernel? 1502 */ 1503 static __inline int 1504 pmap_is_current(pmap_t pmap) 1505 { 1506 1507 return (pmap == kernel_pmap); 1508 } 1509 1510 /* 1511 * If the given pmap is not the current or kernel pmap, the returned pte must 1512 * be released by passing it to pmap_pte_release(). 1513 */ 1514 static pt_entry_t * 1515 __CONCAT(PMTYPE, pte)(pmap_t pmap, vm_offset_t va) 1516 { 1517 pd_entry_t newpf; 1518 pd_entry_t *pde; 1519 1520 pde = pmap_pde(pmap, va); 1521 if (*pde & PG_PS) 1522 return (pde); 1523 if (*pde != 0) { 1524 /* are we current address space or kernel? */ 1525 if (pmap_is_current(pmap)) 1526 return (vtopte(va)); 1527 mtx_lock(&PMAP2mutex); 1528 newpf = *pde & PG_FRAME; 1529 if ((*PMAP2 & PG_FRAME) != newpf) { 1530 *PMAP2 = newpf | PG_RW | PG_V | PG_A | PG_M; 1531 pmap_invalidate_page_int(kernel_pmap, 1532 (vm_offset_t)PADDR2); 1533 } 1534 return (PADDR2 + (i386_btop(va) & (NPTEPG - 1))); 1535 } 1536 return (NULL); 1537 } 1538 1539 /* 1540 * Releases a pte that was obtained from pmap_pte(). Be prepared for the pte 1541 * being NULL. 1542 */ 1543 static __inline void 1544 pmap_pte_release(pt_entry_t *pte) 1545 { 1546 1547 if ((pt_entry_t *)((vm_offset_t)pte & ~PAGE_MASK) == PADDR2) 1548 mtx_unlock(&PMAP2mutex); 1549 } 1550 1551 /* 1552 * NB: The sequence of updating a page table followed by accesses to the 1553 * corresponding pages is subject to the situation described in the "AMD64 1554 * Architecture Programmer's Manual Volume 2: System Programming" rev. 3.23, 1555 * "7.3.1 Special Coherency Considerations". Therefore, issuing the INVLPG 1556 * right after modifying the PTE bits is crucial. 1557 */ 1558 static __inline void 1559 invlcaddr(void *caddr) 1560 { 1561 1562 invlpg((u_int)caddr); 1563 } 1564 1565 /* 1566 * Super fast pmap_pte routine best used when scanning 1567 * the pv lists. This eliminates many coarse-grained 1568 * invltlb calls. Note that many of the pv list 1569 * scans are across different pmaps. It is very wasteful 1570 * to do an entire invltlb for checking a single mapping. 1571 * 1572 * If the given pmap is not the current pmap, pvh_global_lock 1573 * must be held and curthread pinned to a CPU. 1574 */ 1575 static pt_entry_t * 1576 pmap_pte_quick(pmap_t pmap, vm_offset_t va) 1577 { 1578 pd_entry_t newpf; 1579 pd_entry_t *pde; 1580 1581 pde = pmap_pde(pmap, va); 1582 if (*pde & PG_PS) 1583 return (pde); 1584 if (*pde != 0) { 1585 /* are we current address space or kernel? 
*/ 1586 if (pmap_is_current(pmap)) 1587 return (vtopte(va)); 1588 rw_assert(&pvh_global_lock, RA_WLOCKED); 1589 KASSERT(curthread->td_pinned > 0, ("curthread not pinned")); 1590 newpf = *pde & PG_FRAME; 1591 if ((*PMAP1 & PG_FRAME) != newpf) { 1592 *PMAP1 = newpf | PG_RW | PG_V | PG_A | PG_M; 1593 #ifdef SMP 1594 PMAP1cpu = PCPU_GET(cpuid); 1595 #endif 1596 invlcaddr(PADDR1); 1597 PMAP1changed++; 1598 } else 1599 #ifdef SMP 1600 if (PMAP1cpu != PCPU_GET(cpuid)) { 1601 PMAP1cpu = PCPU_GET(cpuid); 1602 invlcaddr(PADDR1); 1603 PMAP1changedcpu++; 1604 } else 1605 #endif 1606 PMAP1unchanged++; 1607 return (PADDR1 + (i386_btop(va) & (NPTEPG - 1))); 1608 } 1609 return (0); 1610 } 1611 1612 static pt_entry_t * 1613 pmap_pte_quick3(pmap_t pmap, vm_offset_t va) 1614 { 1615 pd_entry_t newpf; 1616 pd_entry_t *pde; 1617 1618 pde = pmap_pde(pmap, va); 1619 if (*pde & PG_PS) 1620 return (pde); 1621 if (*pde != 0) { 1622 rw_assert(&pvh_global_lock, RA_WLOCKED); 1623 KASSERT(curthread->td_pinned > 0, ("curthread not pinned")); 1624 newpf = *pde & PG_FRAME; 1625 if ((*PMAP3 & PG_FRAME) != newpf) { 1626 *PMAP3 = newpf | PG_RW | PG_V | PG_A | PG_M; 1627 #ifdef SMP 1628 PMAP3cpu = PCPU_GET(cpuid); 1629 #endif 1630 invlcaddr(PADDR3); 1631 PMAP1changed++; 1632 } else 1633 #ifdef SMP 1634 if (PMAP3cpu != PCPU_GET(cpuid)) { 1635 PMAP3cpu = PCPU_GET(cpuid); 1636 invlcaddr(PADDR3); 1637 PMAP1changedcpu++; 1638 } else 1639 #endif 1640 PMAP1unchanged++; 1641 return (PADDR3 + (i386_btop(va) & (NPTEPG - 1))); 1642 } 1643 return (0); 1644 } 1645 1646 static pt_entry_t 1647 pmap_pte_ufast(pmap_t pmap, vm_offset_t va, pd_entry_t pde) 1648 { 1649 pt_entry_t *eh_ptep, pte, *ptep; 1650 1651 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1652 pde &= PG_FRAME; 1653 critical_enter(); 1654 eh_ptep = (pt_entry_t *)PCPU_GET(pmap_eh_ptep); 1655 if ((*eh_ptep & PG_FRAME) != pde) { 1656 *eh_ptep = pde | PG_RW | PG_V | PG_A | PG_M; 1657 invlcaddr((void *)PCPU_GET(pmap_eh_va)); 1658 } 1659 ptep = (pt_entry_t *)PCPU_GET(pmap_eh_va) + (i386_btop(va) & 1660 (NPTEPG - 1)); 1661 pte = *ptep; 1662 critical_exit(); 1663 return (pte); 1664 } 1665 1666 /* 1667 * Extract from the kernel page table the physical address that is mapped by 1668 * the given virtual address "va". 1669 * 1670 * This function may be used before pmap_bootstrap() is called. 1671 */ 1672 static vm_paddr_t 1673 __CONCAT(PMTYPE, kextract)(vm_offset_t va) 1674 { 1675 vm_paddr_t pa; 1676 1677 if ((pa = pte_load(&PTD[va >> PDRSHIFT])) & PG_PS) { 1678 pa = (pa & PG_PS_FRAME) | (va & PDRMASK); 1679 } else { 1680 /* 1681 * Beware of a concurrent promotion that changes the PDE at 1682 * this point! For example, vtopte() must not be used to 1683 * access the PTE because it would use the new PDE. It is, 1684 * however, safe to use the old PDE because the page table 1685 * page is preserved by the promotion. 1686 */ 1687 pa = KPTmap[i386_btop(va)]; 1688 pa = (pa & PG_FRAME) | (va & PAGE_MASK); 1689 } 1690 return (pa); 1691 } 1692 1693 /* 1694 * Routine: pmap_extract 1695 * Function: 1696 * Extract the physical page address associated 1697 * with the given map/virtual_address pair. 
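 *
 *	A return value of zero means that no valid mapping exists.  An
 *	illustrative call through the usual wrapper (the vmspace_pmap()
 *	accessor and the wrapper name are assumptions, not part of this
 *	file):
 *
 *		pa = pmap_extract(vmspace_pmap(curproc->p_vmspace), va);
 *		if (pa == 0)
 *			;	// va is unmapped in that pmap
 *
 *	Nothing prevents the page from being freed once the pmap lock is
 *	dropped; pmap_extract_and_hold() below is the variant that wires
 *	the page before returning it.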
1698 */ 1699 static vm_paddr_t 1700 __CONCAT(PMTYPE, extract)(pmap_t pmap, vm_offset_t va) 1701 { 1702 vm_paddr_t rtval; 1703 pt_entry_t pte; 1704 pd_entry_t pde; 1705 1706 rtval = 0; 1707 PMAP_LOCK(pmap); 1708 pde = pmap->pm_pdir[va >> PDRSHIFT]; 1709 if (pde != 0) { 1710 if ((pde & PG_PS) != 0) 1711 rtval = (pde & PG_PS_FRAME) | (va & PDRMASK); 1712 else { 1713 pte = pmap_pte_ufast(pmap, va, pde); 1714 rtval = (pte & PG_FRAME) | (va & PAGE_MASK); 1715 } 1716 } 1717 PMAP_UNLOCK(pmap); 1718 return (rtval); 1719 } 1720 1721 /* 1722 * Routine: pmap_extract_and_hold 1723 * Function: 1724 * Atomically extract and hold the physical page 1725 * with the given pmap and virtual address pair 1726 * if that mapping permits the given protection. 1727 */ 1728 static vm_page_t 1729 __CONCAT(PMTYPE, extract_and_hold)(pmap_t pmap, vm_offset_t va, vm_prot_t prot) 1730 { 1731 pd_entry_t pde; 1732 pt_entry_t pte; 1733 vm_page_t m; 1734 1735 m = NULL; 1736 PMAP_LOCK(pmap); 1737 pde = *pmap_pde(pmap, va); 1738 if (pde != 0) { 1739 if (pde & PG_PS) { 1740 if ((pde & PG_RW) || (prot & VM_PROT_WRITE) == 0) 1741 m = PHYS_TO_VM_PAGE((pde & PG_PS_FRAME) | 1742 (va & PDRMASK)); 1743 } else { 1744 pte = pmap_pte_ufast(pmap, va, pde); 1745 if (pte != 0 && 1746 ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0)) 1747 m = PHYS_TO_VM_PAGE(pte & PG_FRAME); 1748 } 1749 if (m != NULL && !vm_page_wire_mapped(m)) 1750 m = NULL; 1751 } 1752 PMAP_UNLOCK(pmap); 1753 return (m); 1754 } 1755 1756 /*************************************************** 1757 * Low level mapping routines..... 1758 ***************************************************/ 1759 1760 /* 1761 * Add a wired page to the kva. 1762 * Note: not SMP coherent. 1763 * 1764 * This function may be used before pmap_bootstrap() is called. 1765 */ 1766 static void 1767 __CONCAT(PMTYPE, kenter)(vm_offset_t va, vm_paddr_t pa) 1768 { 1769 pt_entry_t *pte; 1770 1771 pte = vtopte(va); 1772 pte_store(pte, pa | PG_RW | PG_V); 1773 } 1774 1775 static __inline void 1776 pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode) 1777 { 1778 pt_entry_t *pte; 1779 1780 pte = vtopte(va); 1781 pte_store(pte, pa | PG_RW | PG_V | pmap_cache_bits(kernel_pmap, 1782 mode, 0)); 1783 } 1784 1785 /* 1786 * Remove a page from the kernel pagetables. 1787 * Note: not SMP coherent. 1788 * 1789 * This function may be used before pmap_bootstrap() is called. 1790 */ 1791 static void 1792 __CONCAT(PMTYPE, kremove)(vm_offset_t va) 1793 { 1794 pt_entry_t *pte; 1795 1796 pte = vtopte(va); 1797 pte_clear(pte); 1798 } 1799 1800 /* 1801 * Used to map a range of physical addresses into kernel 1802 * virtual address space. 1803 * 1804 * The value passed in '*virt' is a suggested virtual address for 1805 * the mapping. Architectures which can support a direct-mapped 1806 * physical to virtual region can return the appropriate address 1807 * within that region, leaving '*virt' unchanged. Other 1808 * architectures should map the pages starting at '*virt' and 1809 * update '*virt' with the first usable address after the mapped 1810 * region. 1811 */ 1812 static vm_offset_t 1813 __CONCAT(PMTYPE, map)(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, 1814 int prot) 1815 { 1816 vm_offset_t va, sva; 1817 vm_paddr_t superpage_offset; 1818 pd_entry_t newpde; 1819 1820 va = *virt; 1821 /* 1822 * Does the physical address range's size and alignment permit at 1823 * least one superpage mapping to be created? 
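 *
 * As a worked example with hypothetical numbers (non-PAE, 4MB superpages,
 * PDRMASK == 0x3fffff): for start = 0x00210000 and end = 0x01000000,
 * superpage_offset is 0x210000 and the test below computes
 * 0xdf0000 - ((0x400000 - 0x210000) & 0x3fffff) = 0xc00000 >= NBPDR,
 * so the loop can map 0x400000-0xffffff with three 4MB PDEs and the
 * leading 0x210000-0x3fffff with 4KB PTEs.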
1824 */ 1825 superpage_offset = start & PDRMASK; 1826 if ((end - start) - ((NBPDR - superpage_offset) & PDRMASK) >= NBPDR) { 1827 /* 1828 * Increase the starting virtual address so that its alignment 1829 * does not preclude the use of superpage mappings. 1830 */ 1831 if ((va & PDRMASK) < superpage_offset) 1832 va = (va & ~PDRMASK) + superpage_offset; 1833 else if ((va & PDRMASK) > superpage_offset) 1834 va = ((va + PDRMASK) & ~PDRMASK) + superpage_offset; 1835 } 1836 sva = va; 1837 while (start < end) { 1838 if ((start & PDRMASK) == 0 && end - start >= NBPDR && 1839 pseflag != 0) { 1840 KASSERT((va & PDRMASK) == 0, 1841 ("pmap_map: misaligned va %#x", va)); 1842 newpde = start | PG_PS | PG_RW | PG_V; 1843 pmap_kenter_pde(va, newpde); 1844 va += NBPDR; 1845 start += NBPDR; 1846 } else { 1847 pmap_kenter(va, start); 1848 va += PAGE_SIZE; 1849 start += PAGE_SIZE; 1850 } 1851 } 1852 pmap_invalidate_range_int(kernel_pmap, sva, va); 1853 *virt = va; 1854 return (sva); 1855 } 1856 1857 1858 /* 1859 * Add a list of wired pages to the kva 1860 * this routine is only used for temporary 1861 * kernel mappings that do not need to have 1862 * page modification or references recorded. 1863 * Note that old mappings are simply written 1864 * over. The page *must* be wired. 1865 * Note: SMP coherent. Uses a ranged shootdown IPI. 1866 */ 1867 static void 1868 __CONCAT(PMTYPE, qenter)(vm_offset_t sva, vm_page_t *ma, int count) 1869 { 1870 pt_entry_t *endpte, oldpte, pa, *pte; 1871 vm_page_t m; 1872 1873 oldpte = 0; 1874 pte = vtopte(sva); 1875 endpte = pte + count; 1876 while (pte < endpte) { 1877 m = *ma++; 1878 pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(kernel_pmap, 1879 m->md.pat_mode, 0); 1880 if ((*pte & (PG_FRAME | PG_PTE_CACHE)) != pa) { 1881 oldpte |= *pte; 1882 pte_store(pte, pa | pg_nx | PG_RW | PG_V); 1883 } 1884 pte++; 1885 } 1886 if (__predict_false((oldpte & PG_V) != 0)) 1887 pmap_invalidate_range_int(kernel_pmap, sva, sva + count * 1888 PAGE_SIZE); 1889 } 1890 1891 /* 1892 * This routine tears out page mappings from the 1893 * kernel -- it is meant only for temporary mappings. 1894 * Note: SMP coherent. Uses a ranged shootdown IPI. 1895 */ 1896 static void 1897 __CONCAT(PMTYPE, qremove)(vm_offset_t sva, int count) 1898 { 1899 vm_offset_t va; 1900 1901 va = sva; 1902 while (count-- > 0) { 1903 pmap_kremove(va); 1904 va += PAGE_SIZE; 1905 } 1906 pmap_invalidate_range_int(kernel_pmap, sva, va); 1907 } 1908 1909 /*************************************************** 1910 * Page table page management routines..... 1911 ***************************************************/ 1912 /* 1913 * Schedule the specified unused page table page to be freed. Specifically, 1914 * add the page to the specified list of pages that will be released to the 1915 * physical memory manager after the TLB has been updated. 1916 */ 1917 static __inline void 1918 pmap_add_delayed_free_list(vm_page_t m, struct spglist *free, 1919 boolean_t set_PG_ZERO) 1920 { 1921 1922 if (set_PG_ZERO) 1923 m->flags |= PG_ZERO; 1924 else 1925 m->flags &= ~PG_ZERO; 1926 SLIST_INSERT_HEAD(free, m, plinks.s.ss); 1927 } 1928 1929 /* 1930 * Inserts the specified page table page into the specified pmap's collection 1931 * of idle page table pages. Each of a pmap's page table pages is responsible 1932 * for mapping a distinct range of virtual addresses. The pmap's collection is 1933 * ordered by this virtual address range. 1934 * 1935 * If "promoted" is false, then the page table page "mpte" must be zero filled. 
1936 */ 1937 static __inline int 1938 pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte, bool promoted) 1939 { 1940 1941 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1942 mpte->valid = promoted ? VM_PAGE_BITS_ALL : 0; 1943 return (vm_radix_insert(&pmap->pm_root, mpte)); 1944 } 1945 1946 /* 1947 * Removes the page table page mapping the specified virtual address from the 1948 * specified pmap's collection of idle page table pages, and returns it. 1949 * Otherwise, returns NULL if there is no page table page corresponding to the 1950 * specified virtual address. 1951 */ 1952 static __inline vm_page_t 1953 pmap_remove_pt_page(pmap_t pmap, vm_offset_t va) 1954 { 1955 1956 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1957 return (vm_radix_remove(&pmap->pm_root, va >> PDRSHIFT)); 1958 } 1959 1960 /* 1961 * Decrements a page table page's reference count, which is used to record the 1962 * number of valid page table entries within the page. If the reference count 1963 * drops to zero, then the page table page is unmapped. Returns TRUE if the 1964 * page table page was unmapped and FALSE otherwise. 1965 */ 1966 static inline boolean_t 1967 pmap_unwire_ptp(pmap_t pmap, vm_page_t m, struct spglist *free) 1968 { 1969 1970 --m->ref_count; 1971 if (m->ref_count == 0) { 1972 _pmap_unwire_ptp(pmap, m, free); 1973 return (TRUE); 1974 } else 1975 return (FALSE); 1976 } 1977 1978 static void 1979 _pmap_unwire_ptp(pmap_t pmap, vm_page_t m, struct spglist *free) 1980 { 1981 1982 /* 1983 * unmap the page table page 1984 */ 1985 pmap->pm_pdir[m->pindex] = 0; 1986 --pmap->pm_stats.resident_count; 1987 1988 /* 1989 * There is not need to invalidate the recursive mapping since 1990 * we never instantiate such mapping for the usermode pmaps, 1991 * and never remove page table pages from the kernel pmap. 1992 * Put page on a list so that it is released since all TLB 1993 * shootdown is done. 1994 */ 1995 MPASS(pmap != kernel_pmap); 1996 pmap_add_delayed_free_list(m, free, TRUE); 1997 } 1998 1999 /* 2000 * After removing a page table entry, this routine is used to 2001 * conditionally free the page, and manage the reference count. 2002 */ 2003 static int 2004 pmap_unuse_pt(pmap_t pmap, vm_offset_t va, struct spglist *free) 2005 { 2006 pd_entry_t ptepde; 2007 vm_page_t mpte; 2008 2009 if (pmap == kernel_pmap) 2010 return (0); 2011 ptepde = *pmap_pde(pmap, va); 2012 mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME); 2013 return (pmap_unwire_ptp(pmap, mpte, free)); 2014 } 2015 2016 /* 2017 * Release a page table page reference after a failed attempt to create a 2018 * mapping. 2019 */ 2020 static void 2021 pmap_abort_ptp(pmap_t pmap, vm_offset_t va, vm_page_t mpte) 2022 { 2023 struct spglist free; 2024 2025 SLIST_INIT(&free); 2026 if (pmap_unwire_ptp(pmap, mpte, &free)) { 2027 /* 2028 * Although "va" was never mapped, paging-structure caches 2029 * could nonetheless have entries that refer to the freed 2030 * page table pages. Invalidate those entries. 2031 */ 2032 pmap_invalidate_page_int(pmap, va); 2033 vm_page_free_pages_toq(&free, true); 2034 } 2035 } 2036 2037 /* 2038 * Initialize the pmap for the swapper process. 
2039 */ 2040 static void 2041 __CONCAT(PMTYPE, pinit0)(pmap_t pmap) 2042 { 2043 2044 PMAP_LOCK_INIT(pmap); 2045 pmap->pm_pdir = IdlePTD; 2046 #ifdef PMAP_PAE_COMP 2047 pmap->pm_pdpt = IdlePDPT; 2048 #endif 2049 pmap->pm_root.rt_root = 0; 2050 CPU_ZERO(&pmap->pm_active); 2051 TAILQ_INIT(&pmap->pm_pvchunk); 2052 bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 2053 pmap_activate_boot(pmap); 2054 } 2055 2056 /* 2057 * Initialize a preallocated and zeroed pmap structure, 2058 * such as one in a vmspace structure. 2059 */ 2060 static int 2061 __CONCAT(PMTYPE, pinit)(pmap_t pmap) 2062 { 2063 vm_page_t m; 2064 int i; 2065 2066 /* 2067 * No need to allocate page table space yet but we do need a valid 2068 * page directory table. 2069 */ 2070 if (pmap->pm_pdir == NULL) { 2071 pmap->pm_pdir = (pd_entry_t *)kva_alloc(NBPTD); 2072 if (pmap->pm_pdir == NULL) 2073 return (0); 2074 #ifdef PMAP_PAE_COMP 2075 pmap->pm_pdpt = uma_zalloc(pdptzone, M_WAITOK | M_ZERO); 2076 KASSERT(((vm_offset_t)pmap->pm_pdpt & 2077 ((NPGPTD * sizeof(pdpt_entry_t)) - 1)) == 0, 2078 ("pmap_pinit: pdpt misaligned")); 2079 KASSERT(pmap_kextract((vm_offset_t)pmap->pm_pdpt) < (4ULL<<30), 2080 ("pmap_pinit: pdpt above 4g")); 2081 #endif 2082 pmap->pm_root.rt_root = 0; 2083 } 2084 KASSERT(vm_radix_is_empty(&pmap->pm_root), 2085 ("pmap_pinit: pmap has reserved page table page(s)")); 2086 2087 /* 2088 * allocate the page directory page(s) 2089 */ 2090 for (i = 0; i < NPGPTD; i++) { 2091 m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | 2092 VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_WAITOK); 2093 pmap->pm_ptdpg[i] = m; 2094 #ifdef PMAP_PAE_COMP 2095 pmap->pm_pdpt[i] = VM_PAGE_TO_PHYS(m) | PG_V; 2096 #endif 2097 } 2098 2099 pmap_qenter((vm_offset_t)pmap->pm_pdir, pmap->pm_ptdpg, NPGPTD); 2100 #ifdef PMAP_PAE_COMP 2101 if ((cpu_feature & CPUID_PAT) == 0) { 2102 pmap_invalidate_cache_range( 2103 trunc_page((vm_offset_t)pmap->pm_pdpt), 2104 round_page((vm_offset_t)pmap->pm_pdpt + 2105 NPGPTD * sizeof(pdpt_entry_t))); 2106 } 2107 #endif 2108 2109 for (i = 0; i < NPGPTD; i++) 2110 if ((pmap->pm_ptdpg[i]->flags & PG_ZERO) == 0) 2111 pagezero(pmap->pm_pdir + (i * NPDEPG)); 2112 2113 /* Install the trampoline mapping. */ 2114 pmap->pm_pdir[TRPTDI] = PTD[TRPTDI]; 2115 2116 CPU_ZERO(&pmap->pm_active); 2117 TAILQ_INIT(&pmap->pm_pvchunk); 2118 bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 2119 2120 return (1); 2121 } 2122 2123 /* 2124 * this routine is called if the page table page is not 2125 * mapped correctly. 2126 */ 2127 static vm_page_t 2128 _pmap_allocpte(pmap_t pmap, u_int ptepindex, u_int flags) 2129 { 2130 vm_paddr_t ptepa; 2131 vm_page_t m; 2132 2133 /* 2134 * Allocate a page table page. 2135 */ 2136 if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ | 2137 VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) { 2138 if ((flags & PMAP_ENTER_NOSLEEP) == 0) { 2139 PMAP_UNLOCK(pmap); 2140 rw_wunlock(&pvh_global_lock); 2141 vm_wait(NULL); 2142 rw_wlock(&pvh_global_lock); 2143 PMAP_LOCK(pmap); 2144 } 2145 2146 /* 2147 * Indicate the need to retry. While waiting, the page table 2148 * page may have been allocated. 2149 */ 2150 return (NULL); 2151 } 2152 if ((m->flags & PG_ZERO) == 0) 2153 pmap_zero_page(m); 2154 2155 /* 2156 * Map the pagetable page into the process address space, if 2157 * it isn't already there. 
2158 */ 2159 2160 pmap->pm_stats.resident_count++; 2161 2162 ptepa = VM_PAGE_TO_PHYS(m); 2163 pmap->pm_pdir[ptepindex] = 2164 (pd_entry_t) (ptepa | PG_U | PG_RW | PG_V | PG_A | PG_M); 2165 2166 return (m); 2167 } 2168 2169 static vm_page_t 2170 pmap_allocpte(pmap_t pmap, vm_offset_t va, u_int flags) 2171 { 2172 u_int ptepindex; 2173 pd_entry_t ptepa; 2174 vm_page_t m; 2175 2176 /* 2177 * Calculate pagetable page index 2178 */ 2179 ptepindex = va >> PDRSHIFT; 2180 retry: 2181 /* 2182 * Get the page directory entry 2183 */ 2184 ptepa = pmap->pm_pdir[ptepindex]; 2185 2186 /* 2187 * This supports switching from a 4MB page to a 2188 * normal 4K page. 2189 */ 2190 if (ptepa & PG_PS) { 2191 (void)pmap_demote_pde(pmap, &pmap->pm_pdir[ptepindex], va); 2192 ptepa = pmap->pm_pdir[ptepindex]; 2193 } 2194 2195 /* 2196 * If the page table page is mapped, we just increment the 2197 * hold count, and activate it. 2198 */ 2199 if (ptepa) { 2200 m = PHYS_TO_VM_PAGE(ptepa & PG_FRAME); 2201 m->ref_count++; 2202 } else { 2203 /* 2204 * Here if the pte page isn't mapped, or if it has 2205 * been deallocated. 2206 */ 2207 m = _pmap_allocpte(pmap, ptepindex, flags); 2208 if (m == NULL && (flags & PMAP_ENTER_NOSLEEP) == 0) 2209 goto retry; 2210 } 2211 return (m); 2212 } 2213 2214 2215 /*************************************************** 2216 * Pmap allocation/deallocation routines. 2217 ***************************************************/ 2218 2219 /* 2220 * Release any resources held by the given physical map. 2221 * Called when a pmap initialized by pmap_pinit is being released. 2222 * Should only be called if the map contains no valid mappings. 2223 */ 2224 static void 2225 __CONCAT(PMTYPE, release)(pmap_t pmap) 2226 { 2227 vm_page_t m; 2228 int i; 2229 2230 KASSERT(pmap->pm_stats.resident_count == 0, 2231 ("pmap_release: pmap resident count %ld != 0", 2232 pmap->pm_stats.resident_count)); 2233 KASSERT(vm_radix_is_empty(&pmap->pm_root), 2234 ("pmap_release: pmap has reserved page table page(s)")); 2235 KASSERT(CPU_EMPTY(&pmap->pm_active), 2236 ("releasing active pmap %p", pmap)); 2237 2238 pmap_qremove((vm_offset_t)pmap->pm_pdir, NPGPTD); 2239 2240 for (i = 0; i < NPGPTD; i++) { 2241 m = pmap->pm_ptdpg[i]; 2242 #ifdef PMAP_PAE_COMP 2243 KASSERT(VM_PAGE_TO_PHYS(m) == (pmap->pm_pdpt[i] & PG_FRAME), 2244 ("pmap_release: got wrong ptd page")); 2245 #endif 2246 vm_page_unwire_noq(m); 2247 vm_page_free(m); 2248 } 2249 } 2250 2251 /* 2252 * grow the number of kernel page table entries, if needed 2253 */ 2254 static void 2255 __CONCAT(PMTYPE, growkernel)(vm_offset_t addr) 2256 { 2257 vm_paddr_t ptppaddr; 2258 vm_page_t nkpg; 2259 pd_entry_t newpdir; 2260 2261 mtx_assert(&kernel_map->system_mtx, MA_OWNED); 2262 addr = roundup2(addr, NBPDR); 2263 if (addr - 1 >= vm_map_max(kernel_map)) 2264 addr = vm_map_max(kernel_map); 2265 while (kernel_vm_end < addr) { 2266 if (pdir_pde(PTD, kernel_vm_end)) { 2267 kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK; 2268 if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) { 2269 kernel_vm_end = vm_map_max(kernel_map); 2270 break; 2271 } 2272 continue; 2273 } 2274 2275 nkpg = vm_page_alloc(NULL, kernel_vm_end >> PDRSHIFT, 2276 VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | 2277 VM_ALLOC_ZERO); 2278 if (nkpg == NULL) 2279 panic("pmap_growkernel: no memory to grow kernel"); 2280 2281 nkpt++; 2282 2283 if ((nkpg->flags & PG_ZERO) == 0) 2284 pmap_zero_page(nkpg); 2285 ptppaddr = VM_PAGE_TO_PHYS(nkpg); 2286 newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M); 2287 
pdir_pde(KPTD, kernel_vm_end) = newpdir; 2288 2289 pmap_kenter_pde(kernel_vm_end, newpdir); 2290 kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK; 2291 if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) { 2292 kernel_vm_end = vm_map_max(kernel_map); 2293 break; 2294 } 2295 } 2296 } 2297 2298 2299 /*************************************************** 2300 * page management routines. 2301 ***************************************************/ 2302 2303 CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE); 2304 CTASSERT(_NPCM == 11); 2305 CTASSERT(_NPCPV == 336); 2306 2307 static __inline struct pv_chunk * 2308 pv_to_chunk(pv_entry_t pv) 2309 { 2310 2311 return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK)); 2312 } 2313 2314 #define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap) 2315 2316 #define PC_FREE0_9 0xfffffffful /* Free values for index 0 through 9 */ 2317 #define PC_FREE10 0x0000fffful /* Free values for index 10 */ 2318 2319 static const uint32_t pc_freemask[_NPCM] = { 2320 PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, 2321 PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, 2322 PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, 2323 PC_FREE0_9, PC_FREE10 2324 }; 2325 2326 #ifdef PV_STATS 2327 extern int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail; 2328 extern long pv_entry_frees, pv_entry_allocs; 2329 extern int pv_entry_spare; 2330 #endif 2331 2332 /* 2333 * We are in a serious low memory condition. Resort to 2334 * drastic measures to free some pages so we can allocate 2335 * another pv entry chunk. 2336 */ 2337 static vm_page_t 2338 pmap_pv_reclaim(pmap_t locked_pmap) 2339 { 2340 struct pch newtail; 2341 struct pv_chunk *pc; 2342 struct md_page *pvh; 2343 pd_entry_t *pde; 2344 pmap_t pmap; 2345 pt_entry_t *pte, tpte; 2346 pv_entry_t pv; 2347 vm_offset_t va; 2348 vm_page_t m, m_pc; 2349 struct spglist free; 2350 uint32_t inuse; 2351 int bit, field, freed; 2352 2353 PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED); 2354 pmap = NULL; 2355 m_pc = NULL; 2356 SLIST_INIT(&free); 2357 TAILQ_INIT(&newtail); 2358 while ((pc = TAILQ_FIRST(&pv_chunks)) != NULL && (pv_vafree == 0 || 2359 SLIST_EMPTY(&free))) { 2360 TAILQ_REMOVE(&pv_chunks, pc, pc_lru); 2361 if (pmap != pc->pc_pmap) { 2362 if (pmap != NULL) { 2363 pmap_invalidate_all_int(pmap); 2364 if (pmap != locked_pmap) 2365 PMAP_UNLOCK(pmap); 2366 } 2367 pmap = pc->pc_pmap; 2368 /* Avoid deadlock and lock recursion. */ 2369 if (pmap > locked_pmap) 2370 PMAP_LOCK(pmap); 2371 else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap)) { 2372 pmap = NULL; 2373 TAILQ_INSERT_TAIL(&newtail, pc, pc_lru); 2374 continue; 2375 } 2376 } 2377 2378 /* 2379 * Destroy every non-wired, 4 KB page mapping in the chunk. 
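 *
 * The loop below walks the chunk's allocation bitmap.  A sketch of one
 * field's scan (illustrative only; the annotations are not code):
 *
 *	inuse = ~pc->pc_map[field] & pc_freemask[field];
 *	while (inuse != 0) {
 *		bit = bsfl(inuse);	(lowest allocated entry)
 *		pv = &pc->pc_pventry[field * 32 + bit];
 *		... tear down the mapping, then mark the entry free ...
 *		inuse &= ~(1UL << bit);
 *	}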
2380 */ 2381 freed = 0; 2382 for (field = 0; field < _NPCM; field++) { 2383 for (inuse = ~pc->pc_map[field] & pc_freemask[field]; 2384 inuse != 0; inuse &= ~(1UL << bit)) { 2385 bit = bsfl(inuse); 2386 pv = &pc->pc_pventry[field * 32 + bit]; 2387 va = pv->pv_va; 2388 pde = pmap_pde(pmap, va); 2389 if ((*pde & PG_PS) != 0) 2390 continue; 2391 pte = __CONCAT(PMTYPE, pte)(pmap, va); 2392 tpte = *pte; 2393 if ((tpte & PG_W) == 0) 2394 tpte = pte_load_clear(pte); 2395 pmap_pte_release(pte); 2396 if ((tpte & PG_W) != 0) 2397 continue; 2398 KASSERT(tpte != 0, 2399 ("pmap_pv_reclaim: pmap %p va %x zero pte", 2400 pmap, va)); 2401 if ((tpte & PG_G) != 0) 2402 pmap_invalidate_page_int(pmap, va); 2403 m = PHYS_TO_VM_PAGE(tpte & PG_FRAME); 2404 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 2405 vm_page_dirty(m); 2406 if ((tpte & PG_A) != 0) 2407 vm_page_aflag_set(m, PGA_REFERENCED); 2408 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); 2409 if (TAILQ_EMPTY(&m->md.pv_list) && 2410 (m->flags & PG_FICTITIOUS) == 0) { 2411 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 2412 if (TAILQ_EMPTY(&pvh->pv_list)) { 2413 vm_page_aflag_clear(m, 2414 PGA_WRITEABLE); 2415 } 2416 } 2417 pc->pc_map[field] |= 1UL << bit; 2418 pmap_unuse_pt(pmap, va, &free); 2419 freed++; 2420 } 2421 } 2422 if (freed == 0) { 2423 TAILQ_INSERT_TAIL(&newtail, pc, pc_lru); 2424 continue; 2425 } 2426 /* Every freed mapping is for a 4 KB page. */ 2427 pmap->pm_stats.resident_count -= freed; 2428 PV_STAT(pv_entry_frees += freed); 2429 PV_STAT(pv_entry_spare += freed); 2430 pv_entry_count -= freed; 2431 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2432 for (field = 0; field < _NPCM; field++) 2433 if (pc->pc_map[field] != pc_freemask[field]) { 2434 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, 2435 pc_list); 2436 TAILQ_INSERT_TAIL(&newtail, pc, pc_lru); 2437 2438 /* 2439 * One freed pv entry in locked_pmap is 2440 * sufficient. 2441 */ 2442 if (pmap == locked_pmap) 2443 goto out; 2444 break; 2445 } 2446 if (field == _NPCM) { 2447 PV_STAT(pv_entry_spare -= _NPCPV); 2448 PV_STAT(pc_chunk_count--); 2449 PV_STAT(pc_chunk_frees++); 2450 /* Entire chunk is free; return it. */ 2451 m_pc = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc)); 2452 pmap_qremove((vm_offset_t)pc, 1); 2453 pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc); 2454 break; 2455 } 2456 } 2457 out: 2458 TAILQ_CONCAT(&pv_chunks, &newtail, pc_lru); 2459 if (pmap != NULL) { 2460 pmap_invalidate_all_int(pmap); 2461 if (pmap != locked_pmap) 2462 PMAP_UNLOCK(pmap); 2463 } 2464 if (m_pc == NULL && pv_vafree != 0 && SLIST_EMPTY(&free)) { 2465 m_pc = SLIST_FIRST(&free); 2466 SLIST_REMOVE_HEAD(&free, plinks.s.ss); 2467 /* Recycle a freed page table page. */ 2468 m_pc->ref_count = 1; 2469 } 2470 vm_page_free_pages_toq(&free, true); 2471 return (m_pc); 2472 } 2473 2474 /* 2475 * free the pv_entry back to the free list 2476 */ 2477 static void 2478 free_pv_entry(pmap_t pmap, pv_entry_t pv) 2479 { 2480 struct pv_chunk *pc; 2481 int idx, field, bit; 2482 2483 rw_assert(&pvh_global_lock, RA_WLOCKED); 2484 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2485 PV_STAT(pv_entry_frees++); 2486 PV_STAT(pv_entry_spare++); 2487 pv_entry_count--; 2488 pc = pv_to_chunk(pv); 2489 idx = pv - &pc->pc_pventry[0]; 2490 field = idx / 32; 2491 bit = idx % 32; 2492 pc->pc_map[field] |= 1ul << bit; 2493 for (idx = 0; idx < _NPCM; idx++) 2494 if (pc->pc_map[idx] != pc_freemask[idx]) { 2495 /* 2496 * 98% of the time, pc is already at the head of the 2497 * list. If it isn't already, move it to the head. 
2498 */ 2499 if (__predict_false(TAILQ_FIRST(&pmap->pm_pvchunk) != 2500 pc)) { 2501 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2502 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, 2503 pc_list); 2504 } 2505 return; 2506 } 2507 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2508 free_pv_chunk(pc); 2509 } 2510 2511 static void 2512 free_pv_chunk(struct pv_chunk *pc) 2513 { 2514 vm_page_t m; 2515 2516 TAILQ_REMOVE(&pv_chunks, pc, pc_lru); 2517 PV_STAT(pv_entry_spare -= _NPCPV); 2518 PV_STAT(pc_chunk_count--); 2519 PV_STAT(pc_chunk_frees++); 2520 /* entire chunk is free, return it */ 2521 m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc)); 2522 pmap_qremove((vm_offset_t)pc, 1); 2523 vm_page_unwire_noq(m); 2524 vm_page_free(m); 2525 pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc); 2526 } 2527 2528 /* 2529 * get a new pv_entry, allocating a block from the system 2530 * when needed. 2531 */ 2532 static pv_entry_t 2533 get_pv_entry(pmap_t pmap, boolean_t try) 2534 { 2535 static const struct timeval printinterval = { 60, 0 }; 2536 static struct timeval lastprint; 2537 int bit, field; 2538 pv_entry_t pv; 2539 struct pv_chunk *pc; 2540 vm_page_t m; 2541 2542 rw_assert(&pvh_global_lock, RA_WLOCKED); 2543 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2544 PV_STAT(pv_entry_allocs++); 2545 pv_entry_count++; 2546 if (pv_entry_count > pv_entry_high_water) 2547 if (ratecheck(&lastprint, &printinterval)) 2548 printf("Approaching the limit on PV entries, consider " 2549 "increasing either the vm.pmap.shpgperproc or the " 2550 "vm.pmap.pv_entries tunable.\n"); 2551 retry: 2552 pc = TAILQ_FIRST(&pmap->pm_pvchunk); 2553 if (pc != NULL) { 2554 for (field = 0; field < _NPCM; field++) { 2555 if (pc->pc_map[field]) { 2556 bit = bsfl(pc->pc_map[field]); 2557 break; 2558 } 2559 } 2560 if (field < _NPCM) { 2561 pv = &pc->pc_pventry[field * 32 + bit]; 2562 pc->pc_map[field] &= ~(1ul << bit); 2563 /* If this was the last item, move it to tail */ 2564 for (field = 0; field < _NPCM; field++) 2565 if (pc->pc_map[field] != 0) { 2566 PV_STAT(pv_entry_spare--); 2567 return (pv); /* not full, return */ 2568 } 2569 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2570 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list); 2571 PV_STAT(pv_entry_spare--); 2572 return (pv); 2573 } 2574 } 2575 /* 2576 * Access to the ptelist "pv_vafree" is synchronized by the pvh 2577 * global lock. If "pv_vafree" is currently non-empty, it will 2578 * remain non-empty until pmap_ptelist_alloc() completes. 
2579 */ 2580 if (pv_vafree == 0 || (m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | 2581 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) { 2582 if (try) { 2583 pv_entry_count--; 2584 PV_STAT(pc_chunk_tryfail++); 2585 return (NULL); 2586 } 2587 m = pmap_pv_reclaim(pmap); 2588 if (m == NULL) 2589 goto retry; 2590 } 2591 PV_STAT(pc_chunk_count++); 2592 PV_STAT(pc_chunk_allocs++); 2593 pc = (struct pv_chunk *)pmap_ptelist_alloc(&pv_vafree); 2594 pmap_qenter((vm_offset_t)pc, &m, 1); 2595 pc->pc_pmap = pmap; 2596 pc->pc_map[0] = pc_freemask[0] & ~1ul; /* preallocated bit 0 */ 2597 for (field = 1; field < _NPCM; field++) 2598 pc->pc_map[field] = pc_freemask[field]; 2599 TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru); 2600 pv = &pc->pc_pventry[0]; 2601 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); 2602 PV_STAT(pv_entry_spare += _NPCPV - 1); 2603 return (pv); 2604 } 2605 2606 static __inline pv_entry_t 2607 pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va) 2608 { 2609 pv_entry_t pv; 2610 2611 rw_assert(&pvh_global_lock, RA_WLOCKED); 2612 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { 2613 if (pmap == PV_PMAP(pv) && va == pv->pv_va) { 2614 TAILQ_REMOVE(&pvh->pv_list, pv, pv_next); 2615 break; 2616 } 2617 } 2618 return (pv); 2619 } 2620 2621 static void 2622 pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa) 2623 { 2624 struct md_page *pvh; 2625 pv_entry_t pv; 2626 vm_offset_t va_last; 2627 vm_page_t m; 2628 2629 rw_assert(&pvh_global_lock, RA_WLOCKED); 2630 KASSERT((pa & PDRMASK) == 0, 2631 ("pmap_pv_demote_pde: pa is not 4mpage aligned")); 2632 2633 /* 2634 * Transfer the 4mpage's pv entry for this mapping to the first 2635 * page's pv list. 2636 */ 2637 pvh = pa_to_pvh(pa); 2638 va = trunc_4mpage(va); 2639 pv = pmap_pvh_remove(pvh, pmap, va); 2640 KASSERT(pv != NULL, ("pmap_pv_demote_pde: pv not found")); 2641 m = PHYS_TO_VM_PAGE(pa); 2642 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); 2643 /* Instantiate the remaining NPTEPG - 1 pv entries. */ 2644 va_last = va + NBPDR - PAGE_SIZE; 2645 do { 2646 m++; 2647 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2648 ("pmap_pv_demote_pde: page %p is not managed", m)); 2649 va += PAGE_SIZE; 2650 pmap_insert_entry(pmap, va, m); 2651 } while (va < va_last); 2652 } 2653 2654 #if VM_NRESERVLEVEL > 0 2655 static void 2656 pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa) 2657 { 2658 struct md_page *pvh; 2659 pv_entry_t pv; 2660 vm_offset_t va_last; 2661 vm_page_t m; 2662 2663 rw_assert(&pvh_global_lock, RA_WLOCKED); 2664 KASSERT((pa & PDRMASK) == 0, 2665 ("pmap_pv_promote_pde: pa is not 4mpage aligned")); 2666 2667 /* 2668 * Transfer the first page's pv entry for this mapping to the 2669 * 4mpage's pv list. Aside from avoiding the cost of a call 2670 * to get_pv_entry(), a transfer avoids the possibility that 2671 * get_pv_entry() calls pmap_collect() and that pmap_collect() 2672 * removes one of the mappings that is being promoted. 2673 */ 2674 m = PHYS_TO_VM_PAGE(pa); 2675 va = trunc_4mpage(va); 2676 pv = pmap_pvh_remove(&m->md, pmap, va); 2677 KASSERT(pv != NULL, ("pmap_pv_promote_pde: pv not found")); 2678 pvh = pa_to_pvh(pa); 2679 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next); 2680 /* Free the remaining NPTEPG - 1 pv entries. 
*/ 2681 va_last = va + NBPDR - PAGE_SIZE; 2682 do { 2683 m++; 2684 va += PAGE_SIZE; 2685 pmap_pvh_free(&m->md, pmap, va); 2686 } while (va < va_last); 2687 } 2688 #endif /* VM_NRESERVLEVEL > 0 */ 2689 2690 static void 2691 pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va) 2692 { 2693 pv_entry_t pv; 2694 2695 pv = pmap_pvh_remove(pvh, pmap, va); 2696 KASSERT(pv != NULL, ("pmap_pvh_free: pv not found")); 2697 free_pv_entry(pmap, pv); 2698 } 2699 2700 static void 2701 pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va) 2702 { 2703 struct md_page *pvh; 2704 2705 rw_assert(&pvh_global_lock, RA_WLOCKED); 2706 pmap_pvh_free(&m->md, pmap, va); 2707 if (TAILQ_EMPTY(&m->md.pv_list) && (m->flags & PG_FICTITIOUS) == 0) { 2708 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 2709 if (TAILQ_EMPTY(&pvh->pv_list)) 2710 vm_page_aflag_clear(m, PGA_WRITEABLE); 2711 } 2712 } 2713 2714 /* 2715 * Create a pv entry for page at pa for 2716 * (pmap, va). 2717 */ 2718 static void 2719 pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m) 2720 { 2721 pv_entry_t pv; 2722 2723 rw_assert(&pvh_global_lock, RA_WLOCKED); 2724 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2725 pv = get_pv_entry(pmap, FALSE); 2726 pv->pv_va = va; 2727 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); 2728 } 2729 2730 /* 2731 * Conditionally create a pv entry. 2732 */ 2733 static boolean_t 2734 pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m) 2735 { 2736 pv_entry_t pv; 2737 2738 rw_assert(&pvh_global_lock, RA_WLOCKED); 2739 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2740 if (pv_entry_count < pv_entry_high_water && 2741 (pv = get_pv_entry(pmap, TRUE)) != NULL) { 2742 pv->pv_va = va; 2743 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); 2744 return (TRUE); 2745 } else 2746 return (FALSE); 2747 } 2748 2749 /* 2750 * Create the pv entries for each of the pages within a superpage. 2751 */ 2752 static bool 2753 pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, pd_entry_t pde, u_int flags) 2754 { 2755 struct md_page *pvh; 2756 pv_entry_t pv; 2757 bool noreclaim; 2758 2759 rw_assert(&pvh_global_lock, RA_WLOCKED); 2760 noreclaim = (flags & PMAP_ENTER_NORECLAIM) != 0; 2761 if ((noreclaim && pv_entry_count >= pv_entry_high_water) || 2762 (pv = get_pv_entry(pmap, noreclaim)) == NULL) 2763 return (false); 2764 pv->pv_va = va; 2765 pvh = pa_to_pvh(pde & PG_PS_FRAME); 2766 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next); 2767 return (true); 2768 } 2769 2770 /* 2771 * Fills a page table page with mappings to consecutive physical pages. 2772 */ 2773 static void 2774 pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte) 2775 { 2776 pt_entry_t *pte; 2777 2778 for (pte = firstpte; pte < firstpte + NPTEPG; pte++) { 2779 *pte = newpte; 2780 newpte += PAGE_SIZE; 2781 } 2782 } 2783 2784 /* 2785 * Tries to demote a 2- or 4MB page mapping. If demotion fails, the 2786 * 2- or 4MB page mapping is invalidated. 
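 *
 * In outline, a page table page is (re)used to describe the superpage as
 * NPTEPG separate 4KB mappings, each PTE derived from the old PDE by
 * clearing PG_PS and moving the PAT bit from PG_PDE_PAT to PG_PTE_PAT,
 * with successive PTEs advancing the frame by PAGE_SIZE (see
 * pmap_fill_ptp()); the PDE is then rewritten to point at that page
 * table page.  A sketch of the per-PTE derivation (illustrative only):
 *
 *	newpte = oldpde & ~PG_PS;
 *	if (newpte & PG_PDE_PAT)
 *		newpte ^= PG_PDE_PAT | PG_PTE_PAT;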
2787 */ 2788 static boolean_t 2789 pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va) 2790 { 2791 pd_entry_t newpde, oldpde; 2792 pt_entry_t *firstpte, newpte; 2793 vm_paddr_t mptepa; 2794 vm_page_t mpte; 2795 struct spglist free; 2796 vm_offset_t sva; 2797 2798 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2799 oldpde = *pde; 2800 KASSERT((oldpde & (PG_PS | PG_V)) == (PG_PS | PG_V), 2801 ("pmap_demote_pde: oldpde is missing PG_PS and/or PG_V")); 2802 if ((oldpde & PG_A) == 0 || (mpte = pmap_remove_pt_page(pmap, va)) == 2803 NULL) { 2804 KASSERT((oldpde & PG_W) == 0, 2805 ("pmap_demote_pde: page table page for a wired mapping" 2806 " is missing")); 2807 2808 /* 2809 * Invalidate the 2- or 4MB page mapping and return 2810 * "failure" if the mapping was never accessed or the 2811 * allocation of the new page table page fails. 2812 */ 2813 if ((oldpde & PG_A) == 0 || (mpte = vm_page_alloc(NULL, 2814 va >> PDRSHIFT, VM_ALLOC_NOOBJ | VM_ALLOC_NORMAL | 2815 VM_ALLOC_WIRED)) == NULL) { 2816 SLIST_INIT(&free); 2817 sva = trunc_4mpage(va); 2818 pmap_remove_pde(pmap, pde, sva, &free); 2819 if ((oldpde & PG_G) == 0) 2820 pmap_invalidate_pde_page(pmap, sva, oldpde); 2821 vm_page_free_pages_toq(&free, true); 2822 CTR2(KTR_PMAP, "pmap_demote_pde: failure for va %#x" 2823 " in pmap %p", va, pmap); 2824 return (FALSE); 2825 } 2826 if (pmap != kernel_pmap) { 2827 mpte->ref_count = NPTEPG; 2828 pmap->pm_stats.resident_count++; 2829 } 2830 } 2831 mptepa = VM_PAGE_TO_PHYS(mpte); 2832 2833 /* 2834 * If the page mapping is in the kernel's address space, then the 2835 * KPTmap can provide access to the page table page. Otherwise, 2836 * temporarily map the page table page (mpte) into the kernel's 2837 * address space at either PADDR1 or PADDR2. 2838 */ 2839 if (pmap == kernel_pmap) 2840 firstpte = &KPTmap[i386_btop(trunc_4mpage(va))]; 2841 else if (curthread->td_pinned > 0 && rw_wowned(&pvh_global_lock)) { 2842 if ((*PMAP1 & PG_FRAME) != mptepa) { 2843 *PMAP1 = mptepa | PG_RW | PG_V | PG_A | PG_M; 2844 #ifdef SMP 2845 PMAP1cpu = PCPU_GET(cpuid); 2846 #endif 2847 invlcaddr(PADDR1); 2848 PMAP1changed++; 2849 } else 2850 #ifdef SMP 2851 if (PMAP1cpu != PCPU_GET(cpuid)) { 2852 PMAP1cpu = PCPU_GET(cpuid); 2853 invlcaddr(PADDR1); 2854 PMAP1changedcpu++; 2855 } else 2856 #endif 2857 PMAP1unchanged++; 2858 firstpte = PADDR1; 2859 } else { 2860 mtx_lock(&PMAP2mutex); 2861 if ((*PMAP2 & PG_FRAME) != mptepa) { 2862 *PMAP2 = mptepa | PG_RW | PG_V | PG_A | PG_M; 2863 pmap_invalidate_page_int(kernel_pmap, 2864 (vm_offset_t)PADDR2); 2865 } 2866 firstpte = PADDR2; 2867 } 2868 newpde = mptepa | PG_M | PG_A | (oldpde & PG_U) | PG_RW | PG_V; 2869 KASSERT((oldpde & PG_A) != 0, 2870 ("pmap_demote_pde: oldpde is missing PG_A")); 2871 KASSERT((oldpde & (PG_M | PG_RW)) != PG_RW, 2872 ("pmap_demote_pde: oldpde is missing PG_M")); 2873 newpte = oldpde & ~PG_PS; 2874 if ((newpte & PG_PDE_PAT) != 0) 2875 newpte ^= PG_PDE_PAT | PG_PTE_PAT; 2876 2877 /* 2878 * If the page table page is not leftover from an earlier promotion, 2879 * initialize it. 2880 */ 2881 if (mpte->valid == 0) 2882 pmap_fill_ptp(firstpte, newpte); 2883 2884 KASSERT((*firstpte & PG_FRAME) == (newpte & PG_FRAME), 2885 ("pmap_demote_pde: firstpte and newpte map different physical" 2886 " addresses")); 2887 2888 /* 2889 * If the mapping has changed attributes, update the page table 2890 * entries. 2891 */ 2892 if ((*firstpte & PG_PTE_PROMOTE) != (newpte & PG_PTE_PROMOTE)) 2893 pmap_fill_ptp(firstpte, newpte); 2894 2895 /* 2896 * Demote the mapping. This pmap is locked. 
The old PDE has 2897 * PG_A set. If the old PDE has PG_RW set, it also has PG_M 2898 * set. Thus, there is no danger of a race with another 2899 * processor changing the setting of PG_A and/or PG_M between 2900 * the read above and the store below. 2901 */ 2902 if (workaround_erratum383) 2903 pmap_update_pde(pmap, va, pde, newpde); 2904 else if (pmap == kernel_pmap) 2905 pmap_kenter_pde(va, newpde); 2906 else 2907 pde_store(pde, newpde); 2908 if (firstpte == PADDR2) 2909 mtx_unlock(&PMAP2mutex); 2910 2911 /* 2912 * Invalidate the recursive mapping of the page table page. 2913 */ 2914 pmap_invalidate_page_int(pmap, (vm_offset_t)vtopte(va)); 2915 2916 /* 2917 * Demote the pv entry. This depends on the earlier demotion 2918 * of the mapping. Specifically, the (re)creation of a per- 2919 * page pv entry might trigger the execution of pmap_collect(), 2920 * which might reclaim a newly (re)created per-page pv entry 2921 * and destroy the associated mapping. In order to destroy 2922 * the mapping, the PDE must have already changed from mapping 2923 * the 2mpage to referencing the page table page. 2924 */ 2925 if ((oldpde & PG_MANAGED) != 0) 2926 pmap_pv_demote_pde(pmap, va, oldpde & PG_PS_FRAME); 2927 2928 pmap_pde_demotions++; 2929 CTR2(KTR_PMAP, "pmap_demote_pde: success for va %#x" 2930 " in pmap %p", va, pmap); 2931 return (TRUE); 2932 } 2933 2934 /* 2935 * Removes a 2- or 4MB page mapping from the kernel pmap. 2936 */ 2937 static void 2938 pmap_remove_kernel_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va) 2939 { 2940 pd_entry_t newpde; 2941 vm_paddr_t mptepa; 2942 vm_page_t mpte; 2943 2944 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2945 mpte = pmap_remove_pt_page(pmap, va); 2946 if (mpte == NULL) 2947 panic("pmap_remove_kernel_pde: Missing pt page."); 2948 2949 mptepa = VM_PAGE_TO_PHYS(mpte); 2950 newpde = mptepa | PG_M | PG_A | PG_RW | PG_V; 2951 2952 /* 2953 * If this page table page was unmapped by a promotion, then it 2954 * contains valid mappings. Zero it to invalidate those mappings. 2955 */ 2956 if (mpte->valid != 0) 2957 pagezero((void *)&KPTmap[i386_btop(trunc_4mpage(va))]); 2958 2959 /* 2960 * Remove the mapping. 2961 */ 2962 if (workaround_erratum383) 2963 pmap_update_pde(pmap, va, pde, newpde); 2964 else 2965 pmap_kenter_pde(va, newpde); 2966 2967 /* 2968 * Invalidate the recursive mapping of the page table page. 2969 */ 2970 pmap_invalidate_page_int(pmap, (vm_offset_t)vtopte(va)); 2971 } 2972 2973 /* 2974 * pmap_remove_pde: do the things to unmap a superpage in a process 2975 */ 2976 static void 2977 pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva, 2978 struct spglist *free) 2979 { 2980 struct md_page *pvh; 2981 pd_entry_t oldpde; 2982 vm_offset_t eva, va; 2983 vm_page_t m, mpte; 2984 2985 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2986 KASSERT((sva & PDRMASK) == 0, 2987 ("pmap_remove_pde: sva is not 4mpage aligned")); 2988 oldpde = pte_load_clear(pdq); 2989 if (oldpde & PG_W) 2990 pmap->pm_stats.wired_count -= NBPDR / PAGE_SIZE; 2991 2992 /* 2993 * Machines that don't support invlpg, also don't support 2994 * PG_G. 
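 *
 * Hence a global superpage mapping implies invlpg support; it is flushed
 * here, via pmap_invalidate_pde_page() on the kernel pmap, rather than
 * left to the batched invalidation that the caller performs only for
 * non-global entries.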
2995 */ 2996 if ((oldpde & PG_G) != 0) 2997 pmap_invalidate_pde_page(kernel_pmap, sva, oldpde); 2998 2999 pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE; 3000 if (oldpde & PG_MANAGED) { 3001 pvh = pa_to_pvh(oldpde & PG_PS_FRAME); 3002 pmap_pvh_free(pvh, pmap, sva); 3003 eva = sva + NBPDR; 3004 for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME); 3005 va < eva; va += PAGE_SIZE, m++) { 3006 if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW)) 3007 vm_page_dirty(m); 3008 if (oldpde & PG_A) 3009 vm_page_aflag_set(m, PGA_REFERENCED); 3010 if (TAILQ_EMPTY(&m->md.pv_list) && 3011 TAILQ_EMPTY(&pvh->pv_list)) 3012 vm_page_aflag_clear(m, PGA_WRITEABLE); 3013 } 3014 } 3015 if (pmap == kernel_pmap) { 3016 pmap_remove_kernel_pde(pmap, pdq, sva); 3017 } else { 3018 mpte = pmap_remove_pt_page(pmap, sva); 3019 if (mpte != NULL) { 3020 KASSERT(mpte->valid == VM_PAGE_BITS_ALL, 3021 ("pmap_remove_pde: pte page not promoted")); 3022 pmap->pm_stats.resident_count--; 3023 KASSERT(mpte->ref_count == NPTEPG, 3024 ("pmap_remove_pde: pte page ref count error")); 3025 mpte->ref_count = 0; 3026 pmap_add_delayed_free_list(mpte, free, FALSE); 3027 } 3028 } 3029 } 3030 3031 /* 3032 * pmap_remove_pte: do the things to unmap a page in a process 3033 */ 3034 static int 3035 pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va, 3036 struct spglist *free) 3037 { 3038 pt_entry_t oldpte; 3039 vm_page_t m; 3040 3041 rw_assert(&pvh_global_lock, RA_WLOCKED); 3042 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3043 oldpte = pte_load_clear(ptq); 3044 KASSERT(oldpte != 0, 3045 ("pmap_remove_pte: pmap %p va %x zero pte", pmap, va)); 3046 if (oldpte & PG_W) 3047 pmap->pm_stats.wired_count -= 1; 3048 /* 3049 * Machines that don't support invlpg, also don't support 3050 * PG_G. 3051 */ 3052 if (oldpte & PG_G) 3053 pmap_invalidate_page_int(kernel_pmap, va); 3054 pmap->pm_stats.resident_count -= 1; 3055 if (oldpte & PG_MANAGED) { 3056 m = PHYS_TO_VM_PAGE(oldpte & PG_FRAME); 3057 if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 3058 vm_page_dirty(m); 3059 if (oldpte & PG_A) 3060 vm_page_aflag_set(m, PGA_REFERENCED); 3061 pmap_remove_entry(pmap, m, va); 3062 } 3063 return (pmap_unuse_pt(pmap, va, free)); 3064 } 3065 3066 /* 3067 * Remove a single page from a process address space 3068 */ 3069 static void 3070 pmap_remove_page(pmap_t pmap, vm_offset_t va, struct spglist *free) 3071 { 3072 pt_entry_t *pte; 3073 3074 rw_assert(&pvh_global_lock, RA_WLOCKED); 3075 KASSERT(curthread->td_pinned > 0, ("curthread not pinned")); 3076 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3077 if ((pte = pmap_pte_quick(pmap, va)) == NULL || *pte == 0) 3078 return; 3079 pmap_remove_pte(pmap, pte, va, free); 3080 pmap_invalidate_page_int(pmap, va); 3081 } 3082 3083 /* 3084 * Removes the specified range of addresses from the page table page. 3085 */ 3086 static bool 3087 pmap_remove_ptes(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, 3088 struct spglist *free) 3089 { 3090 pt_entry_t *pte; 3091 bool anyvalid; 3092 3093 rw_assert(&pvh_global_lock, RA_WLOCKED); 3094 KASSERT(curthread->td_pinned > 0, ("curthread not pinned")); 3095 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3096 anyvalid = false; 3097 for (pte = pmap_pte_quick(pmap, sva); sva != eva; pte++, 3098 sva += PAGE_SIZE) { 3099 if (*pte == 0) 3100 continue; 3101 3102 /* 3103 * The TLB entry for a PG_G mapping is invalidated by 3104 * pmap_remove_pte(). 
3105 */ 3106 if ((*pte & PG_G) == 0) 3107 anyvalid = true; 3108 3109 if (pmap_remove_pte(pmap, pte, sva, free)) 3110 break; 3111 } 3112 return (anyvalid); 3113 } 3114 3115 /* 3116 * Remove the given range of addresses from the specified map. 3117 * 3118 * It is assumed that the start and end are properly 3119 * rounded to the page size. 3120 */ 3121 static void 3122 __CONCAT(PMTYPE, remove)(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 3123 { 3124 vm_offset_t pdnxt; 3125 pd_entry_t ptpaddr; 3126 struct spglist free; 3127 int anyvalid; 3128 3129 /* 3130 * Perform an unsynchronized read. This is, however, safe. 3131 */ 3132 if (pmap->pm_stats.resident_count == 0) 3133 return; 3134 3135 anyvalid = 0; 3136 SLIST_INIT(&free); 3137 3138 rw_wlock(&pvh_global_lock); 3139 sched_pin(); 3140 PMAP_LOCK(pmap); 3141 3142 /* 3143 * special handling of removing one page. a very 3144 * common operation and easy to short circuit some 3145 * code. 3146 */ 3147 if ((sva + PAGE_SIZE == eva) && 3148 ((pmap->pm_pdir[(sva >> PDRSHIFT)] & PG_PS) == 0)) { 3149 pmap_remove_page(pmap, sva, &free); 3150 goto out; 3151 } 3152 3153 for (; sva < eva; sva = pdnxt) { 3154 u_int pdirindex; 3155 3156 /* 3157 * Calculate index for next page table. 3158 */ 3159 pdnxt = (sva + NBPDR) & ~PDRMASK; 3160 if (pdnxt < sva) 3161 pdnxt = eva; 3162 if (pmap->pm_stats.resident_count == 0) 3163 break; 3164 3165 pdirindex = sva >> PDRSHIFT; 3166 ptpaddr = pmap->pm_pdir[pdirindex]; 3167 3168 /* 3169 * Weed out invalid mappings. Note: we assume that the page 3170 * directory table is always allocated, and in kernel virtual. 3171 */ 3172 if (ptpaddr == 0) 3173 continue; 3174 3175 /* 3176 * Check for large page. 3177 */ 3178 if ((ptpaddr & PG_PS) != 0) { 3179 /* 3180 * Are we removing the entire large page? If not, 3181 * demote the mapping and fall through. 3182 */ 3183 if (sva + NBPDR == pdnxt && eva >= pdnxt) { 3184 /* 3185 * The TLB entry for a PG_G mapping is 3186 * invalidated by pmap_remove_pde(). 3187 */ 3188 if ((ptpaddr & PG_G) == 0) 3189 anyvalid = 1; 3190 pmap_remove_pde(pmap, 3191 &pmap->pm_pdir[pdirindex], sva, &free); 3192 continue; 3193 } else if (!pmap_demote_pde(pmap, 3194 &pmap->pm_pdir[pdirindex], sva)) { 3195 /* The large page mapping was destroyed. */ 3196 continue; 3197 } 3198 } 3199 3200 /* 3201 * Limit our scan to either the end of the va represented 3202 * by the current page table page, or to the end of the 3203 * range being removed. 3204 */ 3205 if (pdnxt > eva) 3206 pdnxt = eva; 3207 3208 if (pmap_remove_ptes(pmap, sva, pdnxt, &free)) 3209 anyvalid = 1; 3210 } 3211 out: 3212 sched_unpin(); 3213 if (anyvalid) 3214 pmap_invalidate_all_int(pmap); 3215 rw_wunlock(&pvh_global_lock); 3216 PMAP_UNLOCK(pmap); 3217 vm_page_free_pages_toq(&free, true); 3218 } 3219 3220 /* 3221 * Routine: pmap_remove_all 3222 * Function: 3223 * Removes this physical page from 3224 * all physical maps in which it resides. 3225 * Reflects back modify bits to the pager. 3226 * 3227 * Notes: 3228 * Original versions of this routine were very 3229 * inefficient because they iteratively called 3230 * pmap_remove (slow...) 
3231 */ 3232 3233 static void 3234 __CONCAT(PMTYPE, remove_all)(vm_page_t m) 3235 { 3236 struct md_page *pvh; 3237 pv_entry_t pv; 3238 pmap_t pmap; 3239 pt_entry_t *pte, tpte; 3240 pd_entry_t *pde; 3241 vm_offset_t va; 3242 struct spglist free; 3243 3244 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3245 ("pmap_remove_all: page %p is not managed", m)); 3246 SLIST_INIT(&free); 3247 rw_wlock(&pvh_global_lock); 3248 sched_pin(); 3249 if ((m->flags & PG_FICTITIOUS) != 0) 3250 goto small_mappings; 3251 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 3252 while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) { 3253 va = pv->pv_va; 3254 pmap = PV_PMAP(pv); 3255 PMAP_LOCK(pmap); 3256 pde = pmap_pde(pmap, va); 3257 (void)pmap_demote_pde(pmap, pde, va); 3258 PMAP_UNLOCK(pmap); 3259 } 3260 small_mappings: 3261 while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { 3262 pmap = PV_PMAP(pv); 3263 PMAP_LOCK(pmap); 3264 pmap->pm_stats.resident_count--; 3265 pde = pmap_pde(pmap, pv->pv_va); 3266 KASSERT((*pde & PG_PS) == 0, ("pmap_remove_all: found" 3267 " a 4mpage in page %p's pv list", m)); 3268 pte = pmap_pte_quick(pmap, pv->pv_va); 3269 tpte = pte_load_clear(pte); 3270 KASSERT(tpte != 0, ("pmap_remove_all: pmap %p va %x zero pte", 3271 pmap, pv->pv_va)); 3272 if (tpte & PG_W) 3273 pmap->pm_stats.wired_count--; 3274 if (tpte & PG_A) 3275 vm_page_aflag_set(m, PGA_REFERENCED); 3276 3277 /* 3278 * Update the vm_page_t clean and reference bits. 3279 */ 3280 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 3281 vm_page_dirty(m); 3282 pmap_unuse_pt(pmap, pv->pv_va, &free); 3283 pmap_invalidate_page_int(pmap, pv->pv_va); 3284 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); 3285 free_pv_entry(pmap, pv); 3286 PMAP_UNLOCK(pmap); 3287 } 3288 vm_page_aflag_clear(m, PGA_WRITEABLE); 3289 sched_unpin(); 3290 rw_wunlock(&pvh_global_lock); 3291 vm_page_free_pages_toq(&free, true); 3292 } 3293 3294 /* 3295 * pmap_protect_pde: do the things to protect a 4mpage in a process 3296 */ 3297 static boolean_t 3298 pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot) 3299 { 3300 pd_entry_t newpde, oldpde; 3301 vm_page_t m, mt; 3302 boolean_t anychanged; 3303 3304 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3305 KASSERT((sva & PDRMASK) == 0, 3306 ("pmap_protect_pde: sva is not 4mpage aligned")); 3307 anychanged = FALSE; 3308 retry: 3309 oldpde = newpde = *pde; 3310 if ((prot & VM_PROT_WRITE) == 0) { 3311 if ((oldpde & (PG_MANAGED | PG_M | PG_RW)) == 3312 (PG_MANAGED | PG_M | PG_RW)) { 3313 m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME); 3314 for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++) 3315 vm_page_dirty(mt); 3316 } 3317 newpde &= ~(PG_RW | PG_M); 3318 } 3319 #ifdef PMAP_PAE_COMP 3320 if ((prot & VM_PROT_EXECUTE) == 0 && !i386_read_exec) 3321 newpde |= pg_nx; 3322 #endif 3323 if (newpde != oldpde) { 3324 /* 3325 * As an optimization to future operations on this PDE, clear 3326 * PG_PROMOTED. The impending invalidation will remove any 3327 * lingering 4KB page mappings from the TLB. 3328 */ 3329 if (!pde_cmpset(pde, oldpde, newpde & ~PG_PROMOTED)) 3330 goto retry; 3331 if ((oldpde & PG_G) != 0) 3332 pmap_invalidate_pde_page(kernel_pmap, sva, oldpde); 3333 else 3334 anychanged = TRUE; 3335 } 3336 return (anychanged); 3337 } 3338 3339 /* 3340 * Set the physical protection on the 3341 * specified range of this map as requested. 
3342 */ 3343 static void 3344 __CONCAT(PMTYPE, protect)(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, 3345 vm_prot_t prot) 3346 { 3347 vm_offset_t pdnxt; 3348 pd_entry_t ptpaddr; 3349 pt_entry_t *pte; 3350 boolean_t anychanged, pv_lists_locked; 3351 3352 KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot)); 3353 if (prot == VM_PROT_NONE) { 3354 pmap_remove(pmap, sva, eva); 3355 return; 3356 } 3357 3358 #ifdef PMAP_PAE_COMP 3359 if ((prot & (VM_PROT_WRITE | VM_PROT_EXECUTE)) == 3360 (VM_PROT_WRITE | VM_PROT_EXECUTE)) 3361 return; 3362 #else 3363 if (prot & VM_PROT_WRITE) 3364 return; 3365 #endif 3366 3367 if (pmap_is_current(pmap)) 3368 pv_lists_locked = FALSE; 3369 else { 3370 pv_lists_locked = TRUE; 3371 resume: 3372 rw_wlock(&pvh_global_lock); 3373 sched_pin(); 3374 } 3375 anychanged = FALSE; 3376 3377 PMAP_LOCK(pmap); 3378 for (; sva < eva; sva = pdnxt) { 3379 pt_entry_t obits, pbits; 3380 u_int pdirindex; 3381 3382 pdnxt = (sva + NBPDR) & ~PDRMASK; 3383 if (pdnxt < sva) 3384 pdnxt = eva; 3385 3386 pdirindex = sva >> PDRSHIFT; 3387 ptpaddr = pmap->pm_pdir[pdirindex]; 3388 3389 /* 3390 * Weed out invalid mappings. Note: we assume that the page 3391 * directory table is always allocated, and in kernel virtual. 3392 */ 3393 if (ptpaddr == 0) 3394 continue; 3395 3396 /* 3397 * Check for large page. 3398 */ 3399 if ((ptpaddr & PG_PS) != 0) { 3400 /* 3401 * Are we protecting the entire large page? If not, 3402 * demote the mapping and fall through. 3403 */ 3404 if (sva + NBPDR == pdnxt && eva >= pdnxt) { 3405 /* 3406 * The TLB entry for a PG_G mapping is 3407 * invalidated by pmap_protect_pde(). 3408 */ 3409 if (pmap_protect_pde(pmap, 3410 &pmap->pm_pdir[pdirindex], sva, prot)) 3411 anychanged = TRUE; 3412 continue; 3413 } else { 3414 if (!pv_lists_locked) { 3415 pv_lists_locked = TRUE; 3416 if (!rw_try_wlock(&pvh_global_lock)) { 3417 if (anychanged) 3418 pmap_invalidate_all_int( 3419 pmap); 3420 PMAP_UNLOCK(pmap); 3421 goto resume; 3422 } 3423 sched_pin(); 3424 } 3425 if (!pmap_demote_pde(pmap, 3426 &pmap->pm_pdir[pdirindex], sva)) { 3427 /* 3428 * The large page mapping was 3429 * destroyed. 3430 */ 3431 continue; 3432 } 3433 } 3434 } 3435 3436 if (pdnxt > eva) 3437 pdnxt = eva; 3438 3439 for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++, 3440 sva += PAGE_SIZE) { 3441 vm_page_t m; 3442 3443 retry: 3444 /* 3445 * Regardless of whether a pte is 32 or 64 bits in 3446 * size, PG_RW, PG_A, and PG_M are among the least 3447 * significant 32 bits. 
3448 */ 3449 obits = pbits = *pte; 3450 if ((pbits & PG_V) == 0) 3451 continue; 3452 3453 if ((prot & VM_PROT_WRITE) == 0) { 3454 if ((pbits & (PG_MANAGED | PG_M | PG_RW)) == 3455 (PG_MANAGED | PG_M | PG_RW)) { 3456 m = PHYS_TO_VM_PAGE(pbits & PG_FRAME); 3457 vm_page_dirty(m); 3458 } 3459 pbits &= ~(PG_RW | PG_M); 3460 } 3461 #ifdef PMAP_PAE_COMP 3462 if ((prot & VM_PROT_EXECUTE) == 0 && !i386_read_exec) 3463 pbits |= pg_nx; 3464 #endif 3465 3466 if (pbits != obits) { 3467 #ifdef PMAP_PAE_COMP 3468 if (!atomic_cmpset_64(pte, obits, pbits)) 3469 goto retry; 3470 #else 3471 if (!atomic_cmpset_int((u_int *)pte, obits, 3472 pbits)) 3473 goto retry; 3474 #endif 3475 if (obits & PG_G) 3476 pmap_invalidate_page_int(pmap, sva); 3477 else 3478 anychanged = TRUE; 3479 } 3480 } 3481 } 3482 if (anychanged) 3483 pmap_invalidate_all_int(pmap); 3484 if (pv_lists_locked) { 3485 sched_unpin(); 3486 rw_wunlock(&pvh_global_lock); 3487 } 3488 PMAP_UNLOCK(pmap); 3489 } 3490 3491 #if VM_NRESERVLEVEL > 0 3492 /* 3493 * Tries to promote the 512 or 1024, contiguous 4KB page mappings that are 3494 * within a single page table page (PTP) to a single 2- or 4MB page mapping. 3495 * For promotion to occur, two conditions must be met: (1) the 4KB page 3496 * mappings must map aligned, contiguous physical memory and (2) the 4KB page 3497 * mappings must have identical characteristics. 3498 * 3499 * Managed (PG_MANAGED) mappings within the kernel address space are not 3500 * promoted. The reason is that kernel PDEs are replicated in each pmap but 3501 * pmap_clear_ptes() and pmap_ts_referenced() only read the PDE from the kernel 3502 * pmap. 3503 */ 3504 static void 3505 pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va) 3506 { 3507 pd_entry_t newpde; 3508 pt_entry_t *firstpte, oldpte, pa, *pte; 3509 vm_offset_t oldpteva; 3510 vm_page_t mpte; 3511 3512 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3513 3514 /* 3515 * Examine the first PTE in the specified PTP. Abort if this PTE is 3516 * either invalid, unused, or does not map the first 4KB physical page 3517 * within a 2- or 4MB page. 3518 */ 3519 firstpte = pmap_pte_quick(pmap, trunc_4mpage(va)); 3520 setpde: 3521 newpde = *firstpte; 3522 if ((newpde & ((PG_FRAME & PDRMASK) | PG_A | PG_V)) != (PG_A | PG_V)) { 3523 pmap_pde_p_failures++; 3524 CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x" 3525 " in pmap %p", va, pmap); 3526 return; 3527 } 3528 if ((*firstpte & PG_MANAGED) != 0 && pmap == kernel_pmap) { 3529 pmap_pde_p_failures++; 3530 CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x" 3531 " in pmap %p", va, pmap); 3532 return; 3533 } 3534 if ((newpde & (PG_M | PG_RW)) == PG_RW) { 3535 /* 3536 * When PG_M is already clear, PG_RW can be cleared without 3537 * a TLB invalidation. 3538 */ 3539 if (!atomic_cmpset_int((u_int *)firstpte, newpde, newpde & 3540 ~PG_RW)) 3541 goto setpde; 3542 newpde &= ~PG_RW; 3543 } 3544 3545 /* 3546 * Examine each of the other PTEs in the specified PTP. Abort if this 3547 * PTE maps an unexpected 4KB physical page or does not have identical 3548 * characteristics to the first PTE. 
3549 */ 3550 pa = (newpde & (PG_PS_FRAME | PG_A | PG_V)) + NBPDR - PAGE_SIZE; 3551 for (pte = firstpte + NPTEPG - 1; pte > firstpte; pte--) { 3552 setpte: 3553 oldpte = *pte; 3554 if ((oldpte & (PG_FRAME | PG_A | PG_V)) != pa) { 3555 pmap_pde_p_failures++; 3556 CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x" 3557 " in pmap %p", va, pmap); 3558 return; 3559 } 3560 if ((oldpte & (PG_M | PG_RW)) == PG_RW) { 3561 /* 3562 * When PG_M is already clear, PG_RW can be cleared 3563 * without a TLB invalidation. 3564 */ 3565 if (!atomic_cmpset_int((u_int *)pte, oldpte, 3566 oldpte & ~PG_RW)) 3567 goto setpte; 3568 oldpte &= ~PG_RW; 3569 oldpteva = (oldpte & PG_FRAME & PDRMASK) | 3570 (va & ~PDRMASK); 3571 CTR2(KTR_PMAP, "pmap_promote_pde: protect for va %#x" 3572 " in pmap %p", oldpteva, pmap); 3573 } 3574 if ((oldpte & PG_PTE_PROMOTE) != (newpde & PG_PTE_PROMOTE)) { 3575 pmap_pde_p_failures++; 3576 CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x" 3577 " in pmap %p", va, pmap); 3578 return; 3579 } 3580 pa -= PAGE_SIZE; 3581 } 3582 3583 /* 3584 * Save the page table page in its current state until the PDE 3585 * mapping the superpage is demoted by pmap_demote_pde() or 3586 * destroyed by pmap_remove_pde(). 3587 */ 3588 mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME); 3589 KASSERT(mpte >= vm_page_array && 3590 mpte < &vm_page_array[vm_page_array_size], 3591 ("pmap_promote_pde: page table page is out of range")); 3592 KASSERT(mpte->pindex == va >> PDRSHIFT, 3593 ("pmap_promote_pde: page table page's pindex is wrong")); 3594 if (pmap_insert_pt_page(pmap, mpte, true)) { 3595 pmap_pde_p_failures++; 3596 CTR2(KTR_PMAP, 3597 "pmap_promote_pde: failure for va %#x in pmap %p", va, 3598 pmap); 3599 return; 3600 } 3601 3602 /* 3603 * Promote the pv entries. 3604 */ 3605 if ((newpde & PG_MANAGED) != 0) 3606 pmap_pv_promote_pde(pmap, va, newpde & PG_PS_FRAME); 3607 3608 /* 3609 * Propagate the PAT index to its proper position. 3610 */ 3611 if ((newpde & PG_PTE_PAT) != 0) 3612 newpde ^= PG_PDE_PAT | PG_PTE_PAT; 3613 3614 /* 3615 * Map the superpage. 3616 */ 3617 if (workaround_erratum383) 3618 pmap_update_pde(pmap, va, pde, PG_PS | newpde); 3619 else if (pmap == kernel_pmap) 3620 pmap_kenter_pde(va, PG_PROMOTED | PG_PS | newpde); 3621 else 3622 pde_store(pde, PG_PROMOTED | PG_PS | newpde); 3623 3624 pmap_pde_promotions++; 3625 CTR2(KTR_PMAP, "pmap_promote_pde: success for va %#x" 3626 " in pmap %p", va, pmap); 3627 } 3628 #endif /* VM_NRESERVLEVEL > 0 */ 3629 3630 /* 3631 * Insert the given physical page (p) at 3632 * the specified virtual address (v) in the 3633 * target physical map with the protection requested. 3634 * 3635 * If specified, the page will be wired down, meaning 3636 * that the related pte can not be reclaimed. 3637 * 3638 * NB: This is the only routine which MAY NOT lazy-evaluate 3639 * or lose information. That is, this routine must actually 3640 * insert this page into the given map NOW. 
3641 */ 3642 static int 3643 __CONCAT(PMTYPE, enter)(pmap_t pmap, vm_offset_t va, vm_page_t m, 3644 vm_prot_t prot, u_int flags, int8_t psind) 3645 { 3646 pd_entry_t *pde; 3647 pt_entry_t *pte; 3648 pt_entry_t newpte, origpte; 3649 pv_entry_t pv; 3650 vm_paddr_t opa, pa; 3651 vm_page_t mpte, om; 3652 int rv; 3653 3654 va = trunc_page(va); 3655 KASSERT((pmap == kernel_pmap && va < VM_MAX_KERNEL_ADDRESS) || 3656 (pmap != kernel_pmap && va < VM_MAXUSER_ADDRESS), 3657 ("pmap_enter: toobig k%d %#x", pmap == kernel_pmap, va)); 3658 KASSERT(va < PMAP_TRM_MIN_ADDRESS, 3659 ("pmap_enter: invalid to pmap_enter into trampoline (va: 0x%x)", 3660 va)); 3661 KASSERT(pmap != kernel_pmap || (m->oflags & VPO_UNMANAGED) != 0 || 3662 va < kmi.clean_sva || va >= kmi.clean_eva, 3663 ("pmap_enter: managed mapping within the clean submap")); 3664 if ((m->oflags & VPO_UNMANAGED) == 0) 3665 VM_PAGE_OBJECT_BUSY_ASSERT(m); 3666 KASSERT((flags & PMAP_ENTER_RESERVED) == 0, 3667 ("pmap_enter: flags %u has reserved bits set", flags)); 3668 pa = VM_PAGE_TO_PHYS(m); 3669 newpte = (pt_entry_t)(pa | PG_A | PG_V); 3670 if ((flags & VM_PROT_WRITE) != 0) 3671 newpte |= PG_M; 3672 if ((prot & VM_PROT_WRITE) != 0) 3673 newpte |= PG_RW; 3674 KASSERT((newpte & (PG_M | PG_RW)) != PG_M, 3675 ("pmap_enter: flags includes VM_PROT_WRITE but prot doesn't")); 3676 #ifdef PMAP_PAE_COMP 3677 if ((prot & VM_PROT_EXECUTE) == 0 && !i386_read_exec) 3678 newpte |= pg_nx; 3679 #endif 3680 if ((flags & PMAP_ENTER_WIRED) != 0) 3681 newpte |= PG_W; 3682 if (pmap != kernel_pmap) 3683 newpte |= PG_U; 3684 newpte |= pmap_cache_bits(pmap, m->md.pat_mode, psind > 0); 3685 if ((m->oflags & VPO_UNMANAGED) == 0) 3686 newpte |= PG_MANAGED; 3687 3688 rw_wlock(&pvh_global_lock); 3689 PMAP_LOCK(pmap); 3690 sched_pin(); 3691 if (psind == 1) { 3692 /* Assert the required virtual and physical alignment. */ 3693 KASSERT((va & PDRMASK) == 0, ("pmap_enter: va unaligned")); 3694 KASSERT(m->psind > 0, ("pmap_enter: m->psind < psind")); 3695 rv = pmap_enter_pde(pmap, va, newpte | PG_PS, flags, m); 3696 goto out; 3697 } 3698 3699 pde = pmap_pde(pmap, va); 3700 if (pmap != kernel_pmap) { 3701 /* 3702 * va is for UVA. 3703 * In the case that a page table page is not resident, 3704 * we are creating it here. pmap_allocpte() handles 3705 * demotion. 3706 */ 3707 mpte = pmap_allocpte(pmap, va, flags); 3708 if (mpte == NULL) { 3709 KASSERT((flags & PMAP_ENTER_NOSLEEP) != 0, 3710 ("pmap_allocpte failed with sleep allowed")); 3711 rv = KERN_RESOURCE_SHORTAGE; 3712 goto out; 3713 } 3714 } else { 3715 /* 3716 * va is for KVA, so pmap_demote_pde() will never fail 3717 * to install a page table page. PG_V is also 3718 * asserted by pmap_demote_pde(). 3719 */ 3720 mpte = NULL; 3721 KASSERT(pde != NULL && (*pde & PG_V) != 0, 3722 ("KVA %#x invalid pde pdir %#jx", va, 3723 (uintmax_t)pmap->pm_pdir[PTDPTDI])); 3724 if ((*pde & PG_PS) != 0) 3725 pmap_demote_pde(pmap, pde, va); 3726 } 3727 pte = pmap_pte_quick(pmap, va); 3728 3729 /* 3730 * Page Directory table entry is not valid, which should not 3731 * happen. We should have either allocated the page table 3732 * page or demoted the existing mapping above. 3733 */ 3734 if (pte == NULL) { 3735 panic("pmap_enter: invalid page directory pdir=%#jx, va=%#x", 3736 (uintmax_t)pmap->pm_pdir[PTDPTDI], va); 3737 } 3738 3739 origpte = *pte; 3740 pv = NULL; 3741 3742 /* 3743 * Is the specified virtual address already mapped? 3744 */ 3745 if ((origpte & PG_V) != 0) { 3746 /* 3747 * Wiring change, just update stats. 
We don't worry about 3748 * wiring PT pages as they remain resident as long as there 3749 * are valid mappings in them. Hence, if a user page is wired, 3750 * the PT page will be also. 3751 */ 3752 if ((newpte & PG_W) != 0 && (origpte & PG_W) == 0) 3753 pmap->pm_stats.wired_count++; 3754 else if ((newpte & PG_W) == 0 && (origpte & PG_W) != 0) 3755 pmap->pm_stats.wired_count--; 3756 3757 /* 3758 * Remove the extra PT page reference. 3759 */ 3760 if (mpte != NULL) { 3761 mpte->ref_count--; 3762 KASSERT(mpte->ref_count > 0, 3763 ("pmap_enter: missing reference to page table page," 3764 " va: 0x%x", va)); 3765 } 3766 3767 /* 3768 * Has the physical page changed? 3769 */ 3770 opa = origpte & PG_FRAME; 3771 if (opa == pa) { 3772 /* 3773 * No, might be a protection or wiring change. 3774 */ 3775 if ((origpte & PG_MANAGED) != 0 && 3776 (newpte & PG_RW) != 0) 3777 vm_page_aflag_set(m, PGA_WRITEABLE); 3778 if (((origpte ^ newpte) & ~(PG_M | PG_A)) == 0) 3779 goto unchanged; 3780 goto validate; 3781 } 3782 3783 /* 3784 * The physical page has changed. Temporarily invalidate 3785 * the mapping. This ensures that all threads sharing the 3786 * pmap keep a consistent view of the mapping, which is 3787 * necessary for the correct handling of COW faults. It 3788 * also permits reuse of the old mapping's PV entry, 3789 * avoiding an allocation. 3790 * 3791 * For consistency, handle unmanaged mappings the same way. 3792 */ 3793 origpte = pte_load_clear(pte); 3794 KASSERT((origpte & PG_FRAME) == opa, 3795 ("pmap_enter: unexpected pa update for %#x", va)); 3796 if ((origpte & PG_MANAGED) != 0) { 3797 om = PHYS_TO_VM_PAGE(opa); 3798 3799 /* 3800 * The pmap lock is sufficient to synchronize with 3801 * concurrent calls to pmap_page_test_mappings() and 3802 * pmap_ts_referenced(). 3803 */ 3804 if ((origpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 3805 vm_page_dirty(om); 3806 if ((origpte & PG_A) != 0) { 3807 pmap_invalidate_page_int(pmap, va); 3808 vm_page_aflag_set(om, PGA_REFERENCED); 3809 } 3810 pv = pmap_pvh_remove(&om->md, pmap, va); 3811 KASSERT(pv != NULL, 3812 ("pmap_enter: no PV entry for %#x", va)); 3813 if ((newpte & PG_MANAGED) == 0) 3814 free_pv_entry(pmap, pv); 3815 if ((om->a.flags & PGA_WRITEABLE) != 0 && 3816 TAILQ_EMPTY(&om->md.pv_list) && 3817 ((om->flags & PG_FICTITIOUS) != 0 || 3818 TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list))) 3819 vm_page_aflag_clear(om, PGA_WRITEABLE); 3820 } else { 3821 /* 3822 * Since this mapping is unmanaged, assume that PG_A 3823 * is set. 3824 */ 3825 pmap_invalidate_page_int(pmap, va); 3826 } 3827 origpte = 0; 3828 } else { 3829 /* 3830 * Increment the counters. 3831 */ 3832 if ((newpte & PG_W) != 0) 3833 pmap->pm_stats.wired_count++; 3834 pmap->pm_stats.resident_count++; 3835 } 3836 3837 /* 3838 * Enter on the PV list if part of our managed memory. 3839 */ 3840 if ((newpte & PG_MANAGED) != 0) { 3841 if (pv == NULL) { 3842 pv = get_pv_entry(pmap, FALSE); 3843 pv->pv_va = va; 3844 } 3845 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); 3846 if ((newpte & PG_RW) != 0) 3847 vm_page_aflag_set(m, PGA_WRITEABLE); 3848 } 3849 3850 /* 3851 * Update the PTE. 
3852 */ 3853 if ((origpte & PG_V) != 0) { 3854 validate: 3855 origpte = pte_load_store(pte, newpte); 3856 KASSERT((origpte & PG_FRAME) == pa, 3857 ("pmap_enter: unexpected pa update for %#x", va)); 3858 if ((newpte & PG_M) == 0 && (origpte & (PG_M | PG_RW)) == 3859 (PG_M | PG_RW)) { 3860 if ((origpte & PG_MANAGED) != 0) 3861 vm_page_dirty(m); 3862 3863 /* 3864 * Although the PTE may still have PG_RW set, TLB 3865 * invalidation may nonetheless be required because 3866 * the PTE no longer has PG_M set. 3867 */ 3868 } 3869 #ifdef PMAP_PAE_COMP 3870 else if ((origpte & PG_NX) != 0 || (newpte & PG_NX) == 0) { 3871 /* 3872 * This PTE change does not require TLB invalidation. 3873 */ 3874 goto unchanged; 3875 } 3876 #endif 3877 if ((origpte & PG_A) != 0) 3878 pmap_invalidate_page_int(pmap, va); 3879 } else 3880 pte_store_zero(pte, newpte); 3881 3882 unchanged: 3883 3884 #if VM_NRESERVLEVEL > 0 3885 /* 3886 * If both the page table page and the reservation are fully 3887 * populated, then attempt promotion. 3888 */ 3889 if ((mpte == NULL || mpte->ref_count == NPTEPG) && 3890 pg_ps_enabled && (m->flags & PG_FICTITIOUS) == 0 && 3891 vm_reserv_level_iffullpop(m) == 0) 3892 pmap_promote_pde(pmap, pde, va); 3893 #endif 3894 3895 rv = KERN_SUCCESS; 3896 out: 3897 sched_unpin(); 3898 rw_wunlock(&pvh_global_lock); 3899 PMAP_UNLOCK(pmap); 3900 return (rv); 3901 } 3902 3903 /* 3904 * Tries to create a read- and/or execute-only 2 or 4 MB page mapping. Returns 3905 * true if successful. Returns false if (1) a mapping already exists at the 3906 * specified virtual address or (2) a PV entry cannot be allocated without 3907 * reclaiming another PV entry. 3908 */ 3909 static bool 3910 pmap_enter_4mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot) 3911 { 3912 pd_entry_t newpde; 3913 3914 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3915 newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(pmap, m->md.pat_mode, 1) | 3916 PG_PS | PG_V; 3917 if ((m->oflags & VPO_UNMANAGED) == 0) 3918 newpde |= PG_MANAGED; 3919 #ifdef PMAP_PAE_COMP 3920 if ((prot & VM_PROT_EXECUTE) == 0 && !i386_read_exec) 3921 newpde |= pg_nx; 3922 #endif 3923 if (pmap != kernel_pmap) 3924 newpde |= PG_U; 3925 return (pmap_enter_pde(pmap, va, newpde, PMAP_ENTER_NOSLEEP | 3926 PMAP_ENTER_NOREPLACE | PMAP_ENTER_NORECLAIM, NULL) == 3927 KERN_SUCCESS); 3928 } 3929 3930 /* 3931 * Returns true if every page table entry in the page table page that maps 3932 * the specified kernel virtual address is zero. 3933 */ 3934 static bool 3935 pmap_every_pte_zero(vm_offset_t va) 3936 { 3937 pt_entry_t *pt_end, *pte; 3938 3939 KASSERT((va & PDRMASK) == 0, ("va is misaligned")); 3940 pte = vtopte(va); 3941 for (pt_end = pte + NPTEPG; pte < pt_end; pte++) { 3942 if (*pte != 0) 3943 return (false); 3944 } 3945 return (true); 3946 } 3947 3948 /* 3949 * Tries to create the specified 2 or 4 MB page mapping. Returns KERN_SUCCESS 3950 * if the mapping was created, and either KERN_FAILURE or 3951 * KERN_RESOURCE_SHORTAGE otherwise. Returns KERN_FAILURE if 3952 * PMAP_ENTER_NOREPLACE was specified and a mapping already exists at the 3953 * specified virtual address. Returns KERN_RESOURCE_SHORTAGE if 3954 * PMAP_ENTER_NORECLAIM was specified and a PV entry allocation failed. 3955 * 3956 * The parameter "m" is only used when creating a managed, writeable mapping. 
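 *
 * A typical caller is pmap_enter_4mpage() above, which passes
 * PMAP_ENTER_NOSLEEP | PMAP_ENTER_NOREPLACE | PMAP_ENTER_NORECLAIM so that
 * a failed opportunistic superpage mapping simply falls back to ordinary
 * 4KB mappings in pmap_enter_object().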
3957 */ 3958 static int 3959 pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde, u_int flags, 3960 vm_page_t m) 3961 { 3962 struct spglist free; 3963 pd_entry_t oldpde, *pde; 3964 vm_page_t mt; 3965 3966 rw_assert(&pvh_global_lock, RA_WLOCKED); 3967 KASSERT((newpde & (PG_M | PG_RW)) != PG_RW, 3968 ("pmap_enter_pde: newpde is missing PG_M")); 3969 KASSERT(pmap == kernel_pmap || (newpde & PG_W) == 0, 3970 ("pmap_enter_pde: cannot create wired user mapping")); 3971 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3972 pde = pmap_pde(pmap, va); 3973 oldpde = *pde; 3974 if ((oldpde & PG_V) != 0) { 3975 if ((flags & PMAP_ENTER_NOREPLACE) != 0 && (pmap != 3976 kernel_pmap || (oldpde & PG_PS) != 0 || 3977 !pmap_every_pte_zero(va))) { 3978 CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx" 3979 " in pmap %p", va, pmap); 3980 return (KERN_FAILURE); 3981 } 3982 /* Break the existing mapping(s). */ 3983 SLIST_INIT(&free); 3984 if ((oldpde & PG_PS) != 0) { 3985 /* 3986 * If the PDE resulted from a promotion, then a 3987 * reserved PT page could be freed. 3988 */ 3989 (void)pmap_remove_pde(pmap, pde, va, &free); 3990 if ((oldpde & PG_G) == 0) 3991 pmap_invalidate_pde_page(pmap, va, oldpde); 3992 } else { 3993 if (pmap_remove_ptes(pmap, va, va + NBPDR, &free)) 3994 pmap_invalidate_all_int(pmap); 3995 } 3996 if (pmap != kernel_pmap) { 3997 vm_page_free_pages_toq(&free, true); 3998 KASSERT(*pde == 0, ("pmap_enter_pde: non-zero pde %p", 3999 pde)); 4000 } else { 4001 KASSERT(SLIST_EMPTY(&free), 4002 ("pmap_enter_pde: freed kernel page table page")); 4003 4004 /* 4005 * Both pmap_remove_pde() and pmap_remove_ptes() will 4006 * leave the kernel page table page zero filled. 4007 */ 4008 mt = PHYS_TO_VM_PAGE(*pde & PG_FRAME); 4009 if (pmap_insert_pt_page(pmap, mt, false)) 4010 panic("pmap_enter_pde: trie insert failed"); 4011 } 4012 } 4013 if ((newpde & PG_MANAGED) != 0) { 4014 /* 4015 * Abort this mapping if its PV entry could not be created. 4016 */ 4017 if (!pmap_pv_insert_pde(pmap, va, newpde, flags)) { 4018 CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx" 4019 " in pmap %p", va, pmap); 4020 return (KERN_RESOURCE_SHORTAGE); 4021 } 4022 if ((newpde & PG_RW) != 0) { 4023 for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++) 4024 vm_page_aflag_set(mt, PGA_WRITEABLE); 4025 } 4026 } 4027 4028 /* 4029 * Increment counters. 4030 */ 4031 if ((newpde & PG_W) != 0) 4032 pmap->pm_stats.wired_count += NBPDR / PAGE_SIZE; 4033 pmap->pm_stats.resident_count += NBPDR / PAGE_SIZE; 4034 4035 /* 4036 * Map the superpage. (This is not a promoted mapping; there will not 4037 * be any lingering 4KB page mappings in the TLB.) 4038 */ 4039 pde_store(pde, newpde); 4040 4041 pmap_pde_mappings++; 4042 CTR2(KTR_PMAP, "pmap_enter_pde: success for va %#lx in pmap %p", 4043 va, pmap); 4044 return (KERN_SUCCESS); 4045 } 4046 4047 /* 4048 * Maps a sequence of resident pages belonging to the same object. 4049 * The sequence begins with the given page m_start. This page is 4050 * mapped at the given virtual address start. Each subsequent page is 4051 * mapped at a virtual address that is offset from start by the same 4052 * amount as the page is offset from m_start within the object. The 4053 * last page in the sequence is the page with the largest offset from 4054 * m_start that can be mapped at a virtual address less than the given 4055 * virtual address end. Not every virtual page between start and end 4056 * is mapped; only those for which a resident page exists with the 4057 * corresponding offset from m_start are mapped. 
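 *
 * For example, if m_start has pindex 5 and a resident page with pindex 8
 * exists in the object, that page is mapped at start + ptoa(3), provided
 * that this address is below end; offsets with no resident page are
 * skipped.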
4058 */ 4059 static void 4060 __CONCAT(PMTYPE, enter_object)(pmap_t pmap, vm_offset_t start, vm_offset_t end, 4061 vm_page_t m_start, vm_prot_t prot) 4062 { 4063 vm_offset_t va; 4064 vm_page_t m, mpte; 4065 vm_pindex_t diff, psize; 4066 4067 VM_OBJECT_ASSERT_LOCKED(m_start->object); 4068 4069 psize = atop(end - start); 4070 mpte = NULL; 4071 m = m_start; 4072 rw_wlock(&pvh_global_lock); 4073 PMAP_LOCK(pmap); 4074 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 4075 va = start + ptoa(diff); 4076 if ((va & PDRMASK) == 0 && va + NBPDR <= end && 4077 m->psind == 1 && pg_ps_enabled && 4078 pmap_enter_4mpage(pmap, va, m, prot)) 4079 m = &m[NBPDR / PAGE_SIZE - 1]; 4080 else 4081 mpte = pmap_enter_quick_locked(pmap, va, m, prot, 4082 mpte); 4083 m = TAILQ_NEXT(m, listq); 4084 } 4085 rw_wunlock(&pvh_global_lock); 4086 PMAP_UNLOCK(pmap); 4087 } 4088 4089 /* 4090 * this code makes some *MAJOR* assumptions: 4091 * 1. Current pmap & pmap exists. 4092 * 2. Not wired. 4093 * 3. Read access. 4094 * 4. No page table pages. 4095 * but is *MUCH* faster than pmap_enter... 4096 */ 4097 4098 static void 4099 __CONCAT(PMTYPE, enter_quick)(pmap_t pmap, vm_offset_t va, vm_page_t m, 4100 vm_prot_t prot) 4101 { 4102 4103 rw_wlock(&pvh_global_lock); 4104 PMAP_LOCK(pmap); 4105 (void)pmap_enter_quick_locked(pmap, va, m, prot, NULL); 4106 rw_wunlock(&pvh_global_lock); 4107 PMAP_UNLOCK(pmap); 4108 } 4109 4110 static vm_page_t 4111 pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, 4112 vm_prot_t prot, vm_page_t mpte) 4113 { 4114 pt_entry_t newpte, *pte; 4115 4116 KASSERT(pmap != kernel_pmap || va < kmi.clean_sva || 4117 va >= kmi.clean_eva || (m->oflags & VPO_UNMANAGED) != 0, 4118 ("pmap_enter_quick_locked: managed mapping within the clean submap")); 4119 rw_assert(&pvh_global_lock, RA_WLOCKED); 4120 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 4121 4122 /* 4123 * In the case that a page table page is not 4124 * resident, we are creating it here. 4125 */ 4126 if (pmap != kernel_pmap) { 4127 u_int ptepindex; 4128 pd_entry_t ptepa; 4129 4130 /* 4131 * Calculate pagetable page index 4132 */ 4133 ptepindex = va >> PDRSHIFT; 4134 if (mpte && (mpte->pindex == ptepindex)) { 4135 mpte->ref_count++; 4136 } else { 4137 /* 4138 * Get the page directory entry 4139 */ 4140 ptepa = pmap->pm_pdir[ptepindex]; 4141 4142 /* 4143 * If the page table page is mapped, we just increment 4144 * the hold count, and activate it. 4145 */ 4146 if (ptepa) { 4147 if (ptepa & PG_PS) 4148 return (NULL); 4149 mpte = PHYS_TO_VM_PAGE(ptepa & PG_FRAME); 4150 mpte->ref_count++; 4151 } else { 4152 mpte = _pmap_allocpte(pmap, ptepindex, 4153 PMAP_ENTER_NOSLEEP); 4154 if (mpte == NULL) 4155 return (mpte); 4156 } 4157 } 4158 } else { 4159 mpte = NULL; 4160 } 4161 4162 sched_pin(); 4163 pte = pmap_pte_quick(pmap, va); 4164 if (*pte) { 4165 if (mpte != NULL) 4166 mpte->ref_count--; 4167 sched_unpin(); 4168 return (NULL); 4169 } 4170 4171 /* 4172 * Enter on the PV list if part of our managed memory. 
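 *
 * If a PV entry cannot be allocated without reclamation, the mapping is
 * abandoned and the reference on the page table page is released via
 * pmap_abort_ptp() rather than sleeping or reclaiming.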
4173 */ 4174 if ((m->oflags & VPO_UNMANAGED) == 0 && 4175 !pmap_try_insert_pv_entry(pmap, va, m)) { 4176 if (mpte != NULL) 4177 pmap_abort_ptp(pmap, va, mpte); 4178 sched_unpin(); 4179 return (NULL); 4180 } 4181 4182 /* 4183 * Increment counters 4184 */ 4185 pmap->pm_stats.resident_count++; 4186 4187 newpte = VM_PAGE_TO_PHYS(m) | PG_V | 4188 pmap_cache_bits(pmap, m->md.pat_mode, 0); 4189 if ((m->oflags & VPO_UNMANAGED) == 0) 4190 newpte |= PG_MANAGED; 4191 #ifdef PMAP_PAE_COMP 4192 if ((prot & VM_PROT_EXECUTE) == 0 && !i386_read_exec) 4193 newpte |= pg_nx; 4194 #endif 4195 if (pmap != kernel_pmap) 4196 newpte |= PG_U; 4197 pte_store_zero(pte, newpte); 4198 sched_unpin(); 4199 return (mpte); 4200 } 4201 4202 /* 4203 * Make a temporary mapping for a physical address. This is only intended 4204 * to be used for panic dumps. 4205 */ 4206 static void * 4207 __CONCAT(PMTYPE, kenter_temporary)(vm_paddr_t pa, int i) 4208 { 4209 vm_offset_t va; 4210 4211 va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE); 4212 pmap_kenter(va, pa); 4213 invlpg(va); 4214 return ((void *)crashdumpmap); 4215 } 4216 4217 /* 4218 * This code maps large physical mmap regions into the 4219 * processor address space. Note that some shortcuts 4220 * are taken, but the code works. 4221 */ 4222 static void 4223 __CONCAT(PMTYPE, object_init_pt)(pmap_t pmap, vm_offset_t addr, 4224 vm_object_t object, vm_pindex_t pindex, vm_size_t size) 4225 { 4226 pd_entry_t *pde; 4227 vm_paddr_t pa, ptepa; 4228 vm_page_t p; 4229 int pat_mode; 4230 4231 VM_OBJECT_ASSERT_WLOCKED(object); 4232 KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG, 4233 ("pmap_object_init_pt: non-device object")); 4234 if (pg_ps_enabled && 4235 (addr & (NBPDR - 1)) == 0 && (size & (NBPDR - 1)) == 0) { 4236 if (!vm_object_populate(object, pindex, pindex + atop(size))) 4237 return; 4238 p = vm_page_lookup(object, pindex); 4239 KASSERT(p->valid == VM_PAGE_BITS_ALL, 4240 ("pmap_object_init_pt: invalid page %p", p)); 4241 pat_mode = p->md.pat_mode; 4242 4243 /* 4244 * Abort the mapping if the first page is not physically 4245 * aligned to a 2/4MB page boundary. 4246 */ 4247 ptepa = VM_PAGE_TO_PHYS(p); 4248 if (ptepa & (NBPDR - 1)) 4249 return; 4250 4251 /* 4252 * Skip the first page. Abort the mapping if the rest of 4253 * the pages are not physically contiguous or have differing 4254 * memory attributes. 4255 */ 4256 p = TAILQ_NEXT(p, listq); 4257 for (pa = ptepa + PAGE_SIZE; pa < ptepa + size; 4258 pa += PAGE_SIZE) { 4259 KASSERT(p->valid == VM_PAGE_BITS_ALL, 4260 ("pmap_object_init_pt: invalid page %p", p)); 4261 if (pa != VM_PAGE_TO_PHYS(p) || 4262 pat_mode != p->md.pat_mode) 4263 return; 4264 p = TAILQ_NEXT(p, listq); 4265 } 4266 4267 /* 4268 * Map using 2/4MB pages. Since "ptepa" is 2/4M aligned and 4269 * "size" is a multiple of 2/4M, adding the PAT setting to 4270 * "pa" will not affect the termination of this loop. 4271 */ 4272 PMAP_LOCK(pmap); 4273 for (pa = ptepa | pmap_cache_bits(pmap, pat_mode, 1); 4274 pa < ptepa + size; pa += NBPDR) { 4275 pde = pmap_pde(pmap, addr); 4276 if (*pde == 0) { 4277 pde_store(pde, pa | PG_PS | PG_M | PG_A | 4278 PG_U | PG_RW | PG_V); 4279 pmap->pm_stats.resident_count += NBPDR / 4280 PAGE_SIZE; 4281 pmap_pde_mappings++; 4282 } 4283 /* Else continue on if the PDE is already valid. */ 4284 addr += NBPDR; 4285 } 4286 PMAP_UNLOCK(pmap); 4287 } 4288 } 4289 4290 /* 4291 * Clear the wired attribute from the mappings for the specified range of 4292 * addresses in the given pmap. 
Every valid mapping within that range 4293 * must have the wired attribute set. In contrast, invalid mappings 4294 * cannot have the wired attribute set, so they are ignored. 4295 * 4296 * The wired attribute of the page table entry is not a hardware feature, 4297 * so there is no need to invalidate any TLB entries. 4298 */ 4299 static void 4300 __CONCAT(PMTYPE, unwire)(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 4301 { 4302 vm_offset_t pdnxt; 4303 pd_entry_t *pde; 4304 pt_entry_t *pte; 4305 boolean_t pv_lists_locked; 4306 4307 if (pmap_is_current(pmap)) 4308 pv_lists_locked = FALSE; 4309 else { 4310 pv_lists_locked = TRUE; 4311 resume: 4312 rw_wlock(&pvh_global_lock); 4313 sched_pin(); 4314 } 4315 PMAP_LOCK(pmap); 4316 for (; sva < eva; sva = pdnxt) { 4317 pdnxt = (sva + NBPDR) & ~PDRMASK; 4318 if (pdnxt < sva) 4319 pdnxt = eva; 4320 pde = pmap_pde(pmap, sva); 4321 if ((*pde & PG_V) == 0) 4322 continue; 4323 if ((*pde & PG_PS) != 0) { 4324 if ((*pde & PG_W) == 0) 4325 panic("pmap_unwire: pde %#jx is missing PG_W", 4326 (uintmax_t)*pde); 4327 4328 /* 4329 * Are we unwiring the entire large page? If not, 4330 * demote the mapping and fall through. 4331 */ 4332 if (sva + NBPDR == pdnxt && eva >= pdnxt) { 4333 /* 4334 * Regardless of whether a pde (or pte) is 32 4335 * or 64 bits in size, PG_W is among the least 4336 * significant 32 bits. 4337 */ 4338 atomic_clear_int((u_int *)pde, PG_W); 4339 pmap->pm_stats.wired_count -= NBPDR / 4340 PAGE_SIZE; 4341 continue; 4342 } else { 4343 if (!pv_lists_locked) { 4344 pv_lists_locked = TRUE; 4345 if (!rw_try_wlock(&pvh_global_lock)) { 4346 PMAP_UNLOCK(pmap); 4347 /* Repeat sva. */ 4348 goto resume; 4349 } 4350 sched_pin(); 4351 } 4352 if (!pmap_demote_pde(pmap, pde, sva)) 4353 panic("pmap_unwire: demotion failed"); 4354 } 4355 } 4356 if (pdnxt > eva) 4357 pdnxt = eva; 4358 for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++, 4359 sva += PAGE_SIZE) { 4360 if ((*pte & PG_V) == 0) 4361 continue; 4362 if ((*pte & PG_W) == 0) 4363 panic("pmap_unwire: pte %#jx is missing PG_W", 4364 (uintmax_t)*pte); 4365 4366 /* 4367 * PG_W must be cleared atomically. Although the pmap 4368 * lock synchronizes access to PG_W, another processor 4369 * could be setting PG_M and/or PG_A concurrently. 4370 * 4371 * PG_W is among the least significant 32 bits. 4372 */ 4373 atomic_clear_int((u_int *)pte, PG_W); 4374 pmap->pm_stats.wired_count--; 4375 } 4376 } 4377 if (pv_lists_locked) { 4378 sched_unpin(); 4379 rw_wunlock(&pvh_global_lock); 4380 } 4381 PMAP_UNLOCK(pmap); 4382 } 4383 4384 4385 /* 4386 * Copy the range specified by src_addr/len 4387 * from the source map to the range dst_addr/len 4388 * in the destination map. 4389 * 4390 * This routine is only advisory and need not do anything. Since 4391 * current pmap is always the kernel pmap when executing in 4392 * kernel, and we do not copy from the kernel pmap to a user 4393 * pmap, this optimization is not usable in 4/4G full split i386 4394 * world. 
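 *
 * (Where the user portion of the source pmap is directly accessible, this
 * routine would typically pre-copy page table entries when an address
 * space is duplicated, e.g. at fork(); skipping the copy merely costs the
 * child additional soft faults later.)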
4395 */ 4396 4397 static void 4398 __CONCAT(PMTYPE, copy)(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, 4399 vm_size_t len, vm_offset_t src_addr) 4400 { 4401 pt_entry_t *src_pte, *dst_pte, ptetemp; 4402 pd_entry_t srcptepaddr; 4403 vm_page_t dstmpte, srcmpte; 4404 vm_offset_t addr, end_addr, pdnxt; 4405 u_int ptepindex; 4406 4407 if (dst_addr != src_addr) 4408 return; 4409 4410 end_addr = src_addr + len; 4411 4412 rw_wlock(&pvh_global_lock); 4413 if (dst_pmap < src_pmap) { 4414 PMAP_LOCK(dst_pmap); 4415 PMAP_LOCK(src_pmap); 4416 } else { 4417 PMAP_LOCK(src_pmap); 4418 PMAP_LOCK(dst_pmap); 4419 } 4420 sched_pin(); 4421 for (addr = src_addr; addr < end_addr; addr = pdnxt) { 4422 KASSERT(addr < PMAP_TRM_MIN_ADDRESS, 4423 ("pmap_copy: invalid to pmap_copy the trampoline")); 4424 4425 pdnxt = (addr + NBPDR) & ~PDRMASK; 4426 if (pdnxt < addr) 4427 pdnxt = end_addr; 4428 ptepindex = addr >> PDRSHIFT; 4429 4430 srcptepaddr = src_pmap->pm_pdir[ptepindex]; 4431 if (srcptepaddr == 0) 4432 continue; 4433 4434 if (srcptepaddr & PG_PS) { 4435 if ((addr & PDRMASK) != 0 || addr + NBPDR > end_addr) 4436 continue; 4437 if (dst_pmap->pm_pdir[ptepindex] == 0 && 4438 ((srcptepaddr & PG_MANAGED) == 0 || 4439 pmap_pv_insert_pde(dst_pmap, addr, srcptepaddr, 4440 PMAP_ENTER_NORECLAIM))) { 4441 dst_pmap->pm_pdir[ptepindex] = srcptepaddr & 4442 ~PG_W; 4443 dst_pmap->pm_stats.resident_count += 4444 NBPDR / PAGE_SIZE; 4445 pmap_pde_mappings++; 4446 } 4447 continue; 4448 } 4449 4450 srcmpte = PHYS_TO_VM_PAGE(srcptepaddr & PG_FRAME); 4451 KASSERT(srcmpte->ref_count > 0, 4452 ("pmap_copy: source page table page is unused")); 4453 4454 if (pdnxt > end_addr) 4455 pdnxt = end_addr; 4456 4457 src_pte = pmap_pte_quick3(src_pmap, addr); 4458 while (addr < pdnxt) { 4459 ptetemp = *src_pte; 4460 /* 4461 * we only virtual copy managed pages 4462 */ 4463 if ((ptetemp & PG_MANAGED) != 0) { 4464 dstmpte = pmap_allocpte(dst_pmap, addr, 4465 PMAP_ENTER_NOSLEEP); 4466 if (dstmpte == NULL) 4467 goto out; 4468 dst_pte = pmap_pte_quick(dst_pmap, addr); 4469 if (*dst_pte == 0 && 4470 pmap_try_insert_pv_entry(dst_pmap, addr, 4471 PHYS_TO_VM_PAGE(ptetemp & PG_FRAME))) { 4472 /* 4473 * Clear the wired, modified, and 4474 * accessed (referenced) bits 4475 * during the copy. 4476 */ 4477 *dst_pte = ptetemp & ~(PG_W | PG_M | 4478 PG_A); 4479 dst_pmap->pm_stats.resident_count++; 4480 } else { 4481 pmap_abort_ptp(dst_pmap, addr, dstmpte); 4482 goto out; 4483 } 4484 if (dstmpte->ref_count >= srcmpte->ref_count) 4485 break; 4486 } 4487 addr += PAGE_SIZE; 4488 src_pte++; 4489 } 4490 } 4491 out: 4492 sched_unpin(); 4493 rw_wunlock(&pvh_global_lock); 4494 PMAP_UNLOCK(src_pmap); 4495 PMAP_UNLOCK(dst_pmap); 4496 } 4497 4498 /* 4499 * Zero 1 page of virtual memory mapped from a hardware page by the caller. 4500 */ 4501 static __inline void 4502 pagezero(void *page) 4503 { 4504 #if defined(I686_CPU) 4505 if (cpu_class == CPUCLASS_686) { 4506 if (cpu_feature & CPUID_SSE2) 4507 sse2_pagezero(page); 4508 else 4509 i686_pagezero(page); 4510 } else 4511 #endif 4512 bzero(page, PAGE_SIZE); 4513 } 4514 4515 /* 4516 * Zero the specified hardware page. 
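 *
 * The page is zeroed through the per-CPU CMAP2 window: with the thread
 * pinned, the CPU's private PTE (pc_cmap_pte2) is pointed at the page, the
 * stale TLB entry for pc_cmap_addr2 is invalidated, and the window is
 * cleared with pagezero().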
4517 */ 4518 static void 4519 __CONCAT(PMTYPE, zero_page)(vm_page_t m) 4520 { 4521 pt_entry_t *cmap_pte2; 4522 struct pcpu *pc; 4523 4524 sched_pin(); 4525 pc = get_pcpu(); 4526 cmap_pte2 = pc->pc_cmap_pte2; 4527 mtx_lock(&pc->pc_cmap_lock); 4528 if (*cmap_pte2) 4529 panic("pmap_zero_page: CMAP2 busy"); 4530 *cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M | 4531 pmap_cache_bits(kernel_pmap, m->md.pat_mode, 0); 4532 invlcaddr(pc->pc_cmap_addr2); 4533 pagezero(pc->pc_cmap_addr2); 4534 *cmap_pte2 = 0; 4535 4536 /* 4537 * Unpin the thread before releasing the lock. Otherwise the thread 4538 * could be rescheduled while still bound to the current CPU, only 4539 * to unpin itself immediately upon resuming execution. 4540 */ 4541 sched_unpin(); 4542 mtx_unlock(&pc->pc_cmap_lock); 4543 } 4544 4545 /* 4546 * Zero an area within a single hardware page. off and size must not 4547 * cover an area beyond a single hardware page. 4548 */ 4549 static void 4550 __CONCAT(PMTYPE, zero_page_area)(vm_page_t m, int off, int size) 4551 { 4552 pt_entry_t *cmap_pte2; 4553 struct pcpu *pc; 4554 4555 sched_pin(); 4556 pc = get_pcpu(); 4557 cmap_pte2 = pc->pc_cmap_pte2; 4558 mtx_lock(&pc->pc_cmap_lock); 4559 if (*cmap_pte2) 4560 panic("pmap_zero_page_area: CMAP2 busy"); 4561 *cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M | 4562 pmap_cache_bits(kernel_pmap, m->md.pat_mode, 0); 4563 invlcaddr(pc->pc_cmap_addr2); 4564 if (off == 0 && size == PAGE_SIZE) 4565 pagezero(pc->pc_cmap_addr2); 4566 else 4567 bzero(pc->pc_cmap_addr2 + off, size); 4568 *cmap_pte2 = 0; 4569 sched_unpin(); 4570 mtx_unlock(&pc->pc_cmap_lock); 4571 } 4572 4573 /* 4574 * Copy 1 specified hardware page to another. 4575 */ 4576 static void 4577 __CONCAT(PMTYPE, copy_page)(vm_page_t src, vm_page_t dst) 4578 { 4579 pt_entry_t *cmap_pte1, *cmap_pte2; 4580 struct pcpu *pc; 4581 4582 sched_pin(); 4583 pc = get_pcpu(); 4584 cmap_pte1 = pc->pc_cmap_pte1; 4585 cmap_pte2 = pc->pc_cmap_pte2; 4586 mtx_lock(&pc->pc_cmap_lock); 4587 if (*cmap_pte1) 4588 panic("pmap_copy_page: CMAP1 busy"); 4589 if (*cmap_pte2) 4590 panic("pmap_copy_page: CMAP2 busy"); 4591 *cmap_pte1 = PG_V | VM_PAGE_TO_PHYS(src) | PG_A | 4592 pmap_cache_bits(kernel_pmap, src->md.pat_mode, 0); 4593 invlcaddr(pc->pc_cmap_addr1); 4594 *cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(dst) | PG_A | PG_M | 4595 pmap_cache_bits(kernel_pmap, dst->md.pat_mode, 0); 4596 invlcaddr(pc->pc_cmap_addr2); 4597 bcopy(pc->pc_cmap_addr1, pc->pc_cmap_addr2, PAGE_SIZE); 4598 *cmap_pte1 = 0; 4599 *cmap_pte2 = 0; 4600 sched_unpin(); 4601 mtx_unlock(&pc->pc_cmap_lock); 4602 } 4603 4604 static void 4605 __CONCAT(PMTYPE, copy_pages)(vm_page_t ma[], vm_offset_t a_offset, 4606 vm_page_t mb[], vm_offset_t b_offset, int xfersize) 4607 { 4608 vm_page_t a_pg, b_pg; 4609 char *a_cp, *b_cp; 4610 vm_offset_t a_pg_offset, b_pg_offset; 4611 pt_entry_t *cmap_pte1, *cmap_pte2; 4612 struct pcpu *pc; 4613 int cnt; 4614 4615 sched_pin(); 4616 pc = get_pcpu(); 4617 cmap_pte1 = pc->pc_cmap_pte1; 4618 cmap_pte2 = pc->pc_cmap_pte2; 4619 mtx_lock(&pc->pc_cmap_lock); 4620 if (*cmap_pte1 != 0) 4621 panic("pmap_copy_pages: CMAP1 busy"); 4622 if (*cmap_pte2 != 0) 4623 panic("pmap_copy_pages: CMAP2 busy"); 4624 while (xfersize > 0) { 4625 a_pg = ma[a_offset >> PAGE_SHIFT]; 4626 a_pg_offset = a_offset & PAGE_MASK; 4627 cnt = min(xfersize, PAGE_SIZE - a_pg_offset); 4628 b_pg = mb[b_offset >> PAGE_SHIFT]; 4629 b_pg_offset = b_offset & PAGE_MASK; 4630 cnt = min(cnt, PAGE_SIZE - b_pg_offset); 4631 *cmap_pte1 = PG_V |
VM_PAGE_TO_PHYS(a_pg) | PG_A | 4632 pmap_cache_bits(kernel_pmap, a_pg->md.pat_mode, 0); 4633 invlcaddr(pc->pc_cmap_addr1); 4634 *cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(b_pg) | PG_A | 4635 PG_M | pmap_cache_bits(kernel_pmap, b_pg->md.pat_mode, 0); 4636 invlcaddr(pc->pc_cmap_addr2); 4637 a_cp = pc->pc_cmap_addr1 + a_pg_offset; 4638 b_cp = pc->pc_cmap_addr2 + b_pg_offset; 4639 bcopy(a_cp, b_cp, cnt); 4640 a_offset += cnt; 4641 b_offset += cnt; 4642 xfersize -= cnt; 4643 } 4644 *cmap_pte1 = 0; 4645 *cmap_pte2 = 0; 4646 sched_unpin(); 4647 mtx_unlock(&pc->pc_cmap_lock); 4648 } 4649 4650 /* 4651 * Returns true if the pmap's pv is one of the first 4652 * 16 pvs linked to from this page. This count may 4653 * be changed upwards or downwards in the future; it 4654 * is only necessary that true be returned for a small 4655 * subset of pmaps for proper page aging. 4656 */ 4657 static boolean_t 4658 __CONCAT(PMTYPE, page_exists_quick)(pmap_t pmap, vm_page_t m) 4659 { 4660 struct md_page *pvh; 4661 pv_entry_t pv; 4662 int loops = 0; 4663 boolean_t rv; 4664 4665 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 4666 ("pmap_page_exists_quick: page %p is not managed", m)); 4667 rv = FALSE; 4668 rw_wlock(&pvh_global_lock); 4669 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { 4670 if (PV_PMAP(pv) == pmap) { 4671 rv = TRUE; 4672 break; 4673 } 4674 loops++; 4675 if (loops >= 16) 4676 break; 4677 } 4678 if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) { 4679 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 4680 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { 4681 if (PV_PMAP(pv) == pmap) { 4682 rv = TRUE; 4683 break; 4684 } 4685 loops++; 4686 if (loops >= 16) 4687 break; 4688 } 4689 } 4690 rw_wunlock(&pvh_global_lock); 4691 return (rv); 4692 } 4693 4694 /* 4695 * pmap_page_wired_mappings: 4696 * 4697 * Return the number of managed mappings to the given physical page 4698 * that are wired. 4699 */ 4700 static int 4701 __CONCAT(PMTYPE, page_wired_mappings)(vm_page_t m) 4702 { 4703 int count; 4704 4705 count = 0; 4706 if ((m->oflags & VPO_UNMANAGED) != 0) 4707 return (count); 4708 rw_wlock(&pvh_global_lock); 4709 count = pmap_pvh_wired_mappings(&m->md, count); 4710 if ((m->flags & PG_FICTITIOUS) == 0) { 4711 count = pmap_pvh_wired_mappings(pa_to_pvh(VM_PAGE_TO_PHYS(m)), 4712 count); 4713 } 4714 rw_wunlock(&pvh_global_lock); 4715 return (count); 4716 } 4717 4718 /* 4719 * pmap_pvh_wired_mappings: 4720 * 4721 * Return the updated number "count" of managed mappings that are wired. 4722 */ 4723 static int 4724 pmap_pvh_wired_mappings(struct md_page *pvh, int count) 4725 { 4726 pmap_t pmap; 4727 pt_entry_t *pte; 4728 pv_entry_t pv; 4729 4730 rw_assert(&pvh_global_lock, RA_WLOCKED); 4731 sched_pin(); 4732 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { 4733 pmap = PV_PMAP(pv); 4734 PMAP_LOCK(pmap); 4735 pte = pmap_pte_quick(pmap, pv->pv_va); 4736 if ((*pte & PG_W) != 0) 4737 count++; 4738 PMAP_UNLOCK(pmap); 4739 } 4740 sched_unpin(); 4741 return (count); 4742 } 4743 4744 /* 4745 * Returns TRUE if the given page is mapped individually or as part of 4746 * a 4mpage. Otherwise, returns FALSE. 
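 *
 * Both the page's own pv list and, unless the page is fictitious, the pv
 * list of the enclosing 2/4MB page are examined under the global pv lock.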
4747 */ 4748 static boolean_t 4749 __CONCAT(PMTYPE, page_is_mapped)(vm_page_t m) 4750 { 4751 boolean_t rv; 4752 4753 if ((m->oflags & VPO_UNMANAGED) != 0) 4754 return (FALSE); 4755 rw_wlock(&pvh_global_lock); 4756 rv = !TAILQ_EMPTY(&m->md.pv_list) || 4757 ((m->flags & PG_FICTITIOUS) == 0 && 4758 !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list)); 4759 rw_wunlock(&pvh_global_lock); 4760 return (rv); 4761 } 4762 4763 /* 4764 * Remove all pages from specified address space 4765 * this aids process exit speeds. Also, this code 4766 * is special cased for current process only, but 4767 * can have the more generic (and slightly slower) 4768 * mode enabled. This is much faster than pmap_remove 4769 * in the case of running down an entire address space. 4770 */ 4771 static void 4772 __CONCAT(PMTYPE, remove_pages)(pmap_t pmap) 4773 { 4774 pt_entry_t *pte, tpte; 4775 vm_page_t m, mpte, mt; 4776 pv_entry_t pv; 4777 struct md_page *pvh; 4778 struct pv_chunk *pc, *npc; 4779 struct spglist free; 4780 int field, idx; 4781 int32_t bit; 4782 uint32_t inuse, bitmask; 4783 int allfree; 4784 4785 if (pmap != PCPU_GET(curpmap)) { 4786 printf("warning: pmap_remove_pages called with non-current pmap\n"); 4787 return; 4788 } 4789 SLIST_INIT(&free); 4790 rw_wlock(&pvh_global_lock); 4791 PMAP_LOCK(pmap); 4792 sched_pin(); 4793 TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) { 4794 KASSERT(pc->pc_pmap == pmap, ("Wrong pmap %p %p", pmap, 4795 pc->pc_pmap)); 4796 allfree = 1; 4797 for (field = 0; field < _NPCM; field++) { 4798 inuse = ~pc->pc_map[field] & pc_freemask[field]; 4799 while (inuse != 0) { 4800 bit = bsfl(inuse); 4801 bitmask = 1UL << bit; 4802 idx = field * 32 + bit; 4803 pv = &pc->pc_pventry[idx]; 4804 inuse &= ~bitmask; 4805 4806 pte = pmap_pde(pmap, pv->pv_va); 4807 tpte = *pte; 4808 if ((tpte & PG_PS) == 0) { 4809 pte = pmap_pte_quick(pmap, pv->pv_va); 4810 tpte = *pte & ~PG_PTE_PAT; 4811 } 4812 4813 if (tpte == 0) { 4814 printf( 4815 "TPTE at %p IS ZERO @ VA %08x\n", 4816 pte, pv->pv_va); 4817 panic("bad pte"); 4818 } 4819 4820 /* 4821 * We cannot remove wired pages from a process' mapping at this time 4822 */ 4823 if (tpte & PG_W) { 4824 allfree = 0; 4825 continue; 4826 } 4827 4828 m = PHYS_TO_VM_PAGE(tpte & PG_FRAME); 4829 KASSERT(m->phys_addr == (tpte & PG_FRAME), 4830 ("vm_page_t %p phys_addr mismatch %016jx %016jx", 4831 m, (uintmax_t)m->phys_addr, 4832 (uintmax_t)tpte)); 4833 4834 KASSERT((m->flags & PG_FICTITIOUS) != 0 || 4835 m < &vm_page_array[vm_page_array_size], 4836 ("pmap_remove_pages: bad tpte %#jx", 4837 (uintmax_t)tpte)); 4838 4839 pte_clear(pte); 4840 4841 /* 4842 * Update the vm_page_t clean/reference bits. 
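 *
 * A PTE with both PG_M and PG_RW set indicates that the page was written
 * through this mapping, so the dirty state is transferred to the vm_page
 * before the mapping is destroyed; for a 2/4MB mapping, every constituent
 * 4KB page is dirtied.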
4843 */ 4844 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) { 4845 if ((tpte & PG_PS) != 0) { 4846 for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++) 4847 vm_page_dirty(mt); 4848 } else 4849 vm_page_dirty(m); 4850 } 4851 4852 /* Mark free */ 4853 PV_STAT(pv_entry_frees++); 4854 PV_STAT(pv_entry_spare++); 4855 pv_entry_count--; 4856 pc->pc_map[field] |= bitmask; 4857 if ((tpte & PG_PS) != 0) { 4858 pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE; 4859 pvh = pa_to_pvh(tpte & PG_PS_FRAME); 4860 TAILQ_REMOVE(&pvh->pv_list, pv, pv_next); 4861 if (TAILQ_EMPTY(&pvh->pv_list)) { 4862 for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++) 4863 if (TAILQ_EMPTY(&mt->md.pv_list)) 4864 vm_page_aflag_clear(mt, PGA_WRITEABLE); 4865 } 4866 mpte = pmap_remove_pt_page(pmap, pv->pv_va); 4867 if (mpte != NULL) { 4868 KASSERT(mpte->valid == VM_PAGE_BITS_ALL, 4869 ("pmap_remove_pages: pte page not promoted")); 4870 pmap->pm_stats.resident_count--; 4871 KASSERT(mpte->ref_count == NPTEPG, 4872 ("pmap_remove_pages: pte page ref count error")); 4873 mpte->ref_count = 0; 4874 pmap_add_delayed_free_list(mpte, &free, FALSE); 4875 } 4876 } else { 4877 pmap->pm_stats.resident_count--; 4878 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); 4879 if (TAILQ_EMPTY(&m->md.pv_list) && 4880 (m->flags & PG_FICTITIOUS) == 0) { 4881 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 4882 if (TAILQ_EMPTY(&pvh->pv_list)) 4883 vm_page_aflag_clear(m, PGA_WRITEABLE); 4884 } 4885 pmap_unuse_pt(pmap, pv->pv_va, &free); 4886 } 4887 } 4888 } 4889 if (allfree) { 4890 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 4891 free_pv_chunk(pc); 4892 } 4893 } 4894 sched_unpin(); 4895 pmap_invalidate_all_int(pmap); 4896 rw_wunlock(&pvh_global_lock); 4897 PMAP_UNLOCK(pmap); 4898 vm_page_free_pages_toq(&free, true); 4899 } 4900 4901 /* 4902 * pmap_is_modified: 4903 * 4904 * Return whether or not the specified physical page was modified 4905 * in any physical maps. 4906 */ 4907 static boolean_t 4908 __CONCAT(PMTYPE, is_modified)(vm_page_t m) 4909 { 4910 boolean_t rv; 4911 4912 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 4913 ("pmap_is_modified: page %p is not managed", m)); 4914 4915 /* 4916 * If the page is not busied then this check is racy. 4917 */ 4918 if (!pmap_page_is_write_mapped(m)) 4919 return (FALSE); 4920 rw_wlock(&pvh_global_lock); 4921 rv = pmap_is_modified_pvh(&m->md) || 4922 ((m->flags & PG_FICTITIOUS) == 0 && 4923 pmap_is_modified_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m)))); 4924 rw_wunlock(&pvh_global_lock); 4925 return (rv); 4926 } 4927 4928 /* 4929 * Returns TRUE if any of the given mappings were used to modify 4930 * physical memory. Otherwise, returns FALSE. Both page and 4mpage 4931 * mappings are supported. 4932 */ 4933 static boolean_t 4934 pmap_is_modified_pvh(struct md_page *pvh) 4935 { 4936 pv_entry_t pv; 4937 pt_entry_t *pte; 4938 pmap_t pmap; 4939 boolean_t rv; 4940 4941 rw_assert(&pvh_global_lock, RA_WLOCKED); 4942 rv = FALSE; 4943 sched_pin(); 4944 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { 4945 pmap = PV_PMAP(pv); 4946 PMAP_LOCK(pmap); 4947 pte = pmap_pte_quick(pmap, pv->pv_va); 4948 rv = (*pte & (PG_M | PG_RW)) == (PG_M | PG_RW); 4949 PMAP_UNLOCK(pmap); 4950 if (rv) 4951 break; 4952 } 4953 sched_unpin(); 4954 return (rv); 4955 } 4956 4957 /* 4958 * pmap_is_prefaultable: 4959 * 4960 * Return whether or not the specified virtual address is eligible 4961 * for prefault.
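 *
 * An address is considered prefaultable when its page directory entry
 * points to a page table (it is neither invalid nor a 2/4MB mapping) and
 * the corresponding PTE is zero, so that a mapping can be installed
 * cheaply.  A sketch of the intended use (the fault handler's prefault
 * path does essentially this):
 *
 *	if (pmap_is_prefaultable(pmap, va))
 *		pmap_enter_quick(pmap, va, m, prot);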
4962 */ 4963 static boolean_t 4964 __CONCAT(PMTYPE, is_prefaultable)(pmap_t pmap, vm_offset_t addr) 4965 { 4966 pd_entry_t pde; 4967 boolean_t rv; 4968 4969 rv = FALSE; 4970 PMAP_LOCK(pmap); 4971 pde = *pmap_pde(pmap, addr); 4972 if (pde != 0 && (pde & PG_PS) == 0) 4973 rv = pmap_pte_ufast(pmap, addr, pde) == 0; 4974 PMAP_UNLOCK(pmap); 4975 return (rv); 4976 } 4977 4978 /* 4979 * pmap_is_referenced: 4980 * 4981 * Return whether or not the specified physical page was referenced 4982 * in any physical maps. 4983 */ 4984 static boolean_t 4985 __CONCAT(PMTYPE, is_referenced)(vm_page_t m) 4986 { 4987 boolean_t rv; 4988 4989 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 4990 ("pmap_is_referenced: page %p is not managed", m)); 4991 rw_wlock(&pvh_global_lock); 4992 rv = pmap_is_referenced_pvh(&m->md) || 4993 ((m->flags & PG_FICTITIOUS) == 0 && 4994 pmap_is_referenced_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m)))); 4995 rw_wunlock(&pvh_global_lock); 4996 return (rv); 4997 } 4998 4999 /* 5000 * Returns TRUE if any of the given mappings were referenced and FALSE 5001 * otherwise. Both page and 4mpage mappings are supported. 5002 */ 5003 static boolean_t 5004 pmap_is_referenced_pvh(struct md_page *pvh) 5005 { 5006 pv_entry_t pv; 5007 pt_entry_t *pte; 5008 pmap_t pmap; 5009 boolean_t rv; 5010 5011 rw_assert(&pvh_global_lock, RA_WLOCKED); 5012 rv = FALSE; 5013 sched_pin(); 5014 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { 5015 pmap = PV_PMAP(pv); 5016 PMAP_LOCK(pmap); 5017 pte = pmap_pte_quick(pmap, pv->pv_va); 5018 rv = (*pte & (PG_A | PG_V)) == (PG_A | PG_V); 5019 PMAP_UNLOCK(pmap); 5020 if (rv) 5021 break; 5022 } 5023 sched_unpin(); 5024 return (rv); 5025 } 5026 5027 /* 5028 * Clear the write and modified bits in each of the given page's mappings. 5029 */ 5030 static void 5031 __CONCAT(PMTYPE, remove_write)(vm_page_t m) 5032 { 5033 struct md_page *pvh; 5034 pv_entry_t next_pv, pv; 5035 pmap_t pmap; 5036 pd_entry_t *pde; 5037 pt_entry_t oldpte, *pte; 5038 vm_offset_t va; 5039 5040 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 5041 ("pmap_remove_write: page %p is not managed", m)); 5042 vm_page_assert_busied(m); 5043 5044 if (!pmap_page_is_write_mapped(m)) 5045 return; 5046 rw_wlock(&pvh_global_lock); 5047 sched_pin(); 5048 if ((m->flags & PG_FICTITIOUS) != 0) 5049 goto small_mappings; 5050 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 5051 TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) { 5052 va = pv->pv_va; 5053 pmap = PV_PMAP(pv); 5054 PMAP_LOCK(pmap); 5055 pde = pmap_pde(pmap, va); 5056 if ((*pde & PG_RW) != 0) 5057 (void)pmap_demote_pde(pmap, pde, va); 5058 PMAP_UNLOCK(pmap); 5059 } 5060 small_mappings: 5061 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { 5062 pmap = PV_PMAP(pv); 5063 PMAP_LOCK(pmap); 5064 pde = pmap_pde(pmap, pv->pv_va); 5065 KASSERT((*pde & PG_PS) == 0, ("pmap_clear_write: found" 5066 " a 4mpage in page %p's pv list", m)); 5067 pte = pmap_pte_quick(pmap, pv->pv_va); 5068 retry: 5069 oldpte = *pte; 5070 if ((oldpte & PG_RW) != 0) { 5071 /* 5072 * Regardless of whether a pte is 32 or 64 bits 5073 * in size, PG_RW and PG_M are among the least 5074 * significant 32 bits. 
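 *
 * The compare-and-set below can fail if another processor concurrently
 * sets PG_A or PG_M in this PTE; the PTE is then reloaded and the check
 * is retried.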
5075 */ 5076 if (!atomic_cmpset_int((u_int *)pte, oldpte, 5077 oldpte & ~(PG_RW | PG_M))) 5078 goto retry; 5079 if ((oldpte & PG_M) != 0) 5080 vm_page_dirty(m); 5081 pmap_invalidate_page_int(pmap, pv->pv_va); 5082 } 5083 PMAP_UNLOCK(pmap); 5084 } 5085 vm_page_aflag_clear(m, PGA_WRITEABLE); 5086 sched_unpin(); 5087 rw_wunlock(&pvh_global_lock); 5088 } 5089 5090 /* 5091 * pmap_ts_referenced: 5092 * 5093 * Return a count of reference bits for a page, clearing those bits. 5094 * It is not necessary for every reference bit to be cleared, but it 5095 * is necessary that 0 only be returned when there are truly no 5096 * reference bits set. 5097 * 5098 * As an optimization, update the page's dirty field if a modified bit is 5099 * found while counting reference bits. This opportunistic update can be 5100 * performed at low cost and can eliminate the need for some future calls 5101 * to pmap_is_modified(). However, since this function stops after 5102 * finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some 5103 * dirty pages. Those dirty pages will only be detected by a future call 5104 * to pmap_is_modified(). 5105 */ 5106 static int 5107 __CONCAT(PMTYPE, ts_referenced)(vm_page_t m) 5108 { 5109 struct md_page *pvh; 5110 pv_entry_t pv, pvf; 5111 pmap_t pmap; 5112 pd_entry_t *pde; 5113 pt_entry_t *pte; 5114 vm_paddr_t pa; 5115 int rtval = 0; 5116 5117 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 5118 ("pmap_ts_referenced: page %p is not managed", m)); 5119 pa = VM_PAGE_TO_PHYS(m); 5120 pvh = pa_to_pvh(pa); 5121 rw_wlock(&pvh_global_lock); 5122 sched_pin(); 5123 if ((m->flags & PG_FICTITIOUS) != 0 || 5124 (pvf = TAILQ_FIRST(&pvh->pv_list)) == NULL) 5125 goto small_mappings; 5126 pv = pvf; 5127 do { 5128 pmap = PV_PMAP(pv); 5129 PMAP_LOCK(pmap); 5130 pde = pmap_pde(pmap, pv->pv_va); 5131 if ((*pde & (PG_M | PG_RW)) == (PG_M | PG_RW)) { 5132 /* 5133 * Although "*pde" is mapping a 2/4MB page, because 5134 * this function is called at a 4KB page granularity, 5135 * we only update the 4KB page under test. 5136 */ 5137 vm_page_dirty(m); 5138 } 5139 if ((*pde & PG_A) != 0) { 5140 /* 5141 * Since this reference bit is shared by either 1024 5142 * or 512 4KB pages, it should not be cleared every 5143 * time it is tested. Apply a simple "hash" function 5144 * on the physical page number, the virtual superpage 5145 * number, and the pmap address to select one 4KB page 5146 * out of the 1024 or 512 on which testing the 5147 * reference bit will result in clearing that bit. 5148 * This function is designed to avoid the selection of 5149 * the same 4KB page for every 2- or 4MB page mapping. 5150 * 5151 * On demotion, a mapping that hasn't been referenced 5152 * is simply destroyed. To avoid the possibility of a 5153 * subsequent page fault on a demoted wired mapping, 5154 * always leave its reference bit set. Moreover, 5155 * since the superpage is wired, the current state of 5156 * its reference bit won't affect page replacement. 5157 */ 5158 if ((((pa >> PAGE_SHIFT) ^ (pv->pv_va >> PDRSHIFT) ^ 5159 (uintptr_t)pmap) & (NPTEPG - 1)) == 0 && 5160 (*pde & PG_W) == 0) { 5161 atomic_clear_int((u_int *)pde, PG_A); 5162 pmap_invalidate_page_int(pmap, pv->pv_va); 5163 } 5164 rtval++; 5165 } 5166 PMAP_UNLOCK(pmap); 5167 /* Rotate the PV list if it has more than one entry. 
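 * Rotation makes successive calls start with a different mapping, so the
 * sampling and clearing of reference bits is spread across all of the
 * page's mappings rather than always hitting the same pmap first.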
*/ 5168 if (TAILQ_NEXT(pv, pv_next) != NULL) { 5169 TAILQ_REMOVE(&pvh->pv_list, pv, pv_next); 5170 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next); 5171 } 5172 if (rtval >= PMAP_TS_REFERENCED_MAX) 5173 goto out; 5174 } while ((pv = TAILQ_FIRST(&pvh->pv_list)) != pvf); 5175 small_mappings: 5176 if ((pvf = TAILQ_FIRST(&m->md.pv_list)) == NULL) 5177 goto out; 5178 pv = pvf; 5179 do { 5180 pmap = PV_PMAP(pv); 5181 PMAP_LOCK(pmap); 5182 pde = pmap_pde(pmap, pv->pv_va); 5183 KASSERT((*pde & PG_PS) == 0, 5184 ("pmap_ts_referenced: found a 4mpage in page %p's pv list", 5185 m)); 5186 pte = pmap_pte_quick(pmap, pv->pv_va); 5187 if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 5188 vm_page_dirty(m); 5189 if ((*pte & PG_A) != 0) { 5190 atomic_clear_int((u_int *)pte, PG_A); 5191 pmap_invalidate_page_int(pmap, pv->pv_va); 5192 rtval++; 5193 } 5194 PMAP_UNLOCK(pmap); 5195 /* Rotate the PV list if it has more than one entry. */ 5196 if (TAILQ_NEXT(pv, pv_next) != NULL) { 5197 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); 5198 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); 5199 } 5200 } while ((pv = TAILQ_FIRST(&m->md.pv_list)) != pvf && rtval < 5201 PMAP_TS_REFERENCED_MAX); 5202 out: 5203 sched_unpin(); 5204 rw_wunlock(&pvh_global_lock); 5205 return (rtval); 5206 } 5207 5208 /* 5209 * Apply the given advice to the specified range of addresses within the 5210 * given pmap. Depending on the advice, clear the referenced and/or 5211 * modified flags in each mapping and set the mapped page's dirty field. 5212 */ 5213 static void 5214 __CONCAT(PMTYPE, advise)(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, 5215 int advice) 5216 { 5217 pd_entry_t oldpde, *pde; 5218 pt_entry_t *pte; 5219 vm_offset_t va, pdnxt; 5220 vm_page_t m; 5221 bool anychanged, pv_lists_locked; 5222 5223 if (advice != MADV_DONTNEED && advice != MADV_FREE) 5224 return; 5225 if (pmap_is_current(pmap)) 5226 pv_lists_locked = false; 5227 else { 5228 pv_lists_locked = true; 5229 resume: 5230 rw_wlock(&pvh_global_lock); 5231 sched_pin(); 5232 } 5233 anychanged = false; 5234 PMAP_LOCK(pmap); 5235 for (; sva < eva; sva = pdnxt) { 5236 pdnxt = (sva + NBPDR) & ~PDRMASK; 5237 if (pdnxt < sva) 5238 pdnxt = eva; 5239 pde = pmap_pde(pmap, sva); 5240 oldpde = *pde; 5241 if ((oldpde & PG_V) == 0) 5242 continue; 5243 else if ((oldpde & PG_PS) != 0) { 5244 if ((oldpde & PG_MANAGED) == 0) 5245 continue; 5246 if (!pv_lists_locked) { 5247 pv_lists_locked = true; 5248 if (!rw_try_wlock(&pvh_global_lock)) { 5249 if (anychanged) 5250 pmap_invalidate_all_int(pmap); 5251 PMAP_UNLOCK(pmap); 5252 goto resume; 5253 } 5254 sched_pin(); 5255 } 5256 if (!pmap_demote_pde(pmap, pde, sva)) { 5257 /* 5258 * The large page mapping was destroyed. 5259 */ 5260 continue; 5261 } 5262 5263 /* 5264 * Unless the page mappings are wired, remove the 5265 * mapping to a single page so that a subsequent 5266 * access may repromote. Choosing the last page 5267 * within the address range [sva, min(pdnxt, eva)) 5268 * generally results in more repromotions. Since the 5269 * underlying page table page is fully populated, this 5270 * removal never frees a page table page. 
5271 */ 5272 if ((oldpde & PG_W) == 0) { 5273 va = eva; 5274 if (va > pdnxt) 5275 va = pdnxt; 5276 va -= PAGE_SIZE; 5277 KASSERT(va >= sva, 5278 ("pmap_advise: no address gap")); 5279 pte = pmap_pte_quick(pmap, va); 5280 KASSERT((*pte & PG_V) != 0, 5281 ("pmap_advise: invalid PTE")); 5282 pmap_remove_pte(pmap, pte, va, NULL); 5283 anychanged = true; 5284 } 5285 } 5286 if (pdnxt > eva) 5287 pdnxt = eva; 5288 va = pdnxt; 5289 for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++, 5290 sva += PAGE_SIZE) { 5291 if ((*pte & (PG_MANAGED | PG_V)) != (PG_MANAGED | PG_V)) 5292 goto maybe_invlrng; 5293 else if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) { 5294 if (advice == MADV_DONTNEED) { 5295 /* 5296 * Future calls to pmap_is_modified() 5297 * can be avoided by making the page 5298 * dirty now. 5299 */ 5300 m = PHYS_TO_VM_PAGE(*pte & PG_FRAME); 5301 vm_page_dirty(m); 5302 } 5303 atomic_clear_int((u_int *)pte, PG_M | PG_A); 5304 } else if ((*pte & PG_A) != 0) 5305 atomic_clear_int((u_int *)pte, PG_A); 5306 else 5307 goto maybe_invlrng; 5308 if ((*pte & PG_G) != 0) { 5309 if (va == pdnxt) 5310 va = sva; 5311 } else 5312 anychanged = true; 5313 continue; 5314 maybe_invlrng: 5315 if (va != pdnxt) { 5316 pmap_invalidate_range_int(pmap, va, sva); 5317 va = pdnxt; 5318 } 5319 } 5320 if (va != pdnxt) 5321 pmap_invalidate_range_int(pmap, va, sva); 5322 } 5323 if (anychanged) 5324 pmap_invalidate_all_int(pmap); 5325 if (pv_lists_locked) { 5326 sched_unpin(); 5327 rw_wunlock(&pvh_global_lock); 5328 } 5329 PMAP_UNLOCK(pmap); 5330 } 5331 5332 /* 5333 * Clear the modify bits on the specified physical page. 5334 */ 5335 static void 5336 __CONCAT(PMTYPE, clear_modify)(vm_page_t m) 5337 { 5338 struct md_page *pvh; 5339 pv_entry_t next_pv, pv; 5340 pmap_t pmap; 5341 pd_entry_t oldpde, *pde; 5342 pt_entry_t *pte; 5343 vm_offset_t va; 5344 5345 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 5346 ("pmap_clear_modify: page %p is not managed", m)); 5347 vm_page_assert_busied(m); 5348 5349 if (!pmap_page_is_write_mapped(m)) 5350 return; 5351 rw_wlock(&pvh_global_lock); 5352 sched_pin(); 5353 if ((m->flags & PG_FICTITIOUS) != 0) 5354 goto small_mappings; 5355 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 5356 TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) { 5357 va = pv->pv_va; 5358 pmap = PV_PMAP(pv); 5359 PMAP_LOCK(pmap); 5360 pde = pmap_pde(pmap, va); 5361 oldpde = *pde; 5362 /* If oldpde has PG_RW set, then it also has PG_M set. */ 5363 if ((oldpde & PG_RW) != 0 && 5364 pmap_demote_pde(pmap, pde, va) && 5365 (oldpde & PG_W) == 0) { 5366 /* 5367 * Write protect the mapping to a single page so that 5368 * a subsequent write access may repromote. 5369 */ 5370 va += VM_PAGE_TO_PHYS(m) - (oldpde & PG_PS_FRAME); 5371 pte = pmap_pte_quick(pmap, va); 5372 /* 5373 * Regardless of whether a pte is 32 or 64 bits 5374 * in size, PG_RW and PG_M are among the least 5375 * significant 32 bits. 5376 */ 5377 atomic_clear_int((u_int *)pte, PG_M | PG_RW); 5378 vm_page_dirty(m); 5379 pmap_invalidate_page_int(pmap, va); 5380 } 5381 PMAP_UNLOCK(pmap); 5382 } 5383 small_mappings: 5384 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { 5385 pmap = PV_PMAP(pv); 5386 PMAP_LOCK(pmap); 5387 pde = pmap_pde(pmap, pv->pv_va); 5388 KASSERT((*pde & PG_PS) == 0, ("pmap_clear_modify: found" 5389 " a 4mpage in page %p's pv list", m)); 5390 pte = pmap_pte_quick(pmap, pv->pv_va); 5391 if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) { 5392 /* 5393 * Regardless of whether a pte is 32 or 64 bits 5394 * in size, PG_M is among the least significant 5395 * 32 bits. 
5396 */ 5397 atomic_clear_int((u_int *)pte, PG_M); 5398 pmap_invalidate_page_int(pmap, pv->pv_va); 5399 } 5400 PMAP_UNLOCK(pmap); 5401 } 5402 sched_unpin(); 5403 rw_wunlock(&pvh_global_lock); 5404 } 5405 5406 /* 5407 * Miscellaneous support routines follow 5408 */ 5409 5410 /* Adjust the cache mode for a 4KB page mapped via a PTE. */ 5411 static __inline void 5412 pmap_pte_attr(pt_entry_t *pte, int cache_bits) 5413 { 5414 u_int opte, npte; 5415 5416 /* 5417 * The cache mode bits are all in the low 32-bits of the 5418 * PTE, so we can just spin on updating the low 32-bits. 5419 */ 5420 do { 5421 opte = *(u_int *)pte; 5422 npte = opte & ~PG_PTE_CACHE; 5423 npte |= cache_bits; 5424 } while (npte != opte && !atomic_cmpset_int((u_int *)pte, opte, npte)); 5425 } 5426 5427 /* Adjust the cache mode for a 2/4MB page mapped via a PDE. */ 5428 static __inline void 5429 pmap_pde_attr(pd_entry_t *pde, int cache_bits) 5430 { 5431 u_int opde, npde; 5432 5433 /* 5434 * The cache mode bits are all in the low 32-bits of the 5435 * PDE, so we can just spin on updating the low 32-bits. 5436 */ 5437 do { 5438 opde = *(u_int *)pde; 5439 npde = opde & ~PG_PDE_CACHE; 5440 npde |= cache_bits; 5441 } while (npde != opde && !atomic_cmpset_int((u_int *)pde, opde, npde)); 5442 } 5443 5444 /* 5445 * Map a set of physical memory pages into the kernel virtual 5446 * address space. Return a pointer to where it is mapped. This 5447 * routine is intended to be used for mapping device memory, 5448 * NOT real memory. 5449 */ 5450 static void * 5451 __CONCAT(PMTYPE, mapdev_attr)(vm_paddr_t pa, vm_size_t size, int mode, 5452 int flags) 5453 { 5454 struct pmap_preinit_mapping *ppim; 5455 vm_offset_t va, offset; 5456 vm_page_t m; 5457 vm_size_t tmpsize; 5458 int i; 5459 5460 offset = pa & PAGE_MASK; 5461 size = round_page(offset + size); 5462 pa = pa & PG_FRAME; 5463 5464 if (pa < PMAP_MAP_LOW && pa + size <= PMAP_MAP_LOW) { 5465 va = pa + PMAP_MAP_LOW; 5466 if ((flags & MAPDEV_SETATTR) == 0) 5467 return ((void *)(va + offset)); 5468 } else if (!pmap_initialized) { 5469 va = 0; 5470 for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) { 5471 ppim = pmap_preinit_mapping + i; 5472 if (ppim->va == 0) { 5473 ppim->pa = pa; 5474 ppim->sz = size; 5475 ppim->mode = mode; 5476 ppim->va = virtual_avail; 5477 virtual_avail += size; 5478 va = ppim->va; 5479 break; 5480 } 5481 } 5482 if (va == 0) 5483 panic("%s: too many preinit mappings", __func__); 5484 } else { 5485 /* 5486 * If we have a preinit mapping, re-use it. 
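 *
 * Preinit mappings were installed by earlier calls made before
 * pmap_initialized was set, when the KVA allocator was not yet available;
 * a request with a matching physical address, size, and mode returns the
 * same virtual address.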
5487 */ 5488 for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) { 5489 ppim = pmap_preinit_mapping + i; 5490 if (ppim->pa == pa && ppim->sz == size && 5491 (ppim->mode == mode || 5492 (flags & MAPDEV_SETATTR) == 0)) 5493 return ((void *)(ppim->va + offset)); 5494 } 5495 va = kva_alloc(size); 5496 if (va == 0) 5497 panic("%s: Couldn't allocate KVA", __func__); 5498 } 5499 for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE) { 5500 if ((flags & MAPDEV_SETATTR) == 0 && pmap_initialized) { 5501 m = PHYS_TO_VM_PAGE(pa); 5502 if (m != NULL && VM_PAGE_TO_PHYS(m) == pa) { 5503 pmap_kenter_attr(va + tmpsize, pa + tmpsize, 5504 m->md.pat_mode); 5505 continue; 5506 } 5507 } 5508 pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode); 5509 } 5510 pmap_invalidate_range_int(kernel_pmap, va, va + tmpsize); 5511 pmap_invalidate_cache_range(va, va + size); 5512 return ((void *)(va + offset)); 5513 } 5514 5515 static void 5516 __CONCAT(PMTYPE, unmapdev)(vm_offset_t va, vm_size_t size) 5517 { 5518 struct pmap_preinit_mapping *ppim; 5519 vm_offset_t offset; 5520 int i; 5521 5522 if (va >= PMAP_MAP_LOW && va <= KERNBASE && va + size <= KERNBASE) 5523 return; 5524 offset = va & PAGE_MASK; 5525 size = round_page(offset + size); 5526 va = trunc_page(va); 5527 for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) { 5528 ppim = pmap_preinit_mapping + i; 5529 if (ppim->va == va && ppim->sz == size) { 5530 if (pmap_initialized) 5531 return; 5532 ppim->pa = 0; 5533 ppim->va = 0; 5534 ppim->sz = 0; 5535 ppim->mode = 0; 5536 if (va + size == virtual_avail) 5537 virtual_avail = va; 5538 return; 5539 } 5540 } 5541 if (pmap_initialized) 5542 kva_free(va, size); 5543 } 5544 5545 /* 5546 * Sets the memory attribute for the specified page. 5547 */ 5548 static void 5549 __CONCAT(PMTYPE, page_set_memattr)(vm_page_t m, vm_memattr_t ma) 5550 { 5551 5552 m->md.pat_mode = ma; 5553 if ((m->flags & PG_FICTITIOUS) != 0) 5554 return; 5555 5556 /* 5557 * If "m" is a normal page, flush it from the cache. 5558 * See pmap_invalidate_cache_range(). 5559 * 5560 * First, try to find an existing mapping of the page by sf 5561 * buffer. sf_buf_invalidate_cache() modifies mapping and 5562 * flushes the cache. 5563 */ 5564 if (sf_buf_invalidate_cache(m)) 5565 return; 5566 5567 /* 5568 * If page is not mapped by sf buffer, but CPU does not 5569 * support self snoop, map the page transient and do 5570 * invalidation. In the worst case, whole cache is flushed by 5571 * pmap_invalidate_cache_range(). 5572 */ 5573 if ((cpu_feature & CPUID_SS) == 0) 5574 pmap_flush_page(m); 5575 } 5576 5577 static void 5578 __CONCAT(PMTYPE, flush_page)(vm_page_t m) 5579 { 5580 pt_entry_t *cmap_pte2; 5581 struct pcpu *pc; 5582 vm_offset_t sva, eva; 5583 bool useclflushopt; 5584 5585 useclflushopt = (cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0; 5586 if (useclflushopt || (cpu_feature & CPUID_CLFSH) != 0) { 5587 sched_pin(); 5588 pc = get_pcpu(); 5589 cmap_pte2 = pc->pc_cmap_pte2; 5590 mtx_lock(&pc->pc_cmap_lock); 5591 if (*cmap_pte2) 5592 panic("pmap_flush_page: CMAP2 busy"); 5593 *cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | 5594 PG_A | PG_M | pmap_cache_bits(kernel_pmap, m->md.pat_mode, 5595 0); 5596 invlcaddr(pc->pc_cmap_addr2); 5597 sva = (vm_offset_t)pc->pc_cmap_addr2; 5598 eva = sva + PAGE_SIZE; 5599 5600 /* 5601 * Use mfence or sfence despite the ordering implied by 5602 * mtx_{un,}lock() because clflush on non-Intel CPUs 5603 * and clflushopt are not guaranteed to be ordered by 5604 * any other instruction. 
5605 */ 5606 if (useclflushopt) 5607 sfence(); 5608 else if (cpu_vendor_id != CPU_VENDOR_INTEL) 5609 mfence(); 5610 for (; sva < eva; sva += cpu_clflush_line_size) { 5611 if (useclflushopt) 5612 clflushopt(sva); 5613 else 5614 clflush(sva); 5615 } 5616 if (useclflushopt) 5617 sfence(); 5618 else if (cpu_vendor_id != CPU_VENDOR_INTEL) 5619 mfence(); 5620 *cmap_pte2 = 0; 5621 sched_unpin(); 5622 mtx_unlock(&pc->pc_cmap_lock); 5623 } else 5624 pmap_invalidate_cache(); 5625 } 5626 5627 /* 5628 * Changes the specified virtual address range's memory type to that given by 5629 * the parameter "mode". The specified virtual address range must be 5630 * completely contained within the kernel map. 5631 * 5632 * Returns zero if the change completed successfully, and either EINVAL or 5633 * ENOMEM if the change failed. Specifically, EINVAL is returned if some part 5634 * of the virtual address range was not mapped, and ENOMEM is returned if 5635 * there was insufficient memory available to complete the change. 5636 */ 5637 static int 5638 __CONCAT(PMTYPE, change_attr)(vm_offset_t va, vm_size_t size, int mode) 5639 { 5640 vm_offset_t base, offset, tmpva; 5641 pd_entry_t *pde; 5642 pt_entry_t *pte; 5643 int cache_bits_pte, cache_bits_pde; 5644 boolean_t changed; 5645 5646 base = trunc_page(va); 5647 offset = va & PAGE_MASK; 5648 size = round_page(offset + size); 5649 5650 /* 5651 * Only supported on kernel virtual addresses above the recursive map. 5652 */ 5653 if (base < VM_MIN_KERNEL_ADDRESS) 5654 return (EINVAL); 5655 5656 cache_bits_pde = pmap_cache_bits(kernel_pmap, mode, 1); 5657 cache_bits_pte = pmap_cache_bits(kernel_pmap, mode, 0); 5658 changed = FALSE; 5659 5660 /* 5661 * Pages that aren't mapped aren't supported. Also break down 5662 * 2/4MB pages into 4KB pages if required. 5663 */ 5664 PMAP_LOCK(kernel_pmap); 5665 for (tmpva = base; tmpva < base + size; ) { 5666 pde = pmap_pde(kernel_pmap, tmpva); 5667 if (*pde == 0) { 5668 PMAP_UNLOCK(kernel_pmap); 5669 return (EINVAL); 5670 } 5671 if (*pde & PG_PS) { 5672 /* 5673 * If the current 2/4MB page already has 5674 * the required memory type, then we need not 5675 * demote this page. Just increment tmpva to 5676 * the next 2/4MB page frame. 5677 */ 5678 if ((*pde & PG_PDE_CACHE) == cache_bits_pde) { 5679 tmpva = trunc_4mpage(tmpva) + NBPDR; 5680 continue; 5681 } 5682 5683 /* 5684 * If the current offset aligns with a 2/4MB 5685 * page frame and there is at least 2/4MB left 5686 * within the range, then we need not break 5687 * down this page into 4KB pages. 5688 */ 5689 if ((tmpva & PDRMASK) == 0 && 5690 tmpva + PDRMASK < base + size) { 5691 tmpva += NBPDR; 5692 continue; 5693 } 5694 if (!pmap_demote_pde(kernel_pmap, pde, tmpva)) { 5695 PMAP_UNLOCK(kernel_pmap); 5696 return (ENOMEM); 5697 } 5698 } 5699 pte = vtopte(tmpva); 5700 if (*pte == 0) { 5701 PMAP_UNLOCK(kernel_pmap); 5702 return (EINVAL); 5703 } 5704 tmpva += PAGE_SIZE; 5705 } 5706 PMAP_UNLOCK(kernel_pmap); 5707 5708 /* 5709 * Ok, all the pages exist, so run through them updating their 5710 * cache mode if required.
5711 */ 5712 for (tmpva = base; tmpva < base + size; ) { 5713 pde = pmap_pde(kernel_pmap, tmpva); 5714 if (*pde & PG_PS) { 5715 if ((*pde & PG_PDE_CACHE) != cache_bits_pde) { 5716 pmap_pde_attr(pde, cache_bits_pde); 5717 changed = TRUE; 5718 } 5719 tmpva = trunc_4mpage(tmpva) + NBPDR; 5720 } else { 5721 pte = vtopte(tmpva); 5722 if ((*pte & PG_PTE_CACHE) != cache_bits_pte) { 5723 pmap_pte_attr(pte, cache_bits_pte); 5724 changed = TRUE; 5725 } 5726 tmpva += PAGE_SIZE; 5727 } 5728 } 5729 5730 /* 5731 * Flush CPU caches to make sure any data isn't cached that 5732 * shouldn't be, etc. 5733 */ 5734 if (changed) { 5735 pmap_invalidate_range_int(kernel_pmap, base, tmpva); 5736 pmap_invalidate_cache_range(base, tmpva); 5737 } 5738 return (0); 5739 } 5740 5741 /* 5742 * Perform the pmap work for mincore(2). If the page is not both referenced and 5743 * modified by this pmap, returns its physical address so that the caller can 5744 * find other mappings. 5745 */ 5746 static int 5747 __CONCAT(PMTYPE, mincore)(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap) 5748 { 5749 pd_entry_t pde; 5750 pt_entry_t pte; 5751 vm_paddr_t pa; 5752 int val; 5753 5754 PMAP_LOCK(pmap); 5755 pde = *pmap_pde(pmap, addr); 5756 if (pde != 0) { 5757 if ((pde & PG_PS) != 0) { 5758 pte = pde; 5759 /* Compute the physical address of the 4KB page. */ 5760 pa = ((pde & PG_PS_FRAME) | (addr & PDRMASK)) & 5761 PG_FRAME; 5762 val = MINCORE_SUPER; 5763 } else { 5764 pte = pmap_pte_ufast(pmap, addr, pde); 5765 pa = pte & PG_FRAME; 5766 val = 0; 5767 } 5768 } else { 5769 pte = 0; 5770 pa = 0; 5771 val = 0; 5772 } 5773 if ((pte & PG_V) != 0) { 5774 val |= MINCORE_INCORE; 5775 if ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 5776 val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER; 5777 if ((pte & PG_A) != 0) 5778 val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER; 5779 } 5780 if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) != 5781 (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && 5782 (pte & (PG_MANAGED | PG_V)) == (PG_MANAGED | PG_V)) { 5783 *pap = pa; 5784 } 5785 PMAP_UNLOCK(pmap); 5786 return (val); 5787 } 5788 5789 static void 5790 __CONCAT(PMTYPE, activate)(struct thread *td) 5791 { 5792 pmap_t pmap, oldpmap; 5793 u_int cpuid; 5794 u_int32_t cr3; 5795 5796 critical_enter(); 5797 pmap = vmspace_pmap(td->td_proc->p_vmspace); 5798 oldpmap = PCPU_GET(curpmap); 5799 cpuid = PCPU_GET(cpuid); 5800 #if defined(SMP) 5801 CPU_CLR_ATOMIC(cpuid, &oldpmap->pm_active); 5802 CPU_SET_ATOMIC(cpuid, &pmap->pm_active); 5803 #else 5804 CPU_CLR(cpuid, &oldpmap->pm_active); 5805 CPU_SET(cpuid, &pmap->pm_active); 5806 #endif 5807 #ifdef PMAP_PAE_COMP 5808 cr3 = vtophys(pmap->pm_pdpt); 5809 #else 5810 cr3 = vtophys(pmap->pm_pdir); 5811 #endif 5812 /* 5813 * pmap_activate is for the current thread on the current cpu 5814 */ 5815 td->td_pcb->pcb_cr3 = cr3; 5816 PCPU_SET(curpmap, pmap); 5817 critical_exit(); 5818 } 5819 5820 static void 5821 __CONCAT(PMTYPE, activate_boot)(pmap_t pmap) 5822 { 5823 u_int cpuid; 5824 5825 cpuid = PCPU_GET(cpuid); 5826 #if defined(SMP) 5827 CPU_SET_ATOMIC(cpuid, &pmap->pm_active); 5828 #else 5829 CPU_SET(cpuid, &pmap->pm_active); 5830 #endif 5831 PCPU_SET(curpmap, pmap); 5832 } 5833 5834 /* 5835 * Increase the starting virtual address of the given mapping if a 5836 * different alignment might result in more superpage mappings. 
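 *
 * For example, with 4MB superpages, if the mapping's object offset lies
 * 1MB past a superpage boundary and the proposed address is superpage
 * aligned, the address is advanced by 1MB so that virtual addresses and
 * object offsets become congruent modulo NBPDR; for a large enough
 * mapping, fully populated and physically contiguous superpage-sized runs
 * of the object can then be promoted.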

static vm_offset_t
__CONCAT(PMTYPE, quick_enter_page)(vm_page_t m)
{
        vm_offset_t qaddr;
        pt_entry_t *pte;

        critical_enter();
        qaddr = PCPU_GET(qmap_addr);
        pte = vtopte(qaddr);

        KASSERT(*pte == 0,
            ("pmap_quick_enter_page: PTE busy %#jx", (uintmax_t)*pte));
        *pte = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
            pmap_cache_bits(kernel_pmap, pmap_page_get_memattr(m), 0);
        invlpg(qaddr);

        return (qaddr);
}

static void
__CONCAT(PMTYPE, quick_remove_page)(vm_offset_t addr)
{
        vm_offset_t qaddr;
        pt_entry_t *pte;

        qaddr = PCPU_GET(qmap_addr);
        pte = vtopte(qaddr);

        KASSERT(*pte != 0, ("pmap_quick_remove_page: PTE not in use"));
        KASSERT(addr == qaddr, ("pmap_quick_remove_page: invalid address"));

        *pte = 0;
        critical_exit();
}
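
/*
 * Usage sketch (editor's illustrative example): the quick-map KVA is per-CPU
 * and is only valid between the critical_enter() performed by
 * quick_enter_page() and the critical_exit() performed by
 * quick_remove_page(), so callers map, access, and unmap a single page
 * without sleeping.  The "m" and "buf" names below are hypothetical.
 *
 *      vm_offset_t qva;
 *
 *      qva = pmap_quick_enter_page(m);
 *      bcopy((const void *)qva, buf, PAGE_SIZE);
 *      pmap_quick_remove_page(qva);
 */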

static vmem_t *pmap_trm_arena;
static vmem_addr_t pmap_trm_arena_last = PMAP_TRM_MIN_ADDRESS;
static int trm_guard = PAGE_SIZE;

static int
pmap_trm_import(void *unused __unused, vmem_size_t size, int flags,
    vmem_addr_t *addrp)
{
        vm_page_t m;
        vmem_addr_t af, addr, prev_addr;
        pt_entry_t *trm_pte;

        prev_addr = atomic_load_long(&pmap_trm_arena_last);
        size = round_page(size) + trm_guard;
        for (;;) {
                if (prev_addr + size < prev_addr || prev_addr + size < size ||
                    prev_addr + size > PMAP_TRM_MAX_ADDRESS)
                        return (ENOMEM);
                addr = prev_addr + size;
                if (atomic_fcmpset_int(&pmap_trm_arena_last, &prev_addr, addr))
                        break;
        }
        prev_addr += trm_guard;
        trm_pte = PTmap + atop(prev_addr);
        for (af = prev_addr; af < addr; af += PAGE_SIZE) {
                m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_NOBUSY |
                    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_WAITOK);
                pte_store(&trm_pte[atop(af - prev_addr)], VM_PAGE_TO_PHYS(m) |
                    PG_M | PG_A | PG_RW | PG_V | pgeflag |
                    pmap_cache_bits(kernel_pmap, VM_MEMATTR_DEFAULT, FALSE));
        }
        *addrp = prev_addr;
        return (0);
}

void
pmap_init_trm(void)
{
        vm_page_t pd_m;

        TUNABLE_INT_FETCH("machdep.trm_guard", &trm_guard);
        if ((trm_guard & PAGE_MASK) != 0)
                trm_guard = 0;
        pmap_trm_arena = vmem_create("i386trampoline", 0, 0, 1, 0, M_WAITOK);
        vmem_set_import(pmap_trm_arena, pmap_trm_import, NULL, NULL, PAGE_SIZE);
        pd_m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_NOBUSY |
            VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_WAITOK | VM_ALLOC_ZERO);
        if ((pd_m->flags & PG_ZERO) == 0)
                pmap_zero_page(pd_m);
        PTD[TRPTDI] = VM_PAGE_TO_PHYS(pd_m) | PG_M | PG_A | PG_RW | PG_V |
            pmap_cache_bits(kernel_pmap, VM_MEMATTR_DEFAULT, TRUE);
}

static void *
__CONCAT(PMTYPE, trm_alloc)(size_t size, int flags)
{
        vmem_addr_t res;
        int error;

        MPASS((flags & ~(M_WAITOK | M_NOWAIT | M_ZERO)) == 0);
        error = vmem_xalloc(pmap_trm_arena, roundup2(size, 4), sizeof(int),
            0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX, flags | M_FIRSTFIT, &res);
        if (error != 0)
                return (NULL);
        if ((flags & M_ZERO) != 0)
                bzero((void *)res, size);
        return ((void *)res);
}

static void
__CONCAT(PMTYPE, trm_free)(void *addr, size_t size)
{

        vmem_free(pmap_trm_arena, (uintptr_t)addr, roundup2(size, 4));
}
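
/*
 * Usage sketch (editor's illustrative example): trampoline memory comes from
 * pmap_trm_arena, so it stays mapped by the trampoline page table entry
 * installed in pmap_init_trm() above.  A hypothetical caller pairs the
 * allocation and free, passing the same size to both:
 *
 *      struct example_trm_data *t;     (hypothetical structure)
 *
 *      t = pmap_trm_alloc(sizeof(*t), M_WAITOK | M_ZERO);
 *      ...
 *      pmap_trm_free(t, sizeof(*t));
 */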

static void
__CONCAT(PMTYPE, ksetrw)(vm_offset_t va)
{

        *vtopte(va) |= PG_RW;
}

static void
__CONCAT(PMTYPE, remap_lowptdi)(bool enable)
{

        PTD[KPTDI] = enable ? PTD[LOWPTDI] : 0;
        invltlb_glob();
}

static vm_offset_t
__CONCAT(PMTYPE, get_map_low)(void)
{

        return (PMAP_MAP_LOW);
}

static vm_offset_t
__CONCAT(PMTYPE, get_vm_maxuser_address)(void)
{

        return (VM_MAXUSER_ADDRESS);
}

static vm_paddr_t
__CONCAT(PMTYPE, pg_frame)(vm_paddr_t pa)
{

        return (pa & PG_FRAME);
}

static void
__CONCAT(PMTYPE, sf_buf_map)(struct sf_buf *sf)
{
        pt_entry_t opte, *ptep;

        /*
         * Update the sf_buf's virtual-to-physical mapping, flushing the
         * virtual address from the TLB.  Since the reference count for
         * the sf_buf's old mapping was zero, that mapping is not
         * currently in use.  Consequently, there is no need to exchange
         * the old and new PTEs atomically, even under PAE.
         */
        ptep = vtopte(sf->kva);
        opte = *ptep;
        *ptep = VM_PAGE_TO_PHYS(sf->m) | PG_RW | PG_V |
            pmap_cache_bits(kernel_pmap, sf->m->md.pat_mode, 0);

        /*
         * Avoid unnecessary TLB invalidations: If the sf_buf's old
         * virtual-to-physical mapping was not used, then any processor
         * that has invalidated the sf_buf's virtual address from its TLB
         * since the last used mapping need not invalidate again.
         */
#ifdef SMP
        if ((opte & (PG_V | PG_A)) == (PG_V | PG_A))
                CPU_ZERO(&sf->cpumask);
#else
        if ((opte & (PG_V | PG_A)) == (PG_V | PG_A))
                pmap_invalidate_page_int(kernel_pmap, sf->kva);
#endif
}

static void
__CONCAT(PMTYPE, cp_slow0_map)(vm_offset_t kaddr, int plen, vm_page_t *ma)
{
        pt_entry_t *pte;
        int i;

        for (i = 0, pte = vtopte(kaddr); i < plen; i++, pte++) {
                *pte = PG_V | PG_RW | PG_A | PG_M | VM_PAGE_TO_PHYS(ma[i]) |
                    pmap_cache_bits(kernel_pmap, pmap_page_get_memattr(ma[i]),
                    FALSE);
                invlpg(kaddr + ptoa(i));
        }
}

static u_int
__CONCAT(PMTYPE, get_kcr3)(void)
{

#ifdef PMAP_PAE_COMP
        return ((u_int)IdlePDPT);
#else
        return ((u_int)IdlePTD);
#endif
}

static u_int
__CONCAT(PMTYPE, get_cr3)(pmap_t pmap)
{

#ifdef PMAP_PAE_COMP
        return ((u_int)vtophys(pmap->pm_pdpt));
#else
        return ((u_int)vtophys(pmap->pm_pdir));
#endif
}

static caddr_t
__CONCAT(PMTYPE, cmap3)(vm_paddr_t pa, u_int pte_bits)
{
        pt_entry_t *pte;

        pte = CMAP3;
        *pte = pa | pte_bits;
        invltlb();
        return (CADDR3);
}

static void
__CONCAT(PMTYPE, basemem_setup)(u_int basemem)
{
        pt_entry_t *pte;
        int i;

        /*
         * Map pages between basemem and ISA_HOLE_START, if any, r/w into
         * the vm86 page table so that vm86 can scribble on them using
         * the vm86 map too.  XXX: why 2 ways for this and only 1 way for
         * page 0, at least as initialized here?
         */
        pte = (pt_entry_t *)vm86paddr;
        for (i = basemem / 4; i < 160; i++)
                pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U;
}

struct bios16_pmap_handle {
        pt_entry_t *pte;
        pd_entry_t *ptd;
        pt_entry_t orig_ptd;
};

static void *
__CONCAT(PMTYPE, bios16_enter)(void)
{
        struct bios16_pmap_handle *h;

        /*
         * no page table, so create one and install it.
         */
        h = malloc(sizeof(struct bios16_pmap_handle), M_TEMP, M_WAITOK);
        h->pte = (pt_entry_t *)malloc(PAGE_SIZE, M_TEMP, M_WAITOK);
        h->ptd = IdlePTD;
        *h->pte = vm86phystk | PG_RW | PG_V;
        h->orig_ptd = *h->ptd;
        *h->ptd = vtophys(h->pte) | PG_RW | PG_V;
        pmap_invalidate_all_int(kernel_pmap);   /* XXX insurance for now */
        return (h);
}

static void
__CONCAT(PMTYPE, bios16_leave)(void *arg)
{
        struct bios16_pmap_handle *h;

        h = arg;
        *h->ptd = h->orig_ptd;          /* remove page table */
        /*
         * XXX only needs to be invlpg(0) but that doesn't work on the 386
         */
        pmap_invalidate_all_int(kernel_pmap);
        free(h->pte, M_TEMP);           /* ... and free it */
}
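
/*
 * Usage sketch (editor's illustrative example): the handle returned by the
 * bios16_enter method is passed back to the matching bios16_leave method so
 * that the temporary low page table only brackets the 16-bit BIOS call.
 * Assuming the pmap_bios16_enter()/pmap_bios16_leave() wrapper names used by
 * the dispatch layer:
 *
 *      void *cookie;
 *
 *      cookie = pmap_bios16_enter();
 *      (perform the vm86/BIOS16 call here)
 *      pmap_bios16_leave(cookie);
 */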

struct pmap_kernel_map_range {
        vm_offset_t sva;
        pt_entry_t attrs;
        int ptes;
        int pdes;
        int pdpes;
};

static void
sysctl_kmaps_dump(struct sbuf *sb, struct pmap_kernel_map_range *range,
    vm_offset_t eva)
{
        const char *mode;
        int i, pat_idx;

        if (eva <= range->sva)
                return;

        pat_idx = pmap_pat_index(kernel_pmap, range->attrs, true);
        for (i = 0; i < PAT_INDEX_SIZE; i++)
                if (pat_index[i] == pat_idx)
                        break;

        switch (i) {
        case PAT_WRITE_BACK:
                mode = "WB";
                break;
        case PAT_WRITE_THROUGH:
                mode = "WT";
                break;
        case PAT_UNCACHEABLE:
                mode = "UC";
                break;
        case PAT_UNCACHED:
                mode = "U-";
                break;
        case PAT_WRITE_PROTECTED:
                mode = "WP";
                break;
        case PAT_WRITE_COMBINING:
                mode = "WC";
                break;
        default:
                printf("%s: unknown PAT mode %#x for range 0x%08x-0x%08x\n",
                    __func__, pat_idx, range->sva, eva);
                mode = "??";
                break;
        }

        sbuf_printf(sb, "0x%08x-0x%08x r%c%c%c%c %s %d %d %d\n",
            range->sva, eva,
            (range->attrs & PG_RW) != 0 ? 'w' : '-',
            (range->attrs & pg_nx) != 0 ? '-' : 'x',
            (range->attrs & PG_U) != 0 ? 'u' : 's',
            (range->attrs & PG_G) != 0 ? 'g' : '-',
            mode, range->pdpes, range->pdes, range->ptes);

        /* Reset to sentinel value. */
        range->sva = 0xffffffff;
}

/*
 * Determine whether the attributes specified by a page table entry match those
 * being tracked by the current range.  This is not quite as simple as a direct
 * flag comparison since some PAT modes have multiple representations.
 */
static bool
sysctl_kmaps_match(struct pmap_kernel_map_range *range, pt_entry_t attrs)
{
        pt_entry_t diff, mask;

        mask = pg_nx | PG_G | PG_RW | PG_U | PG_PDE_CACHE;
        diff = (range->attrs ^ attrs) & mask;
        if (diff == 0)
                return (true);
        if ((diff & ~PG_PDE_PAT) == 0 &&
            pmap_pat_index(kernel_pmap, range->attrs, true) ==
            pmap_pat_index(kernel_pmap, attrs, true))
                return (true);
        return (false);
}

static void
sysctl_kmaps_reinit(struct pmap_kernel_map_range *range, vm_offset_t va,
    pt_entry_t attrs)
{

        memset(range, 0, sizeof(*range));
        range->sva = va;
        range->attrs = attrs;
}

/*
 * Given a leaf PTE, derive the mapping's attributes.  If they do not match
 * those of the current run, dump the address range and its attributes, and
 * begin a new run.
 */
static void
sysctl_kmaps_check(struct sbuf *sb, struct pmap_kernel_map_range *range,
    vm_offset_t va, pd_entry_t pde, pt_entry_t pte)
{
        pt_entry_t attrs;

        attrs = pde & (PG_RW | PG_U | pg_nx);

        if ((pde & PG_PS) != 0) {
                attrs |= pde & (PG_G | PG_PDE_CACHE);
        } else if (pte != 0) {
                attrs |= pte & pg_nx;
                attrs &= pg_nx | (pte & (PG_RW | PG_U));
                attrs |= pte & (PG_G | PG_PTE_CACHE);

                /* Canonicalize by always using the PDE PAT bit. */
                if ((attrs & PG_PTE_PAT) != 0)
                        attrs ^= PG_PDE_PAT | PG_PTE_PAT;
        }

        if (range->sva > va || !sysctl_kmaps_match(range, attrs)) {
                sysctl_kmaps_dump(sb, range, va);
                sysctl_kmaps_reinit(range, va, attrs);
        }
}
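
/*
 * Example of one line emitted by sysctl_kmaps_dump() above (editor's sketch;
 * the addresses and counts are hypothetical):
 *
 *      0xc0400000-0xc0a00000 rw-sg WB 0 3 0
 *
 * i.e. a writable, non-executable, supervisor-only, global, write-back
 * range covered by 0 PDPEs, 3 PDEs, and 0 4KB PTEs.
 */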

static int
__CONCAT(PMTYPE, sysctl_kmaps)(SYSCTL_HANDLER_ARGS)
{
        struct pmap_kernel_map_range range;
        struct sbuf sbuf, *sb;
        pd_entry_t pde;
        pt_entry_t *pt, pte;
        vm_offset_t sva;
        vm_paddr_t pa;
        int error;
        u_int i, k;

        error = sysctl_wire_old_buffer(req, 0);
        if (error != 0)
                return (error);
        sb = &sbuf;
        sbuf_new_for_sysctl(sb, NULL, PAGE_SIZE, req);

        /* Sentinel value. */
        range.sva = 0xffffffff;

        /*
         * Iterate over the kernel page tables without holding the
         * kernel pmap lock.  Kernel page table pages are never freed,
         * so at worst we will observe inconsistencies in the output.
         */
        for (sva = 0, i = 0; i < NPTEPG * NPGPTD * NPDEPG;) {
                if (i == 0)
                        sbuf_printf(sb, "\nLow PDE:\n");
                else if (i == LOWPTDI * NPTEPG)
                        sbuf_printf(sb, "Low PDE dup:\n");
                else if (i == PTDPTDI * NPTEPG)
                        sbuf_printf(sb, "Recursive map:\n");
                else if (i == KERNPTDI * NPTEPG)
                        sbuf_printf(sb, "Kernel base:\n");
                else if (i == TRPTDI * NPTEPG)
                        sbuf_printf(sb, "Trampoline:\n");
                pde = IdlePTD[sva >> PDRSHIFT];
                if ((pde & PG_V) == 0) {
                        sva = rounddown2(sva, NBPDR);
                        sysctl_kmaps_dump(sb, &range, sva);
                        sva += NBPDR;
                        i += NPTEPG;
                        continue;
                }
                pa = pde & PG_FRAME;
                if ((pde & PG_PS) != 0) {
                        sysctl_kmaps_check(sb, &range, sva, pde, 0);
                        range.pdes++;
                        sva += NBPDR;
                        i += NPTEPG;
                        continue;
                }
                for (pt = vtopte(sva), k = 0; k < NPTEPG; i++, k++, pt++,
                    sva += PAGE_SIZE) {
                        pte = *pt;
                        if ((pte & PG_V) == 0) {
                                sysctl_kmaps_dump(sb, &range, sva);
                                continue;
                        }
                        sysctl_kmaps_check(sb, &range, sva, pde, pte);
                        range.ptes++;
                }
        }

        error = sbuf_finish(sb);
        sbuf_delete(sb);
        return (error);
}
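
/*
 * Editor's note: this handler is attached to the sysctl tree by the pmap
 * dispatch layer, typically as the string-valued "vm.pmap.kernel_maps" node,
 * so the kernel mapping layout above can be inspected from userland with,
 * for example:
 *
 *      sysctl -b vm.pmap.kernel_maps
 */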

#define PMM(a) \
        .pm_##a = __CONCAT(PMTYPE, a),

struct pmap_methods __CONCAT(PMTYPE, methods) = {
        PMM(ksetrw)
        PMM(remap_lower)
        PMM(remap_lowptdi)
        PMM(align_superpage)
        PMM(quick_enter_page)
        PMM(quick_remove_page)
        PMM(trm_alloc)
        PMM(trm_free)
        PMM(get_map_low)
        PMM(get_vm_maxuser_address)
        PMM(kextract)
        PMM(pg_frame)
        PMM(sf_buf_map)
        PMM(cp_slow0_map)
        PMM(get_kcr3)
        PMM(get_cr3)
        PMM(cmap3)
        PMM(basemem_setup)
        PMM(set_nx)
        PMM(bios16_enter)
        PMM(bios16_leave)
        PMM(bootstrap)
        PMM(is_valid_memattr)
        PMM(cache_bits)
        PMM(ps_enabled)
        PMM(pinit0)
        PMM(pinit)
        PMM(activate)
        PMM(activate_boot)
        PMM(advise)
        PMM(clear_modify)
        PMM(change_attr)
        PMM(mincore)
        PMM(copy)
        PMM(copy_page)
        PMM(copy_pages)
        PMM(zero_page)
        PMM(zero_page_area)
        PMM(enter)
        PMM(enter_object)
        PMM(enter_quick)
        PMM(kenter_temporary)
        PMM(object_init_pt)
        PMM(unwire)
        PMM(page_exists_quick)
        PMM(page_wired_mappings)
        PMM(page_is_mapped)
        PMM(remove_pages)
        PMM(is_modified)
        PMM(is_prefaultable)
        PMM(is_referenced)
        PMM(remove_write)
        PMM(ts_referenced)
        PMM(mapdev_attr)
        PMM(unmapdev)
        PMM(page_set_memattr)
        PMM(extract)
        PMM(extract_and_hold)
        PMM(map)
        PMM(qenter)
        PMM(qremove)
        PMM(release)
        PMM(remove)
        PMM(protect)
        PMM(remove_all)
        PMM(init)
        PMM(init_pat)
        PMM(growkernel)
        PMM(invalidate_page)
        PMM(invalidate_range)
        PMM(invalidate_all)
        PMM(invalidate_cache)
        PMM(flush_page)
        PMM(kenter)
        PMM(kremove)
        PMM(sysctl_kmaps)
};
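
/*
 * Editor's note: PMM(a) expands to a designated initializer, so, for
 * example, PMM(kenter) becomes
 *
 *      .pm_kenter = pmap_pae_kenter,
 *
 * in the PAE build of this file (where PMTYPE is pmap_pae_) and
 * .pm_kenter = pmap_nopae_kenter in the non-PAE build.  The resulting method
 * table is consumed by the dispatch layer (pmap_base.c), which selects the
 * PAE or non-PAE variant at boot and routes the public pmap_*() entry points
 * through it.
 */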