1 /* 2 * Copyright (c) 2003-2019 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Matthew Dillon <dillon@backplane.com> 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * 3. Neither the name of The DragonFly Project nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific, prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 * 34 * --- 35 * 36 * Copyright (c) 1991, 1993 37 * The Regents of the University of California. All rights reserved. 38 * Copyright (c) 1994 John S. Dyson 39 * All rights reserved. 40 * Copyright (c) 1994 David Greenman 41 * All rights reserved. 42 * 43 * 44 * This code is derived from software contributed to Berkeley by 45 * The Mach Operating System project at Carnegie-Mellon University. 46 * 47 * Redistribution and use in source and binary forms, with or without 48 * modification, are permitted provided that the following conditions 49 * are met: 50 * 1. Redistributions of source code must retain the above copyright 51 * notice, this list of conditions and the following disclaimer. 52 * 2. Redistributions in binary form must reproduce the above copyright 53 * notice, this list of conditions and the following disclaimer in the 54 * documentation and/or other materials provided with the distribution. 55 * 3. Neither the name of the University nor the names of its contributors 56 * may be used to endorse or promote products derived from this software 57 * without specific prior written permission. 58 * 59 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 60 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 61 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 62 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 63 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 64 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 65 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 66 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 67 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 68 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 69 * SUCH DAMAGE. 70 * 71 * --- 72 * 73 * Copyright (c) 1987, 1990 Carnegie-Mellon University. 74 * All rights reserved. 75 * 76 * Authors: Avadis Tevanian, Jr., Michael Wayne Young 77 * 78 * Permission to use, copy, modify and distribute this software and 79 * its documentation is hereby granted, provided that both the copyright 80 * notice and this permission notice appear in all copies of the 81 * software, derivative works or modified versions, and any portions 82 * thereof, and that both notices appear in supporting documentation. 83 * 84 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 85 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 86 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 87 * 88 * Carnegie Mellon requests users of this software to return to 89 * 90 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 91 * School of Computer Science 92 * Carnegie Mellon University 93 * Pittsburgh PA 15213-3890 94 * 95 * any improvements or extensions that they make and grant Carnegie the 96 * rights to redistribute these changes. 97 */ 98 99 /* 100 * Page fault handling module. 101 */ 102 103 #include <sys/param.h> 104 #include <sys/systm.h> 105 #include <sys/kernel.h> 106 #include <sys/proc.h> 107 #include <sys/vnode.h> 108 #include <sys/resourcevar.h> 109 #include <sys/vmmeter.h> 110 #include <sys/vkernel.h> 111 #include <sys/lock.h> 112 #include <sys/sysctl.h> 113 114 #include <cpu/lwbuf.h> 115 116 #include <vm/vm.h> 117 #include <vm/vm_param.h> 118 #include <vm/pmap.h> 119 #include <vm/vm_map.h> 120 #include <vm/vm_object.h> 121 #include <vm/vm_page.h> 122 #include <vm/vm_pageout.h> 123 #include <vm/vm_kern.h> 124 #include <vm/vm_pager.h> 125 #include <vm/vnode_pager.h> 126 #include <vm/swap_pager.h> 127 #include <vm/vm_extern.h> 128 129 #include <vm/vm_page2.h> 130 131 struct faultstate { 132 vm_page_t m; 133 vm_map_backing_t ba; 134 vm_prot_t prot; 135 vm_page_t first_m; 136 vm_map_backing_t first_ba; 137 vm_prot_t first_prot; 138 vm_map_t map; 139 vm_map_entry_t entry; 140 int lookup_still_valid; /* 0=inv 1=valid/rel -1=valid/atomic */ 141 int hardfault; 142 int fault_flags; 143 int shared; 144 int msoftonly; 145 int first_shared; 146 int wflags; 147 int first_ba_held; /* 0=unlocked 1=locked/rel -1=lock/atomic */ 148 struct vnode *vp; 149 }; 150 151 __read_mostly static int debug_fault = 0; 152 SYSCTL_INT(_vm, OID_AUTO, debug_fault, CTLFLAG_RW, &debug_fault, 0, ""); 153 __read_mostly static int debug_cluster = 0; 154 SYSCTL_INT(_vm, OID_AUTO, debug_cluster, CTLFLAG_RW, &debug_cluster, 0, ""); 155 #if 0 156 static int virtual_copy_enable = 1; 157 SYSCTL_INT(_vm, OID_AUTO, virtual_copy_enable, CTLFLAG_RW, 158 &virtual_copy_enable, 0, ""); 159 #endif 160 __read_mostly int vm_shared_fault = 1; 161 TUNABLE_INT("vm.shared_fault", &vm_shared_fault); 162 SYSCTL_INT(_vm, OID_AUTO, shared_fault, CTLFLAG_RW, 163 &vm_shared_fault, 0, "Allow shared token on vm_object"); 164 __read_mostly static int 
vm_fault_quick_enable = 1;
TUNABLE_INT("vm.fault_quick", &vm_fault_quick_enable);
SYSCTL_INT(_vm, OID_AUTO, fault_quick, CTLFLAG_RW,
	   &vm_fault_quick_enable, 0, "Allow fast vm_fault shortcut");

/*
 * Define here for debugging ioctls.  Note that these are globals, so
 * they will cause a ton of cache line bouncing.  Only use for debugging
 * purposes.
 */
/*#define VM_FAULT_QUICK_DEBUG */
#ifdef VM_FAULT_QUICK_DEBUG
static long vm_fault_quick_success_count = 0;
SYSCTL_LONG(_vm, OID_AUTO, fault_quick_success_count, CTLFLAG_RW,
	    &vm_fault_quick_success_count, 0, "");
static long vm_fault_quick_failure_count1 = 0;
SYSCTL_LONG(_vm, OID_AUTO, fault_quick_failure_count1, CTLFLAG_RW,
	    &vm_fault_quick_failure_count1, 0, "");
static long vm_fault_quick_failure_count2 = 0;
SYSCTL_LONG(_vm, OID_AUTO, fault_quick_failure_count2, CTLFLAG_RW,
	    &vm_fault_quick_failure_count2, 0, "");
static long vm_fault_quick_failure_count3 = 0;
SYSCTL_LONG(_vm, OID_AUTO, fault_quick_failure_count3, CTLFLAG_RW,
	    &vm_fault_quick_failure_count3, 0, "");
static long vm_fault_quick_failure_count4 = 0;
SYSCTL_LONG(_vm, OID_AUTO, fault_quick_failure_count4, CTLFLAG_RW,
	    &vm_fault_quick_failure_count4, 0, "");
#endif

static int vm_fault_quick(struct faultstate *fs, vm_pindex_t first_pindex,
			vm_prot_t fault_type);
static int vm_fault_object(struct faultstate *, vm_pindex_t, vm_prot_t, int);
static int vm_fault_vpagetable(struct faultstate *, vm_pindex_t *,
			vpte_t, int, int);
#if 0
static int vm_fault_additional_pages (vm_page_t, int, int, vm_page_t *, int *);
#endif
static void vm_set_nosync(vm_page_t m, vm_map_entry_t entry);
static void vm_prefault(pmap_t pmap, vm_offset_t addra,
			vm_map_entry_t entry, int prot, int fault_flags);
static void vm_prefault_quick(pmap_t pmap, vm_offset_t addra,
			vm_map_entry_t entry, int prot, int fault_flags);

static __inline void
release_page(struct faultstate *fs)
{
	vm_page_deactivate(fs->m);
	vm_page_wakeup(fs->m);
	fs->m = NULL;
}

static __inline void
unlock_map(struct faultstate *fs)
{
	if (fs->ba != fs->first_ba)
		vm_object_drop(fs->ba->object);
	if (fs->first_ba && fs->first_ba_held == 1) {
		vm_object_drop(fs->first_ba->object);
		fs->first_ba_held = 0;
		fs->first_ba = NULL;
	}
	fs->ba = NULL;

	/*
	 * NOTE: If lookup_still_valid == -1 the map is assumed to be locked
	 *	 and caller expects it to remain locked atomically.
	 */
	if (fs->lookup_still_valid == 1 && fs->map) {
		vm_map_lookup_done(fs->map, fs->entry, 0);
		fs->lookup_still_valid = 0;
		fs->entry = NULL;
	}
}

/*
 * Clean up after a successful call to vm_fault_object() so another call
 * to vm_fault_object() can be made.
 */
static void
cleanup_fault(struct faultstate *fs)
{
	/*
	 * We allocated a junk page for a COW operation that did
	 * not occur, the page must be freed.
	 */
	if (fs->ba != fs->first_ba) {
		KKASSERT(fs->first_shared == 0);

		/*
		 * first_m could be completely valid and we got here
		 * because of a PG_RAM, don't mistakenly free it!
		 */
		if ((fs->first_m->valid & VM_PAGE_BITS_ALL) ==
		    VM_PAGE_BITS_ALL) {
			vm_page_wakeup(fs->first_m);
		} else {
			vm_page_free(fs->first_m);
		}
		vm_object_pip_wakeup(fs->ba->object);
		fs->first_m = NULL;

		/*
		 * Reset fs->ba (used by vm_fault_vpagetable() without
		 * calling unlock_map()), so we need a little duplication.
		 */
		vm_object_drop(fs->ba->object);
		fs->ba = fs->first_ba;
	}
}

static void
unlock_things(struct faultstate *fs)
{
	cleanup_fault(fs);
	unlock_map(fs);
	if (fs->vp != NULL) {
		vput(fs->vp);
		fs->vp = NULL;
	}
}

#if 0
/*
 * Virtual copy tests.  Used by the fault code to determine if a
 * page can be moved from an orphan vm_object into its shadow
 * instead of copying its contents.
 */
static __inline int
virtual_copy_test(struct faultstate *fs)
{
	/*
	 * Must be holding exclusive locks
	 */
	if (fs->first_shared || fs->shared || virtual_copy_enable == 0)
		return 0;

	/*
	 * Map, if present, has not changed
	 */
	if (fs->map && fs->map_generation != fs->map->timestamp)
		return 0;

	/*
	 * No refs, except us
	 */
	if (fs->ba->object->ref_count != 1)
		return 0;

	/*
	 * No one else can look this object up
	 */
	if (fs->ba->object->handle != NULL)
		return 0;

	/*
	 * No other ways to look the object up
	 */
	if (fs->ba->object->type != OBJT_DEFAULT &&
	    fs->ba->object->type != OBJT_SWAP)
		return 0;

	/*
	 * We don't chase down the shadow chain
	 */
	if (fs->ba != fs->first_ba->backing_ba)
		return 0;

	return 1;
}

static __inline int
virtual_copy_ok(struct faultstate *fs)
{
	if (virtual_copy_test(fs)) {
		/*
		 * Grab the lock and re-test changeable items.
		 */
		if (fs->lookup_still_valid == 0 && fs->map) {
			if (lockmgr(&fs->map->lock, LK_EXCLUSIVE|LK_NOWAIT))
				return 0;
			fs->lookup_still_valid = 1;
			if (virtual_copy_test(fs)) {
				fs->map_generation = ++fs->map->timestamp;
				return 1;
			}
			fs->lookup_still_valid = 0;
			lockmgr(&fs->map->lock, LK_RELEASE);
		}
	}
	return 0;
}
#endif

/*
 * TRYPAGER
 *
 * Determine if the pager for the current object *might* contain the page.
 *
 * We only need to try the pager if this is not a default object (default
 * objects are zero-fill and have no real pager), and if we are not taking
 * a wiring fault or if the FS entry is wired.
 */
#define TRYPAGER(fs)	\
		(fs->ba->object->type != OBJT_DEFAULT &&		\
		(((fs->fault_flags & VM_FAULT_WIRE_MASK) == 0) ||	\
		 (fs->wflags & FW_WIRED)))

/*
 * vm_fault:
 *
 * Handle a page fault occurring at the given address, requiring the given
 * permissions, in the map specified.  If successful, the page is inserted
 * into the associated physical map.
 *
 * NOTE: The given address should be truncated to the proper page address.
 *
 * KERN_SUCCESS is returned if the page fault is handled; otherwise,
 * a standard error specifying why the fault is fatal is returned.
 *
 * The map in question must be referenced, and remains so.
 * The caller may hold no locks.
 * No other requirements.
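 *
 * Illustrative sketch (not part of this file's code): a machine-dependent
 * trap handler resolving a user-mode write fault might invoke this routine
 * roughly as follows, where 'va' is the faulting address and 'p' is the
 * hypothetical current process:
 *
 *	map = &p->p_vmspace->vm_map;
 *	rv = vm_fault(map, trunc_page(va), VM_PROT_WRITE,
 *		      VM_FAULT_NORMAL | VM_FAULT_USERMODE);
 *	if (rv != KERN_SUCCESS)
 *		(deliver SIGSEGV or SIGBUS to the process based on rv)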
386 */ 387 int 388 vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type, int fault_flags) 389 { 390 int result; 391 vm_pindex_t first_pindex; 392 struct faultstate fs; 393 struct lwp *lp; 394 struct proc *p; 395 thread_t td; 396 struct vm_map_ilock ilock; 397 int didilock; 398 int growstack; 399 int retry = 0; 400 int inherit_prot; 401 402 inherit_prot = fault_type & VM_PROT_NOSYNC; 403 fs.hardfault = 0; 404 fs.fault_flags = fault_flags; 405 fs.vp = NULL; 406 fs.shared = vm_shared_fault; 407 fs.first_shared = vm_shared_fault; 408 growstack = 1; 409 410 /* 411 * vm_map interactions 412 */ 413 td = curthread; 414 if ((lp = td->td_lwp) != NULL) 415 lp->lwp_flags |= LWP_PAGING; 416 417 RetryFault: 418 /* 419 * vm_fault_quick() can shortcut us. 420 */ 421 fs.msoftonly = 0; 422 fs.first_ba_held = 0; 423 424 /* 425 * Find the vm_map_entry representing the backing store and resolve 426 * the top level object and page index. This may have the side 427 * effect of executing a copy-on-write on the map entry, 428 * creating a shadow object, or splitting an anonymous entry for 429 * performance, but will not COW any actual VM pages. 430 * 431 * On success fs.map is left read-locked and various other fields 432 * are initialized but not otherwise referenced or locked. 433 * 434 * NOTE! vm_map_lookup will try to upgrade the fault_type to 435 * VM_FAULT_WRITE if the map entry is a virtual page table 436 * and also writable, so we can set the 'A'accessed bit in 437 * the virtual page table entry. 438 */ 439 fs.map = map; 440 result = vm_map_lookup(&fs.map, vaddr, fault_type, 441 &fs.entry, &fs.first_ba, 442 &first_pindex, &fs.first_prot, &fs.wflags); 443 444 /* 445 * If the lookup failed or the map protections are incompatible, 446 * the fault generally fails. 447 * 448 * The failure could be due to TDF_NOFAULT if vm_map_lookup() 449 * tried to do a COW fault. 450 * 451 * If the caller is trying to do a user wiring we have more work 452 * to do. 453 */ 454 if (result != KERN_SUCCESS) { 455 if (result == KERN_FAILURE_NOFAULT) { 456 result = KERN_FAILURE; 457 goto done; 458 } 459 if (result != KERN_PROTECTION_FAILURE || 460 (fs.fault_flags & VM_FAULT_WIRE_MASK) != VM_FAULT_USER_WIRE) 461 { 462 if (result == KERN_INVALID_ADDRESS && growstack && 463 map != &kernel_map && curproc != NULL) { 464 result = vm_map_growstack(map, vaddr); 465 if (result == KERN_SUCCESS) { 466 growstack = 0; 467 ++retry; 468 goto RetryFault; 469 } 470 result = KERN_FAILURE; 471 } 472 goto done; 473 } 474 475 /* 476 * If we are user-wiring a r/w segment, and it is COW, then 477 * we need to do the COW operation. Note that we don't 478 * currently COW RO sections now, because it is NOT desirable 479 * to COW .text. We simply keep .text from ever being COW'ed 480 * and take the heat that one cannot debug wired .text sections. 481 * 482 * XXX Try to allow the above by specifying OVERRIDE_WRITE. 483 */ 484 result = vm_map_lookup(&fs.map, vaddr, 485 VM_PROT_READ|VM_PROT_WRITE| 486 VM_PROT_OVERRIDE_WRITE, 487 &fs.entry, &fs.first_ba, 488 &first_pindex, &fs.first_prot, 489 &fs.wflags); 490 if (result != KERN_SUCCESS) { 491 /* could also be KERN_FAILURE_NOFAULT */ 492 result = KERN_FAILURE; 493 goto done; 494 } 495 496 /* 497 * If we don't COW now, on a user wire, the user will never 498 * be able to write to the mapping. If we don't make this 499 * restriction, the bookkeeping would be nearly impossible. 500 * 501 * XXX We have a shared lock, this will have a MP race but 502 * I don't see how it can hurt anything. 
503 */ 504 if ((fs.entry->protection & VM_PROT_WRITE) == 0) { 505 atomic_clear_char(&fs.entry->max_protection, 506 VM_PROT_WRITE); 507 } 508 } 509 510 /* 511 * fs.map is read-locked 512 * 513 * Misc checks. Save the map generation number to detect races. 514 */ 515 fs.lookup_still_valid = 1; 516 fs.first_m = NULL; 517 fs.ba = fs.first_ba; /* so unlock_things() works */ 518 fs.prot = fs.first_prot; /* default (used by uksmap) */ 519 520 if (fs.entry->eflags & (MAP_ENTRY_NOFAULT | MAP_ENTRY_KSTACK)) { 521 if (fs.entry->eflags & MAP_ENTRY_NOFAULT) { 522 panic("vm_fault: fault on nofault entry, addr: %p", 523 (void *)vaddr); 524 } 525 if ((fs.entry->eflags & MAP_ENTRY_KSTACK) && 526 vaddr >= fs.entry->ba.start && 527 vaddr < fs.entry->ba.start + PAGE_SIZE) { 528 panic("vm_fault: fault on stack guard, addr: %p", 529 (void *)vaddr); 530 } 531 } 532 533 /* 534 * A user-kernel shared map has no VM object and bypasses 535 * everything. We execute the uksmap function with a temporary 536 * fictitious vm_page. The address is directly mapped with no 537 * management. 538 */ 539 if (fs.entry->maptype == VM_MAPTYPE_UKSMAP) { 540 struct vm_page fakem; 541 542 bzero(&fakem, sizeof(fakem)); 543 fakem.pindex = first_pindex; 544 fakem.flags = PG_FICTITIOUS | PG_UNQUEUED; 545 fakem.busy_count = PBUSY_LOCKED; 546 fakem.valid = VM_PAGE_BITS_ALL; 547 fakem.pat_mode = VM_MEMATTR_DEFAULT; 548 if (fs.entry->ba.uksmap(&fs.entry->ba, UKSMAPOP_FAULT, 549 fs.entry->aux.dev, &fakem)) { 550 result = KERN_FAILURE; 551 unlock_things(&fs); 552 goto done2; 553 } 554 pmap_enter(fs.map->pmap, vaddr, &fakem, fs.prot | inherit_prot, 555 (fs.wflags & FW_WIRED), fs.entry); 556 goto done_success; 557 } 558 559 /* 560 * A system map entry may return a NULL object. No object means 561 * no pager means an unrecoverable kernel fault. 562 */ 563 if (fs.first_ba == NULL) { 564 panic("vm_fault: unrecoverable fault at %p in entry %p", 565 (void *)vaddr, fs.entry); 566 } 567 568 /* 569 * Fail here if not a trivial anonymous page fault and TDF_NOFAULT 570 * is set. 571 * 572 * Unfortunately a deadlock can occur if we are forced to page-in 573 * from swap, but diving all the way into the vm_pager_get_page() 574 * function to find out is too much. Just check the object type. 575 * 576 * The deadlock is a CAM deadlock on a busy VM page when trying 577 * to finish an I/O if another process gets stuck in 578 * vop_helper_read_shortcut() due to a swap fault. 579 */ 580 if ((td->td_flags & TDF_NOFAULT) && 581 (retry || 582 fs.first_ba->object->type == OBJT_VNODE || 583 fs.first_ba->object->type == OBJT_SWAP || 584 fs.first_ba->backing_ba)) { 585 result = KERN_FAILURE; 586 unlock_things(&fs); 587 goto done2; 588 } 589 590 /* 591 * If the entry is wired we cannot change the page protection. 592 */ 593 if (fs.wflags & FW_WIRED) 594 fault_type = fs.first_prot; 595 596 /* 597 * We generally want to avoid unnecessary exclusive modes on backing 598 * and terminal objects because this can seriously interfere with 599 * heavily fork()'d processes (particularly /bin/sh scripts). 600 * 601 * However, we also want to avoid unnecessary retries due to needed 602 * shared->exclusive promotion for common faults. Exclusive mode is 603 * always needed if any page insertion, rename, or free occurs in an 604 * object (and also indirectly if any I/O is done). 605 * 606 * The main issue here is going to be fs.first_shared. 
If the 607 * first_object has a backing object which isn't shadowed and the 608 * process is single-threaded we might as well use an exclusive 609 * lock/chain right off the bat. 610 */ 611 #if 0 612 /* WORK IN PROGRESS, CODE REMOVED */ 613 if (fs.first_shared && fs.first_object->backing_object && 614 LIST_EMPTY(&fs.first_object->shadow_head) && 615 td->td_proc && td->td_proc->p_nthreads == 1) { 616 fs.first_shared = 0; 617 } 618 #endif 619 620 /* 621 * VM_FAULT_UNSWAP - swap_pager_unswapped() needs an exclusive object 622 * VM_FAULT_DIRTY - may require swap_pager_unswapped() later, but 623 * we can try shared first. 624 */ 625 if (fault_flags & VM_FAULT_UNSWAP) 626 fs.first_shared = 0; 627 628 /* 629 * Try to shortcut the entire mess and run the fault lockless. 630 */ 631 if (vm_fault_quick_enable && 632 vm_fault_quick(&fs, first_pindex, fault_type) == KERN_SUCCESS) { 633 didilock = 0; 634 fault_flags &= ~VM_FAULT_BURST; 635 goto success; 636 } 637 638 /* 639 * Exclusive heuristic (alloc page vs page exists) 640 */ 641 if (fs.first_ba->flags & VM_MAP_BACK_EXCL_HEUR) 642 fs.first_shared = 0; 643 644 /* 645 * Obtain a top-level object lock, shared or exclusive depending 646 * on fs.first_shared. If a shared lock winds up being insufficient 647 * we will retry with an exclusive lock. 648 * 649 * The vnode pager lock is always shared. 650 */ 651 if (fs.first_shared) 652 vm_object_hold_shared(fs.first_ba->object); 653 else 654 vm_object_hold(fs.first_ba->object); 655 if (fs.vp == NULL) 656 fs.vp = vnode_pager_lock(fs.first_ba); 657 fs.first_ba_held = 1; 658 659 /* 660 * The page we want is at (first_object, first_pindex), but if the 661 * vm_map_entry is VM_MAPTYPE_VPAGETABLE we have to traverse the 662 * page table to figure out the actual pindex. 663 * 664 * NOTE! DEVELOPMENT IN PROGRESS, THIS IS AN INITIAL IMPLEMENTATION 665 * ONLY 666 */ 667 didilock = 0; 668 if (fs.entry->maptype == VM_MAPTYPE_VPAGETABLE) { 669 vm_map_interlock(fs.map, &ilock, vaddr, vaddr + PAGE_SIZE); 670 didilock = 1; 671 result = vm_fault_vpagetable(&fs, &first_pindex, 672 fs.entry->aux.master_pde, 673 fault_type, 1); 674 if (result == KERN_TRY_AGAIN) { 675 vm_map_deinterlock(fs.map, &ilock); 676 ++retry; 677 goto RetryFault; 678 } 679 if (result != KERN_SUCCESS) { 680 vm_map_deinterlock(fs.map, &ilock); 681 goto done; 682 } 683 } 684 685 /* 686 * Now we have the actual (object, pindex), fault in the page. If 687 * vm_fault_object() fails it will unlock and deallocate the FS 688 * data. If it succeeds everything remains locked and fs->ba->object 689 * will have an additional PIP count if fs->ba != fs->first_ba. 690 * 691 * vm_fault_object will set fs->prot for the pmap operation. It is 692 * allowed to set VM_PROT_WRITE if fault_type == VM_PROT_READ if the 693 * page can be safely written. However, it will force a read-only 694 * mapping for a read fault if the memory is managed by a virtual 695 * page table. 696 * 697 * If the fault code uses the shared object lock shortcut 698 * we must not try to burst (we can't allocate VM pages). 
699 */ 700 result = vm_fault_object(&fs, first_pindex, fault_type, 1); 701 702 if (debug_fault > 0) { 703 --debug_fault; 704 kprintf("VM_FAULT result %d addr=%jx type=%02x flags=%02x " 705 "fs.m=%p fs.prot=%02x fs.wflags=%02x fs.entry=%p\n", 706 result, (intmax_t)vaddr, fault_type, fault_flags, 707 fs.m, fs.prot, fs.wflags, fs.entry); 708 } 709 710 if (result == KERN_TRY_AGAIN) { 711 if (didilock) 712 vm_map_deinterlock(fs.map, &ilock); 713 ++retry; 714 goto RetryFault; 715 } 716 if (result != KERN_SUCCESS) { 717 if (didilock) 718 vm_map_deinterlock(fs.map, &ilock); 719 goto done; 720 } 721 722 success: 723 /* 724 * On success vm_fault_object() does not unlock or deallocate, and fs.m 725 * will contain a busied page. It does drop fs->ba if appropriate. 726 * 727 * Enter the page into the pmap and do pmap-related adjustments. 728 * 729 * WARNING! Soft-busied fs.m's can only be manipulated in limited 730 * ways. 731 */ 732 KKASSERT(fs.lookup_still_valid != 0); 733 vm_page_flag_set(fs.m, PG_REFERENCED); 734 pmap_enter(fs.map->pmap, vaddr, fs.m, fs.prot | inherit_prot, 735 fs.wflags & FW_WIRED, fs.entry); 736 737 if (didilock) 738 vm_map_deinterlock(fs.map, &ilock); 739 740 /* 741 * If the page is not wired down, then put it where the pageout daemon 742 * can find it. 743 * 744 * NOTE: We cannot safely wire, unwire, or adjust queues for a 745 * soft-busied page. 746 */ 747 if (fs.msoftonly) { 748 KKASSERT(fs.m->busy_count & PBUSY_MASK); 749 KKASSERT((fs.fault_flags & VM_FAULT_WIRE_MASK) == 0); 750 vm_page_sbusy_drop(fs.m); 751 } else { 752 if (fs.fault_flags & VM_FAULT_WIRE_MASK) { 753 if (fs.wflags & FW_WIRED) 754 vm_page_wire(fs.m); 755 else 756 vm_page_unwire(fs.m, 1); 757 } else { 758 vm_page_activate(fs.m); 759 } 760 KKASSERT(fs.m->busy_count & PBUSY_LOCKED); 761 vm_page_wakeup(fs.m); 762 } 763 764 /* 765 * Burst in a few more pages if possible. The fs.map should still 766 * be locked. To avoid interlocking against a vnode->getblk 767 * operation we had to be sure to unbusy our primary vm_page above 768 * first. 769 * 770 * A normal burst can continue down backing store, only execute 771 * if we are holding an exclusive lock, otherwise the exclusive 772 * locks the burst code gets might cause excessive SMP collisions. 773 * 774 * A quick burst can be utilized when there is no backing object 775 * (i.e. a shared file mmap). 776 */ 777 if ((fault_flags & VM_FAULT_BURST) && 778 (fs.fault_flags & VM_FAULT_WIRE_MASK) == 0 && 779 (fs.wflags & FW_WIRED) == 0) { 780 if (fs.first_shared == 0 && fs.shared == 0) { 781 vm_prefault(fs.map->pmap, vaddr, 782 fs.entry, fs.prot, fault_flags); 783 } else { 784 vm_prefault_quick(fs.map->pmap, vaddr, 785 fs.entry, fs.prot, fault_flags); 786 } 787 } 788 789 done_success: 790 mycpu->gd_cnt.v_vm_faults++; 791 if (td->td_lwp) 792 ++td->td_lwp->lwp_ru.ru_minflt; 793 794 /* 795 * Unlock everything, and return 796 */ 797 unlock_things(&fs); 798 799 if (td->td_lwp) { 800 if (fs.hardfault) { 801 td->td_lwp->lwp_ru.ru_majflt++; 802 } else { 803 td->td_lwp->lwp_ru.ru_minflt++; 804 } 805 } 806 807 /*vm_object_deallocate(fs.first_ba->object);*/ 808 /*fs.m = NULL; */ 809 810 result = KERN_SUCCESS; 811 done: 812 if (fs.first_ba && fs.first_ba->object && fs.first_ba_held == 1) { 813 vm_object_drop(fs.first_ba->object); 814 fs.first_ba_held = 0; 815 } 816 done2: 817 if (lp) 818 lp->lwp_flags &= ~LWP_PAGING; 819 820 #if !defined(NO_SWAPPING) 821 /* 822 * Check the process RSS limit and force deactivation and 823 * (asynchronous) paging if necessary. 
This is a complex operation, 824 * only do it for direct user-mode faults, for now. 825 * 826 * To reduce overhead implement approximately a ~16MB hysteresis. 827 */ 828 p = td->td_proc; 829 if ((fault_flags & VM_FAULT_USERMODE) && lp && 830 p->p_limit && map->pmap && vm_pageout_memuse_mode >= 1 && 831 map != &kernel_map) { 832 vm_pindex_t limit; 833 vm_pindex_t size; 834 835 limit = OFF_TO_IDX(qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur, 836 p->p_rlimit[RLIMIT_RSS].rlim_max)); 837 size = pmap_resident_tlnw_count(map->pmap); 838 if (limit >= 0 && size > 4096 && size - 4096 >= limit) { 839 vm_pageout_map_deactivate_pages(map, limit); 840 } 841 } 842 #endif 843 844 if (result != KERN_SUCCESS && debug_fault < 0) { 845 kprintf("VM_FAULT %d:%d (%s) result %d " 846 "addr=%jx type=%02x flags=%02x " 847 "fs.m=%p fs.prot=%02x fs.wflags=%02x fs.entry=%p\n", 848 (curthread->td_proc ? curthread->td_proc->p_pid : -1), 849 (curthread->td_lwp ? curthread->td_lwp->lwp_tid : -1), 850 curthread->td_comm, 851 result, 852 (intmax_t)vaddr, fault_type, fault_flags, 853 fs.m, fs.prot, fs.wflags, fs.entry); 854 while (debug_fault < 0 && (debug_fault & 1)) 855 tsleep(&debug_fault, 0, "DEBUG", hz); 856 } 857 858 return (result); 859 } 860 861 /* 862 * Attempt a lockless vm_fault() shortcut. The stars have to align for this 863 * to work. But if it does we can get our page only soft-busied and not 864 * have to touch the vm_object or vnode locks at all. 865 */ 866 static 867 int 868 vm_fault_quick(struct faultstate *fs, vm_pindex_t first_pindex, 869 vm_prot_t fault_type) 870 { 871 vm_page_t m; 872 vm_object_t obj; /* NOT LOCKED */ 873 874 /* 875 * Don't waste time if the object is only being used by one vm_map. 876 */ 877 obj = fs->first_ba->object; 878 if (obj->flags & OBJ_ONEMAPPING) 879 return KERN_FAILURE; 880 881 /* 882 * This will try to wire/unwire a page, which can't be done with 883 * a soft-busied page. 884 */ 885 if (fs->fault_flags & VM_FAULT_WIRE_MASK) 886 return KERN_FAILURE; 887 888 /* 889 * Ick, can't handle this 890 */ 891 if (fs->entry->maptype == VM_MAPTYPE_VPAGETABLE) { 892 #ifdef VM_FAULT_QUICK_DEBUG 893 ++vm_fault_quick_failure_count1; 894 #endif 895 return KERN_FAILURE; 896 } 897 898 /* 899 * Ok, try to get the vm_page quickly via the hash table. The 900 * page will be soft-busied on success (NOT hard-busied). 901 */ 902 m = vm_page_hash_get(obj, first_pindex); 903 if (m == NULL) { 904 #ifdef VM_FAULT_QUICK_DEBUG 905 ++vm_fault_quick_failure_count2; 906 #endif 907 return KERN_FAILURE; 908 } 909 if ((obj->flags & OBJ_DEAD) || 910 m->valid != VM_PAGE_BITS_ALL || 911 m->queue - m->pc != PQ_ACTIVE || 912 (m->flags & PG_SWAPPED)) { 913 vm_page_sbusy_drop(m); 914 #ifdef VM_FAULT_QUICK_DEBUG 915 ++vm_fault_quick_failure_count3; 916 #endif 917 return KERN_FAILURE; 918 } 919 920 /* 921 * The page is already fully valid, ACTIVE, and is not PG_SWAPPED. 922 * 923 * Don't map the page writable when emulating the dirty bit, a 924 * fault must be taken for proper emulation (vkernel). 925 */ 926 if (curthread->td_lwp && curthread->td_lwp->lwp_vmspace && 927 pmap_emulate_ad_bits(&curthread->td_lwp->lwp_vmspace->vm_pmap)) { 928 if ((fault_type & VM_PROT_WRITE) == 0) 929 fs->prot &= ~VM_PROT_WRITE; 930 } 931 932 /* 933 * If this is a write fault the object and the page must already 934 * be writable. Since we don't hold an object lock and only a 935 * soft-busy on the page, we cannot manipulate the object or 936 * the page state (other than the page queue). 
937 */ 938 if (fs->prot & VM_PROT_WRITE) { 939 if ((obj->flags & (OBJ_WRITEABLE | OBJ_MIGHTBEDIRTY)) != 940 (OBJ_WRITEABLE | OBJ_MIGHTBEDIRTY) || 941 m->dirty != VM_PAGE_BITS_ALL) { 942 vm_page_sbusy_drop(m); 943 #ifdef VM_FAULT_QUICK_DEBUG 944 ++vm_fault_quick_failure_count4; 945 #endif 946 return KERN_FAILURE; 947 } 948 vm_set_nosync(m, fs->entry); 949 } 950 951 /* 952 * Even though we are only soft-busied we can still move pages 953 * around in the normal queue(s). The soft-busy prevents the 954 * page from being removed from the object, etc (normal operation). 955 * 956 * However, in this fast path it is excessively important to avoid 957 * any hard locks, so we use a special passive version of activate. 958 */ 959 vm_page_soft_activate(m); 960 fs->m = m; 961 fs->msoftonly = 1; 962 #ifdef VM_FAULT_QUICK_DEBUG 963 ++vm_fault_quick_success_count; 964 #endif 965 966 return KERN_SUCCESS; 967 } 968 969 /* 970 * Fault in the specified virtual address in the current process map, 971 * returning a held VM page or NULL. See vm_fault_page() for more 972 * information. 973 * 974 * No requirements. 975 */ 976 vm_page_t 977 vm_fault_page_quick(vm_offset_t va, vm_prot_t fault_type, 978 int *errorp, int *busyp) 979 { 980 struct lwp *lp = curthread->td_lwp; 981 vm_page_t m; 982 983 m = vm_fault_page(&lp->lwp_vmspace->vm_map, va, 984 fault_type, VM_FAULT_NORMAL, 985 errorp, busyp); 986 return(m); 987 } 988 989 /* 990 * Fault in the specified virtual address in the specified map, doing all 991 * necessary manipulation of the object store and all necessary I/O. Return 992 * a held VM page or NULL, and set *errorp. The related pmap is not 993 * updated. 994 * 995 * If busyp is not NULL then *busyp will be set to TRUE if this routine 996 * decides to return a busied page (aka VM_PROT_WRITE), or FALSE if it 997 * does not (VM_PROT_WRITE not specified or busyp is NULL). If busyp is 998 * NULL the returned page is only held. 999 * 1000 * If the caller has no intention of writing to the page's contents, busyp 1001 * can be passed as NULL along with VM_PROT_WRITE to force a COW operation 1002 * without busying the page. 1003 * 1004 * The returned page will also be marked PG_REFERENCED. 1005 * 1006 * If the page cannot be faulted writable and VM_PROT_WRITE was specified, an 1007 * error will be returned. 1008 * 1009 * No requirements. 1010 */ 1011 vm_page_t 1012 vm_fault_page(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type, 1013 int fault_flags, int *errorp, int *busyp) 1014 { 1015 vm_pindex_t first_pindex; 1016 struct faultstate fs; 1017 int result; 1018 int retry; 1019 int growstack; 1020 int didcow; 1021 vm_prot_t orig_fault_type = fault_type; 1022 1023 retry = 0; 1024 didcow = 0; 1025 fs.hardfault = 0; 1026 fs.fault_flags = fault_flags; 1027 KKASSERT((fault_flags & VM_FAULT_WIRE_MASK) == 0); 1028 1029 /* 1030 * Dive the pmap (concurrency possible). If we find the 1031 * appropriate page we can terminate early and quickly. 1032 * 1033 * This works great for normal programs but will always return 1034 * NULL for host lookups of vkernel maps in VMM mode. 1035 * 1036 * NOTE: pmap_fault_page_quick() might not busy the page. If 1037 * VM_PROT_WRITE is set in fault_type and pmap_fault_page_quick() 1038 * returns non-NULL, it will safely dirty the returned vm_page_t 1039 * for us. We cannot safely dirty it here (it might not be 1040 * busy). 
1041 */ 1042 fs.m = pmap_fault_page_quick(map->pmap, vaddr, fault_type, busyp); 1043 if (fs.m) { 1044 *errorp = 0; 1045 return(fs.m); 1046 } 1047 1048 /* 1049 * Otherwise take a concurrency hit and do a formal page 1050 * fault. 1051 */ 1052 fs.vp = NULL; 1053 fs.shared = vm_shared_fault; 1054 fs.first_shared = vm_shared_fault; 1055 fs.msoftonly = 0; 1056 growstack = 1; 1057 1058 /* 1059 * VM_FAULT_UNSWAP - swap_pager_unswapped() needs an exclusive object 1060 * VM_FAULT_DIRTY - may require swap_pager_unswapped() later, but 1061 * we can try shared first. 1062 */ 1063 if (fault_flags & VM_FAULT_UNSWAP) { 1064 fs.first_shared = 0; 1065 } 1066 1067 RetryFault: 1068 /* 1069 * Find the vm_map_entry representing the backing store and resolve 1070 * the top level object and page index. This may have the side 1071 * effect of executing a copy-on-write on the map entry and/or 1072 * creating a shadow object, but will not COW any actual VM pages. 1073 * 1074 * On success fs.map is left read-locked and various other fields 1075 * are initialized but not otherwise referenced or locked. 1076 * 1077 * NOTE! vm_map_lookup will upgrade the fault_type to VM_FAULT_WRITE 1078 * if the map entry is a virtual page table and also writable, 1079 * so we can set the 'A'accessed bit in the virtual page table 1080 * entry. 1081 */ 1082 fs.map = map; 1083 fs.first_ba_held = 0; 1084 result = vm_map_lookup(&fs.map, vaddr, fault_type, 1085 &fs.entry, &fs.first_ba, 1086 &first_pindex, &fs.first_prot, &fs.wflags); 1087 1088 if (result != KERN_SUCCESS) { 1089 if (result == KERN_FAILURE_NOFAULT) { 1090 *errorp = KERN_FAILURE; 1091 fs.m = NULL; 1092 goto done; 1093 } 1094 if (result != KERN_PROTECTION_FAILURE || 1095 (fs.fault_flags & VM_FAULT_WIRE_MASK) != VM_FAULT_USER_WIRE) 1096 { 1097 if (result == KERN_INVALID_ADDRESS && growstack && 1098 map != &kernel_map && curproc != NULL) { 1099 result = vm_map_growstack(map, vaddr); 1100 if (result == KERN_SUCCESS) { 1101 growstack = 0; 1102 ++retry; 1103 goto RetryFault; 1104 } 1105 result = KERN_FAILURE; 1106 } 1107 fs.m = NULL; 1108 *errorp = result; 1109 goto done; 1110 } 1111 1112 /* 1113 * If we are user-wiring a r/w segment, and it is COW, then 1114 * we need to do the COW operation. Note that we don't 1115 * currently COW RO sections now, because it is NOT desirable 1116 * to COW .text. We simply keep .text from ever being COW'ed 1117 * and take the heat that one cannot debug wired .text sections. 1118 */ 1119 result = vm_map_lookup(&fs.map, vaddr, 1120 VM_PROT_READ|VM_PROT_WRITE| 1121 VM_PROT_OVERRIDE_WRITE, 1122 &fs.entry, &fs.first_ba, 1123 &first_pindex, &fs.first_prot, 1124 &fs.wflags); 1125 if (result != KERN_SUCCESS) { 1126 /* could also be KERN_FAILURE_NOFAULT */ 1127 *errorp = KERN_FAILURE; 1128 fs.m = NULL; 1129 goto done; 1130 } 1131 1132 /* 1133 * If we don't COW now, on a user wire, the user will never 1134 * be able to write to the mapping. If we don't make this 1135 * restriction, the bookkeeping would be nearly impossible. 1136 * 1137 * XXX We have a shared lock, this will have a MP race but 1138 * I don't see how it can hurt anything. 1139 */ 1140 if ((fs.entry->protection & VM_PROT_WRITE) == 0) { 1141 atomic_clear_char(&fs.entry->max_protection, 1142 VM_PROT_WRITE); 1143 } 1144 } 1145 1146 /* 1147 * fs.map is read-locked 1148 * 1149 * Misc checks. Save the map generation number to detect races. 
1150 */ 1151 fs.lookup_still_valid = 1; 1152 fs.first_m = NULL; 1153 fs.ba = fs.first_ba; 1154 1155 if (fs.entry->eflags & MAP_ENTRY_NOFAULT) { 1156 panic("vm_fault: fault on nofault entry, addr: %lx", 1157 (u_long)vaddr); 1158 } 1159 1160 /* 1161 * A user-kernel shared map has no VM object and bypasses 1162 * everything. We execute the uksmap function with a temporary 1163 * fictitious vm_page. The address is directly mapped with no 1164 * management. 1165 */ 1166 if (fs.entry->maptype == VM_MAPTYPE_UKSMAP) { 1167 struct vm_page fakem; 1168 1169 bzero(&fakem, sizeof(fakem)); 1170 fakem.pindex = first_pindex; 1171 fakem.flags = PG_FICTITIOUS | PG_UNQUEUED; 1172 fakem.busy_count = PBUSY_LOCKED; 1173 fakem.valid = VM_PAGE_BITS_ALL; 1174 fakem.pat_mode = VM_MEMATTR_DEFAULT; 1175 if (fs.entry->ba.uksmap(&fs.entry->ba, UKSMAPOP_FAULT, 1176 fs.entry->aux.dev, &fakem)) { 1177 *errorp = KERN_FAILURE; 1178 fs.m = NULL; 1179 unlock_things(&fs); 1180 goto done2; 1181 } 1182 fs.m = PHYS_TO_VM_PAGE(fakem.phys_addr); 1183 vm_page_hold(fs.m); 1184 if (busyp) 1185 *busyp = 0; /* don't need to busy R or W */ 1186 unlock_things(&fs); 1187 *errorp = 0; 1188 goto done; 1189 } 1190 1191 1192 /* 1193 * A system map entry may return a NULL object. No object means 1194 * no pager means an unrecoverable kernel fault. 1195 */ 1196 if (fs.first_ba == NULL) { 1197 panic("vm_fault: unrecoverable fault at %p in entry %p", 1198 (void *)vaddr, fs.entry); 1199 } 1200 1201 /* 1202 * Fail here if not a trivial anonymous page fault and TDF_NOFAULT 1203 * is set. 1204 * 1205 * Unfortunately a deadlock can occur if we are forced to page-in 1206 * from swap, but diving all the way into the vm_pager_get_page() 1207 * function to find out is too much. Just check the object type. 1208 */ 1209 if ((curthread->td_flags & TDF_NOFAULT) && 1210 (retry || 1211 fs.first_ba->object->type == OBJT_VNODE || 1212 fs.first_ba->object->type == OBJT_SWAP || 1213 fs.first_ba->backing_ba)) { 1214 *errorp = KERN_FAILURE; 1215 unlock_things(&fs); 1216 fs.m = NULL; 1217 goto done2; 1218 } 1219 1220 /* 1221 * If the entry is wired we cannot change the page protection. 1222 */ 1223 if (fs.wflags & FW_WIRED) 1224 fault_type = fs.first_prot; 1225 1226 /* 1227 * Make a reference to this object to prevent its disposal while we 1228 * are messing with it. Once we have the reference, the map is free 1229 * to be diddled. Since objects reference their shadows (and copies), 1230 * they will stay around as well. 1231 * 1232 * The reference should also prevent an unexpected collapse of the 1233 * parent that might move pages from the current object into the 1234 * parent unexpectedly, resulting in corruption. 1235 * 1236 * Bump the paging-in-progress count to prevent size changes (e.g. 1237 * truncation operations) during I/O. This must be done after 1238 * obtaining the vnode lock in order to avoid possible deadlocks. 1239 */ 1240 if (fs.first_ba->flags & VM_MAP_BACK_EXCL_HEUR) 1241 fs.first_shared = 0; 1242 1243 if (fs.first_shared) 1244 vm_object_hold_shared(fs.first_ba->object); 1245 else 1246 vm_object_hold(fs.first_ba->object); 1247 fs.first_ba_held = 1; 1248 if (fs.vp == NULL) 1249 fs.vp = vnode_pager_lock(fs.first_ba); /* shared */ 1250 1251 /* 1252 * The page we want is at (first_object, first_pindex), but if the 1253 * vm_map_entry is VM_MAPTYPE_VPAGETABLE we have to traverse the 1254 * page table to figure out the actual pindex. 1255 * 1256 * NOTE! 
DEVELOPMENT IN PROGRESS, THIS IS AN INITIAL IMPLEMENTATION
	 *	 ONLY
	 */
	if (fs.entry->maptype == VM_MAPTYPE_VPAGETABLE) {
		result = vm_fault_vpagetable(&fs, &first_pindex,
					     fs.entry->aux.master_pde,
					     fault_type, 1);
		if (result == KERN_TRY_AGAIN) {
			++retry;
			goto RetryFault;
		}
		if (result != KERN_SUCCESS) {
			*errorp = result;
			fs.m = NULL;
			goto done;
		}
	}

	/*
	 * Now we have the actual (object, pindex), fault in the page.  If
	 * vm_fault_object() fails it will unlock and deallocate the FS
	 * data.  If it succeeds everything remains locked and fs->ba->object
	 * will have an additional PIP count if fs->ba != fs->first_ba.
	 */
	fs.m = NULL;
	result = vm_fault_object(&fs, first_pindex, fault_type, 1);

	if (result == KERN_TRY_AGAIN) {
		KKASSERT(fs.first_ba_held == 0);
		++retry;
		didcow |= fs.wflags & FW_DIDCOW;
		goto RetryFault;
	}
	if (result != KERN_SUCCESS) {
		*errorp = result;
		fs.m = NULL;
		goto done;
	}

	if ((orig_fault_type & VM_PROT_WRITE) &&
	    (fs.prot & VM_PROT_WRITE) == 0) {
		*errorp = KERN_PROTECTION_FAILURE;
		unlock_things(&fs);
		fs.m = NULL;
		goto done;
	}

	/*
	 * Generally speaking we don't want to update the pmap because
	 * this routine can be called many times for situations that do
	 * not require updating the pmap, not to mention the page might
	 * already be in the pmap.
	 *
	 * However, if our vm_map_lookup() results in a COW, we need to
	 * at least remove the pte from the pmap to guarantee proper
	 * visibility of modifications made to the process.  For example,
	 * modifications made by vkernel uiocopy/related routines and
	 * modifications made by ptrace().
	 */
	vm_page_flag_set(fs.m, PG_REFERENCED);
#if 0
	pmap_enter(fs.map->pmap, vaddr, fs.m, fs.prot,
		   fs.wflags & FW_WIRED, NULL);
	mycpu->gd_cnt.v_vm_faults++;
	if (curthread->td_lwp)
		++curthread->td_lwp->lwp_ru.ru_minflt;
#endif
	if ((fs.wflags | didcow) & FW_DIDCOW) {
		pmap_remove(fs.map->pmap,
			    vaddr & ~PAGE_MASK,
			    (vaddr & ~PAGE_MASK) + PAGE_SIZE);
	}

	/*
	 * On success vm_fault_object() does not unlock or deallocate, and fs.m
	 * will contain a busied page.  So we must unlock here after having
	 * messed with the pmap.
	 */
	unlock_things(&fs);

	/*
	 * Return a held page.  We are not doing any pmap manipulation so do
	 * not set PG_MAPPED.  However, adjust the page flags according to
	 * the fault type because the caller may not use a managed pmapping
	 * (so we don't want to lose the fact that the page will be dirtied
	 * if a write fault was specified).
	 */
	if (fault_type & VM_PROT_WRITE)
		vm_page_dirty(fs.m);
	vm_page_activate(fs.m);

	if (curthread->td_lwp) {
		if (fs.hardfault) {
			curthread->td_lwp->lwp_ru.ru_majflt++;
		} else {
			curthread->td_lwp->lwp_ru.ru_minflt++;
		}
	}

	/*
	 * Unlock everything, and return the held or busied page.
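	 *
	 * Illustrative sketch (hypothetical caller, not code from this
	 * file): the caller disposes of the page according to *busyp, e.g.
	 *
	 *	busy = 0;
	 *	m = vm_fault_page(map, va, VM_PROT_WRITE, VM_FAULT_NORMAL,
	 *			  &error, &busy);
	 *	if (m) {
	 *		(access the page contents)
	 *		if (busy)
	 *			vm_page_wakeup(m);	(returned hard-busied)
	 *		else
	 *			vm_page_unhold(m);	(returned held only)
	 *	}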
1357 */ 1358 if (busyp) { 1359 if (fault_type & VM_PROT_WRITE) { 1360 vm_page_dirty(fs.m); 1361 *busyp = 1; 1362 } else { 1363 *busyp = 0; 1364 vm_page_hold(fs.m); 1365 vm_page_wakeup(fs.m); 1366 } 1367 } else { 1368 vm_page_hold(fs.m); 1369 vm_page_wakeup(fs.m); 1370 } 1371 /*vm_object_deallocate(fs.first_ba->object);*/ 1372 *errorp = 0; 1373 1374 done: 1375 KKASSERT(fs.first_ba_held == 0); 1376 done2: 1377 return(fs.m); 1378 } 1379 1380 /* 1381 * Fault in the specified (object,offset), dirty the returned page as 1382 * needed. If the requested fault_type cannot be done NULL and an 1383 * error is returned. 1384 * 1385 * A held (but not busied) page is returned. 1386 * 1387 * The passed in object must be held as specified by the shared 1388 * argument. 1389 */ 1390 vm_page_t 1391 vm_fault_object_page(vm_object_t object, vm_ooffset_t offset, 1392 vm_prot_t fault_type, int fault_flags, 1393 int *sharedp, int *errorp) 1394 { 1395 int result; 1396 vm_pindex_t first_pindex; 1397 struct faultstate fs; 1398 struct vm_map_entry entry; 1399 1400 /* 1401 * Since we aren't actually faulting the page into a 1402 * pmap we can just fake the entry.ba. 1403 */ 1404 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object)); 1405 bzero(&entry, sizeof(entry)); 1406 entry.maptype = VM_MAPTYPE_NORMAL; 1407 entry.protection = entry.max_protection = fault_type; 1408 entry.ba.backing_ba = NULL; 1409 entry.ba.object = object; 1410 entry.ba.offset = 0; 1411 1412 fs.hardfault = 0; 1413 fs.fault_flags = fault_flags; 1414 fs.map = NULL; 1415 fs.shared = vm_shared_fault; 1416 fs.first_shared = *sharedp; 1417 fs.msoftonly = 0; 1418 fs.vp = NULL; 1419 fs.first_ba_held = -1; /* object held across call, prevent drop */ 1420 KKASSERT((fault_flags & VM_FAULT_WIRE_MASK) == 0); 1421 1422 /* 1423 * VM_FAULT_UNSWAP - swap_pager_unswapped() needs an exclusive object 1424 * VM_FAULT_DIRTY - may require swap_pager_unswapped() later, but 1425 * we can try shared first. 1426 */ 1427 if (fs.first_shared && (fault_flags & VM_FAULT_UNSWAP)) { 1428 fs.first_shared = 0; 1429 vm_object_upgrade(object); 1430 } 1431 1432 /* 1433 * Retry loop as needed (typically for shared->exclusive transitions) 1434 */ 1435 RetryFault: 1436 *sharedp = fs.first_shared; 1437 first_pindex = OFF_TO_IDX(offset); 1438 fs.first_ba = &entry.ba; 1439 fs.ba = fs.first_ba; 1440 fs.entry = &entry; 1441 fs.first_prot = fault_type; 1442 fs.wflags = 0; 1443 1444 /* 1445 * Make a reference to this object to prevent its disposal while we 1446 * are messing with it. Once we have the reference, the map is free 1447 * to be diddled. Since objects reference their shadows (and copies), 1448 * they will stay around as well. 1449 * 1450 * The reference should also prevent an unexpected collapse of the 1451 * parent that might move pages from the current object into the 1452 * parent unexpectedly, resulting in corruption. 1453 * 1454 * Bump the paging-in-progress count to prevent size changes (e.g. 1455 * truncation operations) during I/O. This must be done after 1456 * obtaining the vnode lock in order to avoid possible deadlocks. 
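	 *
	 * For reference, an illustrative (hypothetical) caller of this
	 * routine, which must hold the object as advertised by *sharedp
	 * and release the returned held page itself:
	 *
	 *	shared = 1;
	 *	vm_object_hold_shared(object);
	 *	m = vm_fault_object_page(object, IDX_TO_OFF(pindex),
	 *				 VM_PROT_READ, 0, &shared, &error);
	 *	if (m) {
	 *		(access the page contents)
	 *		vm_page_unhold(m);
	 *	}
	 *	vm_object_drop(object);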
1457 */ 1458 if (fs.vp == NULL) 1459 fs.vp = vnode_pager_lock(fs.first_ba); 1460 1461 fs.lookup_still_valid = 1; 1462 fs.first_m = NULL; 1463 1464 #if 0 1465 /* XXX future - ability to operate on VM object using vpagetable */ 1466 if (fs.entry->maptype == VM_MAPTYPE_VPAGETABLE) { 1467 result = vm_fault_vpagetable(&fs, &first_pindex, 1468 fs.entry->aux.master_pde, 1469 fault_type, 0); 1470 if (result == KERN_TRY_AGAIN) { 1471 if (fs.first_shared == 0 && *sharedp) 1472 vm_object_upgrade(object); 1473 goto RetryFault; 1474 } 1475 if (result != KERN_SUCCESS) { 1476 *errorp = result; 1477 return (NULL); 1478 } 1479 } 1480 #endif 1481 1482 /* 1483 * Now we have the actual (object, pindex), fault in the page. If 1484 * vm_fault_object() fails it will unlock and deallocate the FS 1485 * data. If it succeeds everything remains locked and fs->ba->object 1486 * will have an additinal PIP count if fs->ba != fs->first_ba. 1487 * 1488 * On KERN_TRY_AGAIN vm_fault_object() leaves fs.first_ba intact. 1489 * We may have to upgrade its lock to handle the requested fault. 1490 */ 1491 result = vm_fault_object(&fs, first_pindex, fault_type, 0); 1492 1493 if (result == KERN_TRY_AGAIN) { 1494 if (fs.first_shared == 0 && *sharedp) 1495 vm_object_upgrade(object); 1496 goto RetryFault; 1497 } 1498 if (result != KERN_SUCCESS) { 1499 *errorp = result; 1500 return(NULL); 1501 } 1502 1503 if ((fault_type & VM_PROT_WRITE) && (fs.prot & VM_PROT_WRITE) == 0) { 1504 *errorp = KERN_PROTECTION_FAILURE; 1505 unlock_things(&fs); 1506 return(NULL); 1507 } 1508 1509 /* 1510 * On success vm_fault_object() does not unlock or deallocate, so we 1511 * do it here. Note that the returned fs.m will be busied. 1512 */ 1513 unlock_things(&fs); 1514 1515 /* 1516 * Return a held page. We are not doing any pmap manipulation so do 1517 * not set PG_MAPPED. However, adjust the page flags according to 1518 * the fault type because the caller may not use a managed pmapping 1519 * (so we don't want to lose the fact that the page will be dirtied 1520 * if a write fault was specified). 1521 */ 1522 vm_page_hold(fs.m); 1523 vm_page_activate(fs.m); 1524 if ((fault_type & VM_PROT_WRITE) || (fault_flags & VM_FAULT_DIRTY)) 1525 vm_page_dirty(fs.m); 1526 if (fault_flags & VM_FAULT_UNSWAP) 1527 swap_pager_unswapped(fs.m); 1528 1529 /* 1530 * Indicate that the page was accessed. 1531 */ 1532 vm_page_flag_set(fs.m, PG_REFERENCED); 1533 1534 if (curthread->td_lwp) { 1535 if (fs.hardfault) { 1536 curthread->td_lwp->lwp_ru.ru_majflt++; 1537 } else { 1538 curthread->td_lwp->lwp_ru.ru_minflt++; 1539 } 1540 } 1541 1542 /* 1543 * Unlock everything, and return the held page. 1544 */ 1545 vm_page_wakeup(fs.m); 1546 /*vm_object_deallocate(fs.first_ba->object);*/ 1547 1548 *errorp = 0; 1549 return(fs.m); 1550 } 1551 1552 /* 1553 * Translate the virtual page number (first_pindex) that is relative 1554 * to the address space into a logical page number that is relative to the 1555 * backing object. Use the virtual page table pointed to by (vpte). 1556 * 1557 * Possibly downgrade the protection based on the vpte bits. 1558 * 1559 * This implements an N-level page table. Any level can terminate the 1560 * scan by setting VPTE_PS. A linear mapping is accomplished by setting 1561 * VPTE_PS in the master page directory entry set via mcontrol(MADV_SETMAP). 
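 *
 * Illustrative example (for exposition only): in the linear-mapping case,
 * where the master pde itself has VPTE_PS set, no directory pages are
 * walked at all and a fault on virtual page index N simply resolves to
 *
 *	*pindex = ((vpte & VPTE_FRAME) >> PAGE_SHIFT) +
 *		  (N & ((1L << vshift) - 1))
 *
 * i.e. the pde's frame plus the untranslated low-order index bits.  Each
 * additional table level consumes VPTE_PAGE_BITS of the index before the
 * same combination step is applied at the bottom of this function.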
 */
static
int
vm_fault_vpagetable(struct faultstate *fs, vm_pindex_t *pindex,
		    vpte_t vpte, int fault_type, int allow_nofault)
{
	struct lwbuf *lwb;
	struct lwbuf lwb_cache;
	int vshift = VPTE_FRAME_END - PAGE_SHIFT; /* index bits remaining */
	int result;
	vpte_t *ptep;

	ASSERT_LWKT_TOKEN_HELD(vm_object_token(fs->first_ba->object));
	for (;;) {
		/*
		 * We cannot proceed if the vpte is not valid, not readable
		 * for a read fault, not writable for a write fault, or
		 * not executable for an instruction execution fault.
		 */
		if ((vpte & VPTE_V) == 0) {
			unlock_things(fs);
			return (KERN_FAILURE);
		}
		if ((fault_type & VM_PROT_WRITE) && (vpte & VPTE_RW) == 0) {
			unlock_things(fs);
			return (KERN_FAILURE);
		}
		if ((fault_type & VM_PROT_EXECUTE) && (vpte & VPTE_NX)) {
			unlock_things(fs);
			return (KERN_FAILURE);
		}
		if ((vpte & VPTE_PS) || vshift == 0)
			break;

		/*
		 * Get the page table page.  Nominally we only read the page
		 * table, but since we are actively setting VPTE_M and VPTE_A,
		 * tell vm_fault_object() that we are writing it.
		 *
		 * There is currently no real need to optimize this.
		 */
		result = vm_fault_object(fs, (vpte & VPTE_FRAME) >> PAGE_SHIFT,
					 VM_PROT_READ|VM_PROT_WRITE,
					 allow_nofault);
		if (result != KERN_SUCCESS)
			return (result);

		/*
		 * Process the returned fs.m and look up the page table
		 * entry in the page table page.
		 */
		vshift -= VPTE_PAGE_BITS;
		lwb = lwbuf_alloc(fs->m, &lwb_cache);
		ptep = ((vpte_t *)lwbuf_kva(lwb) +
			((*pindex >> vshift) & VPTE_PAGE_MASK));
		vm_page_activate(fs->m);

		/*
		 * Page table write-back - entire operation including
		 * validation of the pte must be atomic to avoid races
		 * against the vkernel changing the pte.
		 *
		 * If the vpte is valid for the requested operation, do
		 * a write-back to the page table.
		 *
		 * XXX VPTE_M is not set properly for page directory pages.
		 * It doesn't get set in the page directory if the page table
		 * is modified during a read access.
		 */
		for (;;) {
			vpte_t nvpte;

			/*
			 * Reload for the cmpset, but make sure the pte is
			 * still valid.
			 */
			vpte = *ptep;
			cpu_ccfence();
			nvpte = vpte;

			if ((vpte & VPTE_V) == 0)
				break;

			if ((fault_type & VM_PROT_WRITE) && (vpte & VPTE_RW))
				nvpte |= VPTE_M | VPTE_A;
			if (fault_type & (VM_PROT_READ | VM_PROT_EXECUTE))
				nvpte |= VPTE_A;
			if (vpte == nvpte)
				break;
			if (atomic_cmpset_long(ptep, vpte, nvpte)) {
				vm_page_dirty(fs->m);
				break;
			}
		}
		lwbuf_free(lwb);
		vm_page_flag_set(fs->m, PG_REFERENCED);
		vm_page_wakeup(fs->m);
		fs->m = NULL;
		cleanup_fault(fs);
	}

	/*
	 * When the vkernel sets VPTE_RW it expects the real kernel to
	 * reflect VPTE_M back when the page is modified via the mapping.
	 * In order to accomplish this the real kernel must map the page
	 * read-only for read faults and use write faults to reflect VPTE_M
	 * back.
	 *
	 * Once VPTE_M has been set, the real kernel's pte allows writing.
	 * If the vkernel clears VPTE_M the vkernel must be sure to
	 * MADV_INVAL the real kernel's mappings to force the real kernel
	 * to re-fault on the next write so it can set VPTE_M again.
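	 *
	 * Illustrative vkernel-side sequence (assumed, for exposition only):
	 *
	 *	vpte &= ~VPTE_M;			(clear modified bit)
	 *	madvise(uva, PAGE_SIZE, MADV_INVAL);	(invalidate real pte)
	 *
	 * so that the next write through the real kernel's mapping faults
	 * again and re-asserts VPTE_M via the write-back loop above.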
	 */
	if ((fault_type & VM_PROT_WRITE) == 0 &&
	    (vpte & (VPTE_RW | VPTE_M)) != (VPTE_RW | VPTE_M)) {
		fs->first_prot &= ~VM_PROT_WRITE;
	}

	/*
	 * Disable EXECUTE perms if NX bit is set.
	 */
	if (vpte & VPTE_NX)
		fs->first_prot &= ~VM_PROT_EXECUTE;

	/*
	 * Combine remaining address bits with the vpte.
	 */
	*pindex = ((vpte & VPTE_FRAME) >> PAGE_SHIFT) +
		  (*pindex & ((1L << vshift) - 1));
	return (KERN_SUCCESS);
}


/*
 * This is the core of the vm_fault code.
 *
 * Do all operations required to fault-in (fs.first_ba->object, pindex).
 * Run through the backing store as necessary and do required COW or virtual
 * copy operations.  The caller has already fully resolved the vm_map_entry
 * and, if appropriate, has created a copy-on-write layer.  All we need to
 * do is iterate the object chain.
 *
 * On failure (fs) is unlocked and deallocated and the caller may return or
 * retry depending on the failure code.  On success (fs) is NOT unlocked or
 * deallocated, fs.m will contain a resolved, busied page, and fs.ba's
 * object will have an additional PIP count if it is not equal to
 * fs.first_ba.
 *
 * If locks based on fs->first_shared or fs->shared are insufficient,
 * clear the appropriate field(s) and return RETRY.  COWs require that
 * first_shared be 0, while page allocations (or frees) require that
 * shared be 0.  Renames require that both be 0.
 *
 * NOTE! fs->[first_]shared might be set with VM_FAULT_DIRTY also set.
 *	 We will have to retry with it exclusive if the vm_page is
 *	 PG_SWAPPED.
 *
 * fs->first_ba->object must be held on call.
 */
static
int
vm_fault_object(struct faultstate *fs, vm_pindex_t first_pindex,
		vm_prot_t fault_type, int allow_nofault)
{
	vm_map_backing_t next_ba;
	vm_pindex_t pindex;
	int error;

	ASSERT_LWKT_TOKEN_HELD(vm_object_token(fs->first_ba->object));
	fs->prot = fs->first_prot;
	pindex = first_pindex;
	KKASSERT(fs->ba == fs->first_ba);

	vm_object_pip_add(fs->first_ba->object, 1);

	/*
	 * If a read fault occurs we try to upgrade the page protection
	 * and make it also writable if possible.  There are four cases
	 * where we cannot make the page mapping writable:
	 *
	 * (1) The mapping is read-only or the VM object is read-only,
	 *     fs->prot above will simply not have VM_PROT_WRITE set.
	 *
	 * (2) If the mapping is a virtual page table, fs->first_prot will
	 *     have already been properly adjusted by vm_fault_vpagetable()
	 *     to detect writes so we can set VPTE_M in the virtual page
	 *     table.  Used by vkernels.
	 *
	 * (3) If the VM page is read-only or copy-on-write, upgrading would
	 *     just result in an unnecessary COW fault.
	 *
	 * (4) If the pmap specifically requests A/M bit emulation, downgrade
	 *     here.
1755 */ 1756 #if 0 1757 /* see vpagetable code */ 1758 if (fs->entry->maptype == VM_MAPTYPE_VPAGETABLE) { 1759 if ((fault_type & VM_PROT_WRITE) == 0) 1760 fs->prot &= ~VM_PROT_WRITE; 1761 } 1762 #endif 1763 1764 if (curthread->td_lwp && curthread->td_lwp->lwp_vmspace && 1765 pmap_emulate_ad_bits(&curthread->td_lwp->lwp_vmspace->vm_pmap)) { 1766 if ((fault_type & VM_PROT_WRITE) == 0) 1767 fs->prot &= ~VM_PROT_WRITE; 1768 } 1769 1770 /* vm_object_hold(fs->ba->object); implied b/c ba == first_ba */ 1771 1772 for (;;) { 1773 /* 1774 * If the object is dead, we stop here 1775 */ 1776 if (fs->ba->object->flags & OBJ_DEAD) { 1777 vm_object_pip_wakeup(fs->first_ba->object); 1778 unlock_things(fs); 1779 return (KERN_PROTECTION_FAILURE); 1780 } 1781 1782 /* 1783 * See if the page is resident. Wait/Retry if the page is 1784 * busy (lots of stuff may have changed so we can't continue 1785 * in that case). 1786 * 1787 * We can theoretically allow the soft-busy case on a read 1788 * fault if the page is marked valid, but since such 1789 * pages are typically already pmap'd, putting that 1790 * special case in might be more effort then it is 1791 * worth. We cannot under any circumstances mess 1792 * around with a vm_page_t->busy page except, perhaps, 1793 * to pmap it. 1794 */ 1795 fs->m = vm_page_lookup_busy_try(fs->ba->object, pindex, 1796 TRUE, &error); 1797 if (error) { 1798 vm_object_pip_wakeup(fs->first_ba->object); 1799 unlock_things(fs); 1800 vm_page_sleep_busy(fs->m, TRUE, "vmpfw"); 1801 mycpu->gd_cnt.v_intrans++; 1802 fs->m = NULL; 1803 return (KERN_TRY_AGAIN); 1804 } 1805 if (fs->m) { 1806 /* 1807 * The page is busied for us. 1808 * 1809 * If reactivating a page from PQ_CACHE we may have 1810 * to rate-limit. 1811 */ 1812 int queue = fs->m->queue; 1813 vm_page_unqueue_nowakeup(fs->m); 1814 1815 if ((queue - fs->m->pc) == PQ_CACHE && 1816 vm_page_count_severe()) { 1817 vm_page_activate(fs->m); 1818 vm_page_wakeup(fs->m); 1819 fs->m = NULL; 1820 vm_object_pip_wakeup(fs->first_ba->object); 1821 unlock_things(fs); 1822 if (allow_nofault == 0 || 1823 (curthread->td_flags & TDF_NOFAULT) == 0) { 1824 thread_t td; 1825 1826 vm_wait_pfault(); 1827 td = curthread; 1828 if (td->td_proc && (td->td_proc->p_flags & P_LOWMEMKILL)) 1829 return (KERN_PROTECTION_FAILURE); 1830 } 1831 return (KERN_TRY_AGAIN); 1832 } 1833 1834 /* 1835 * If it still isn't completely valid (readable), 1836 * or if a read-ahead-mark is set on the VM page, 1837 * jump to readrest, else we found the page and 1838 * can return. 1839 * 1840 * We can release the spl once we have marked the 1841 * page busy. 1842 */ 1843 if (fs->m->object != &kernel_object) { 1844 if ((fs->m->valid & VM_PAGE_BITS_ALL) != 1845 VM_PAGE_BITS_ALL) { 1846 goto readrest; 1847 } 1848 if (fs->m->flags & PG_RAM) { 1849 if (debug_cluster) 1850 kprintf("R"); 1851 vm_page_flag_clear(fs->m, PG_RAM); 1852 goto readrest; 1853 } 1854 } 1855 atomic_clear_int(&fs->first_ba->flags, 1856 VM_MAP_BACK_EXCL_HEUR); 1857 break; /* break to PAGE HAS BEEN FOUND */ 1858 } 1859 1860 /* 1861 * Page is not resident, If this is the search termination 1862 * or the pager might contain the page, allocate a new page. 1863 */ 1864 if (TRYPAGER(fs) || fs->ba == fs->first_ba) { 1865 /* 1866 * If this is a SWAP object we can use the shared 1867 * lock to check existence of a swap block. If 1868 * there isn't one we can skip to the next object. 1869 * 1870 * However, if this is the first object we allocate 1871 * a page now just in case we need to copy to it 1872 * later. 
1873 */ 1874 if (fs->ba != fs->first_ba && 1875 fs->ba->object->type == OBJT_SWAP) { 1876 if (swap_pager_haspage_locked(fs->ba->object, 1877 pindex) == 0) { 1878 goto next; 1879 } 1880 } 1881 1882 /* 1883 * Allocating, must be exclusive. 1884 */ 1885 atomic_set_int(&fs->first_ba->flags, 1886 VM_MAP_BACK_EXCL_HEUR); 1887 if (fs->ba == fs->first_ba && fs->first_shared) { 1888 fs->first_shared = 0; 1889 vm_object_pip_wakeup(fs->first_ba->object); 1890 unlock_things(fs); 1891 return (KERN_TRY_AGAIN); 1892 } 1893 if (fs->ba != fs->first_ba && fs->shared) { 1894 fs->first_shared = 0; 1895 fs->shared = 0; 1896 vm_object_pip_wakeup(fs->first_ba->object); 1897 unlock_things(fs); 1898 return (KERN_TRY_AGAIN); 1899 } 1900 1901 /* 1902 * If the page is beyond the object size we fail 1903 */ 1904 if (pindex >= fs->ba->object->size) { 1905 vm_object_pip_wakeup(fs->first_ba->object); 1906 unlock_things(fs); 1907 return (KERN_PROTECTION_FAILURE); 1908 } 1909 1910 /* 1911 * Allocate a new page for this object/offset pair. 1912 * 1913 * It is possible for the allocation to race, so 1914 * handle the case. 1915 */ 1916 fs->m = NULL; 1917 if (!vm_page_count_severe()) { 1918 fs->m = vm_page_alloc(fs->ba->object, pindex, 1919 ((fs->vp || fs->ba->backing_ba) ? 1920 VM_ALLOC_NULL_OK | VM_ALLOC_NORMAL : 1921 VM_ALLOC_NULL_OK | VM_ALLOC_NORMAL | 1922 VM_ALLOC_USE_GD | VM_ALLOC_ZERO)); 1923 } 1924 if (fs->m == NULL) { 1925 vm_object_pip_wakeup(fs->first_ba->object); 1926 unlock_things(fs); 1927 if (allow_nofault == 0 || 1928 (curthread->td_flags & TDF_NOFAULT) == 0) { 1929 thread_t td; 1930 1931 vm_wait_pfault(); 1932 td = curthread; 1933 if (td->td_proc && (td->td_proc->p_flags & P_LOWMEMKILL)) 1934 return (KERN_PROTECTION_FAILURE); 1935 } 1936 return (KERN_TRY_AGAIN); 1937 } 1938 1939 /* 1940 * Fall through to readrest. We have a new page which 1941 * will have to be paged (since m->valid will be 0). 1942 */ 1943 } 1944 1945 readrest: 1946 /* 1947 * We have found an invalid or partially valid page, a 1948 * page with a read-ahead mark which might be partially or 1949 * fully valid (and maybe dirty too), or we have allocated 1950 * a new page. 1951 * 1952 * Attempt to fault-in the page if there is a chance that the 1953 * pager has it, and potentially fault in additional pages 1954 * at the same time. 1955 * 1956 * If TRYPAGER is true then fs.m will be non-NULL and busied 1957 * for us. 1958 */ 1959 if (TRYPAGER(fs)) { 1960 u_char behavior = vm_map_entry_behavior(fs->entry); 1961 vm_object_t object; 1962 vm_page_t first_m; 1963 int seqaccess; 1964 int rv; 1965 1966 if (behavior == MAP_ENTRY_BEHAV_RANDOM) 1967 seqaccess = 0; 1968 else 1969 seqaccess = -1; 1970 1971 /* 1972 * Doing I/O may synchronously insert additional 1973 * pages so we can't be shared at this point either. 1974 * 1975 * NOTE: We can't free fs->m here in the allocated 1976 * case (fs->ba != fs->first_ba) as this 1977 * would require an exclusively locked 1978 * VM object. 
1979 */ 1980 if (fs->ba == fs->first_ba && fs->first_shared) { 1981 vm_page_deactivate(fs->m); 1982 vm_page_wakeup(fs->m); 1983 fs->m = NULL; 1984 fs->first_shared = 0; 1985 vm_object_pip_wakeup(fs->first_ba->object); 1986 unlock_things(fs); 1987 return (KERN_TRY_AGAIN); 1988 } 1989 if (fs->ba != fs->first_ba && fs->shared) { 1990 vm_page_deactivate(fs->m); 1991 vm_page_wakeup(fs->m); 1992 fs->m = NULL; 1993 fs->first_shared = 0; 1994 fs->shared = 0; 1995 vm_object_pip_wakeup(fs->first_ba->object); 1996 unlock_things(fs); 1997 return (KERN_TRY_AGAIN); 1998 } 1999 2000 object = fs->ba->object; 2001 first_m = NULL; 2002 2003 /* object is held, no more access to entry or ba's */ 2004 2005 /* 2006 * Acquire the page data. We still hold object 2007 * and the page has been BUSY's. 2008 * 2009 * We own the page, but we must re-issue the lookup 2010 * because the pager may have replaced it (for example, 2011 * in order to enter a fictitious page into the 2012 * object). In this situation the pager will have 2013 * cleaned up the old page and left the new one 2014 * busy for us. 2015 * 2016 * If we got here through a PG_RAM read-ahead 2017 * mark the page may be partially dirty and thus 2018 * not freeable. Don't bother checking to see 2019 * if the pager has the page because we can't free 2020 * it anyway. We have to depend on the get_page 2021 * operation filling in any gaps whether there is 2022 * backing store or not. 2023 * 2024 * We must dispose of the page (fs->m) and also 2025 * possibly first_m (the fronting layer). If 2026 * this is a write fault leave the page intact 2027 * because we will probably have to copy fs->m 2028 * to fs->first_m on the retry. If this is a 2029 * read fault we probably won't need the page. 2030 */ 2031 rv = vm_pager_get_page(object, &fs->m, seqaccess); 2032 2033 if (rv == VM_PAGER_OK) { 2034 ++fs->hardfault; 2035 fs->m = vm_page_lookup(object, pindex); 2036 if (fs->m) { 2037 vm_page_activate(fs->m); 2038 vm_page_wakeup(fs->m); 2039 fs->m = NULL; 2040 } 2041 2042 if (fs->m) { 2043 /* have page */ 2044 break; 2045 } 2046 vm_object_pip_wakeup(fs->first_ba->object); 2047 unlock_things(fs); 2048 return (KERN_TRY_AGAIN); 2049 } 2050 2051 /* 2052 * If the pager doesn't have the page, continue on 2053 * to the next object. Retain the vm_page if this 2054 * is the first object, we may need to copy into 2055 * it later. 2056 */ 2057 if (rv == VM_PAGER_FAIL) { 2058 if (fs->ba != fs->first_ba) { 2059 vm_page_free(fs->m); 2060 fs->m = NULL; 2061 } 2062 goto next; 2063 } 2064 2065 /* 2066 * Remove the bogus page (which does not exist at this 2067 * object/offset). 2068 * 2069 * Also wake up any other process that may want to bring 2070 * in this page. 2071 * 2072 * If this is the top-level object, we must leave the 2073 * busy page to prevent another process from rushing 2074 * past us, and inserting the page in that object at 2075 * the same time that we are. 2076 */ 2077 if (rv == VM_PAGER_ERROR) { 2078 if (curproc) { 2079 kprintf("vm_fault: pager read error, " 2080 "pid %d (%s)\n", 2081 curproc->p_pid, 2082 curproc->p_comm); 2083 } else { 2084 kprintf("vm_fault: pager read error, " 2085 "thread %p (%s)\n", 2086 curthread, 2087 curthread->td_comm); 2088 } 2089 } 2090 2091 /* 2092 * I/O error or data outside pager's range. 
2093 */ 2094 if (fs->m) { 2095 vnode_pager_freepage(fs->m); 2096 fs->m = NULL; 2097 } 2098 if (first_m) { 2099 vm_page_free(first_m); 2100 first_m = NULL; /* safety */ 2101 } 2102 vm_object_pip_wakeup(object); 2103 unlock_things(fs); 2104 2105 switch(rv) { 2106 case VM_PAGER_ERROR: 2107 return (KERN_FAILURE); 2108 case VM_PAGER_BAD: 2109 return (KERN_PROTECTION_FAILURE); 2110 default: 2111 return (KERN_PROTECTION_FAILURE); 2112 } 2113 2114 #if 0 2115 /* 2116 * Data outside the range of the pager or an I/O error 2117 * 2118 * The page may have been wired during the pagein, 2119 * e.g. by the buffer cache, and cannot simply be 2120 * freed. Call vnode_pager_freepage() to deal with it. 2121 * 2122 * The object is not held shared so we can safely 2123 * free the page. 2124 */ 2125 if (fs->ba != fs->first_ba) { 2126 2127 /* 2128 * XXX - we cannot just fall out at this 2129 * point, m has been freed and is invalid! 2130 */ 2131 } 2132 2133 /* 2134 * XXX - the check for kernel_map is a kludge to work 2135 * around having the machine panic on a kernel space 2136 * fault w/ I/O error. 2137 */ 2138 if (((fs->map != &kernel_map) && 2139 (rv == VM_PAGER_ERROR)) || (rv == VM_PAGER_BAD)) { 2140 if (fs->m) { 2141 /* from just above */ 2142 KKASSERT(fs->first_shared == 0); 2143 vnode_pager_freepage(fs->m); 2144 fs->m = NULL; 2145 } 2146 /* NOT REACHED */ 2147 } 2148 #endif 2149 } 2150 2151 next: 2152 /* 2153 * We get here if the object has a default pager (or unwiring) 2154 * or the pager doesn't have the page. 2155 * 2156 * fs->first_m will be used for the COW unless we find a 2157 * deeper page to be mapped read-only, in which case the 2158 * unlock*(fs) will free first_m. 2159 */ 2160 if (fs->ba == fs->first_ba) 2161 fs->first_m = fs->m; 2162 2163 /* 2164 * Move on to the next object. The chain lock should prevent 2165 * the backing_object from getting ripped out from under us. 2166 * 2167 * The object lock for the next object is governed by 2168 * fs->shared. 2169 */ 2170 next_ba = fs->ba->backing_ba; 2171 if (next_ba == NULL) { 2172 /* 2173 * If there's no object left, fill the page in the top 2174 * object with zeros. 2175 */ 2176 if (fs->ba != fs->first_ba) { 2177 vm_object_pip_wakeup(fs->ba->object); 2178 vm_object_drop(fs->ba->object); 2179 fs->ba = fs->first_ba; 2180 pindex = first_pindex; 2181 fs->m = fs->first_m; 2182 } 2183 fs->first_m = NULL; 2184 2185 /* 2186 * Zero the page and mark it valid. 2187 */ 2188 vm_page_zero_fill(fs->m); 2189 mycpu->gd_cnt.v_zfod++; 2190 fs->m->valid = VM_PAGE_BITS_ALL; 2191 break; /* break to PAGE HAS BEEN FOUND */ 2192 } 2193 2194 if (fs->shared) 2195 vm_object_hold_shared(next_ba->object); 2196 else 2197 vm_object_hold(next_ba->object); 2198 KKASSERT(next_ba == fs->ba->backing_ba); 2199 pindex -= OFF_TO_IDX(fs->ba->offset); 2200 pindex += OFF_TO_IDX(next_ba->offset); 2201 2202 if (fs->ba != fs->first_ba) { 2203 vm_object_pip_wakeup(fs->ba->object); 2204 vm_object_lock_swap(); /* flip ba/next_ba */ 2205 vm_object_drop(fs->ba->object); 2206 } 2207 fs->ba = next_ba; 2208 vm_object_pip_add(next_ba->object, 1); 2209 } 2210 2211 /* 2212 * PAGE HAS BEEN FOUND. [Loop invariant still holds -- the object lock 2213 * is held.] 2214 * 2215 * object still held. 2216 * vm_map may not be locked (determined by fs->lookup_still_valid) 2217 * 2218 * local shared variable may be different from fs->shared. 2219 * 2220 * If the page is being written, but isn't already owned by the 2221 * top-level object, we have to copy it into a new page owned by the 2222 * top-level object. 
2223 */ 2224 KASSERT((fs->m->busy_count & PBUSY_LOCKED) != 0, 2225 ("vm_fault: not busy after main loop")); 2226 2227 if (fs->ba != fs->first_ba) { 2228 /* 2229 * We only really need to copy if we want to write it. 2230 */ 2231 if (fault_type & VM_PROT_WRITE) { 2232 #if 0 2233 /* CODE REFACTOR IN PROGRESS, REMOVE OPTIMIZATION */ 2234 /* 2235 * This allows pages to be virtually copied from a 2236 * backing_object into the first_object, where the 2237 * backing object has no other refs to it, and cannot 2238 * gain any more refs. Instead of a bcopy, we just 2239 * move the page from the backing object to the 2240 * first object. Note that we must mark the page 2241 * dirty in the first object so that it will go out 2242 * to swap when needed. 2243 */ 2244 if (virtual_copy_ok(fs)) { 2245 /* 2246 * (first_m) and (m) are both busied. We have 2247 * move (m) into (first_m)'s object/pindex 2248 * in an atomic fashion, then free (first_m). 2249 * 2250 * first_object is held so second remove 2251 * followed by the rename should wind 2252 * up being atomic. vm_page_free() might 2253 * block so we don't do it until after the 2254 * rename. 2255 */ 2256 vm_page_protect(fs->first_m, VM_PROT_NONE); 2257 vm_page_remove(fs->first_m); 2258 vm_page_rename(fs->m, 2259 fs->first_ba->object, 2260 first_pindex); 2261 vm_page_free(fs->first_m); 2262 fs->first_m = fs->m; 2263 fs->m = NULL; 2264 mycpu->gd_cnt.v_cow_optim++; 2265 } else 2266 #endif 2267 { 2268 /* 2269 * Oh, well, lets copy it. 2270 * 2271 * We used to unmap the original page here 2272 * because vm_fault_page() didn't and this 2273 * would cause havoc for the umtx*() code 2274 * and the procfs code. 2275 * 2276 * This is no longer necessary. The 2277 * vm_fault_page() routine will now unmap the 2278 * page after a COW, and the umtx code will 2279 * recover on its own. 2280 */ 2281 /* 2282 * NOTE: Since fs->m is a backing page, it 2283 * is read-only, so there isn't any 2284 * copy race vs writers. 2285 */ 2286 KKASSERT(fs->first_shared == 0); 2287 vm_page_copy(fs->m, fs->first_m); 2288 /* pmap_remove_specific( 2289 &curthread->td_lwp->lwp_vmspace->vm_pmap, 2290 fs->m); */ 2291 } 2292 2293 /* 2294 * We no longer need the old page or object. 2295 */ 2296 if (fs->m) 2297 release_page(fs); 2298 2299 /* 2300 * fs->ba != fs->first_ba due to above conditional 2301 */ 2302 vm_object_pip_wakeup(fs->ba->object); 2303 vm_object_drop(fs->ba->object); 2304 fs->ba = fs->first_ba; 2305 2306 /* 2307 * Only use the new page below... 2308 */ 2309 mycpu->gd_cnt.v_cow_faults++; 2310 fs->m = fs->first_m; 2311 pindex = first_pindex; 2312 } else { 2313 /* 2314 * If it wasn't a write fault avoid having to copy 2315 * the page by mapping it read-only from backing 2316 * store. The process is not allowed to modify 2317 * backing pages. 2318 */ 2319 fs->prot &= ~VM_PROT_WRITE; 2320 } 2321 } 2322 2323 /* 2324 * Relock the map if necessary, then check the generation count. 2325 * relock_map() will update fs->timestamp to account for the 2326 * relocking if necessary. 2327 * 2328 * If the count has changed after relocking then all sorts of 2329 * crap may have happened and we have to retry. 2330 * 2331 * NOTE: The relock_map() can fail due to a deadlock against 2332 * the vm_page we are holding BUSY. 
2333 */ 2334 KKASSERT(fs->lookup_still_valid != 0); 2335 #if 0 2336 if (fs->lookup_still_valid == 0 && fs->map) { 2337 if (relock_map(fs) || 2338 fs->map->timestamp != fs->map_generation) { 2339 release_page(fs); 2340 vm_object_pip_wakeup(fs->first_ba->object); 2341 unlock_things(fs); 2342 return (KERN_TRY_AGAIN); 2343 } 2344 } 2345 #endif 2346 2347 /* 2348 * If the fault is a write, we know that this page is being 2349 * written NOW so dirty it explicitly to save on pmap_is_modified() 2350 * calls later. 2351 * 2352 * If this is a NOSYNC mmap we do not want to set PG_NOSYNC 2353 * if the page is already dirty to prevent data written with 2354 * the expectation of being synced from not being synced. 2355 * Likewise if this entry does not request NOSYNC then make 2356 * sure the page isn't marked NOSYNC. Applications sharing 2357 * data should use the same flags to avoid ping ponging. 2358 * 2359 * Also tell the backing pager, if any, that it should remove 2360 * any swap backing since the page is now dirty. 2361 */ 2362 vm_page_activate(fs->m); 2363 if (fs->prot & VM_PROT_WRITE) { 2364 vm_object_set_writeable_dirty(fs->m->object); 2365 vm_set_nosync(fs->m, fs->entry); 2366 if (fs->fault_flags & VM_FAULT_DIRTY) { 2367 vm_page_dirty(fs->m); 2368 if (fs->m->flags & PG_SWAPPED) { 2369 /* 2370 * If the page is swapped out we have to call 2371 * swap_pager_unswapped() which requires an 2372 * exclusive object lock. If we are shared, 2373 * we must clear the shared flag and retry. 2374 */ 2375 if ((fs->ba == fs->first_ba && 2376 fs->first_shared) || 2377 (fs->ba != fs->first_ba && fs->shared)) { 2378 vm_page_wakeup(fs->m); 2379 fs->m = NULL; 2380 if (fs->ba == fs->first_ba) 2381 fs->first_shared = 0; 2382 else 2383 fs->shared = 0; 2384 vm_object_pip_wakeup( 2385 fs->first_ba->object); 2386 unlock_things(fs); 2387 return (KERN_TRY_AGAIN); 2388 } 2389 swap_pager_unswapped(fs->m); 2390 } 2391 } 2392 } 2393 2394 /* 2395 * We found our page at backing layer ba. Leave the layer state 2396 * intact. 2397 */ 2398 2399 vm_object_pip_wakeup(fs->first_ba->object); 2400 #if 0 2401 if (fs->ba != fs->first_ba) 2402 vm_object_drop(fs->ba->object); 2403 #endif 2404 2405 /* 2406 * Page had better still be busy. We are still locked up and 2407 * fs->ba->object will have another PIP reference for the case 2408 * where fs->ba != fs->first_ba. 2409 */ 2410 KASSERT(fs->m->busy_count & PBUSY_LOCKED, 2411 ("vm_fault: page %p not busy!", fs->m)); 2412 2413 /* 2414 * Sanity check: page must be completely valid or it is not fit to 2415 * map into user space. vm_pager_get_pages() ensures this. 2416 */ 2417 if (fs->m->valid != VM_PAGE_BITS_ALL) { 2418 vm_page_zero_invalid(fs->m, TRUE); 2419 kprintf("Warning: page %p partially invalid on fault\n", fs->m); 2420 } 2421 2422 return (KERN_SUCCESS); 2423 } 2424 2425 /* 2426 * Wire down a range of virtual addresses in a map. The entry in question 2427 * should be marked in-transition and the map must be locked. We must 2428 * release the map temporarily while faulting-in the page to avoid a 2429 * deadlock. Note that the entry may be clipped while we are blocked but 2430 * will never be freed. 2431 * 2432 * map must be locked on entry. 
2433 */ 2434 int 2435 vm_fault_wire(vm_map_t map, vm_map_entry_t entry, 2436 boolean_t user_wire, int kmflags) 2437 { 2438 boolean_t fictitious; 2439 vm_offset_t start; 2440 vm_offset_t end; 2441 vm_offset_t va; 2442 pmap_t pmap; 2443 int rv; 2444 int wire_prot; 2445 int fault_flags; 2446 vm_page_t m; 2447 2448 if (user_wire) { 2449 wire_prot = VM_PROT_READ; 2450 fault_flags = VM_FAULT_USER_WIRE; 2451 } else { 2452 wire_prot = VM_PROT_READ | VM_PROT_WRITE; 2453 fault_flags = VM_FAULT_CHANGE_WIRING; 2454 } 2455 if (kmflags & KM_NOTLBSYNC) 2456 wire_prot |= VM_PROT_NOSYNC; 2457 2458 pmap = vm_map_pmap(map); 2459 start = entry->ba.start; 2460 end = entry->ba.end; 2461 2462 switch(entry->maptype) { 2463 case VM_MAPTYPE_NORMAL: 2464 case VM_MAPTYPE_VPAGETABLE: 2465 fictitious = entry->ba.object && 2466 ((entry->ba.object->type == OBJT_DEVICE) || 2467 (entry->ba.object->type == OBJT_MGTDEVICE)); 2468 break; 2469 case VM_MAPTYPE_UKSMAP: 2470 fictitious = TRUE; 2471 break; 2472 default: 2473 fictitious = FALSE; 2474 break; 2475 } 2476 2477 if (entry->eflags & MAP_ENTRY_KSTACK) 2478 start += PAGE_SIZE; 2479 map->timestamp++; 2480 vm_map_unlock(map); 2481 2482 /* 2483 * We simulate a fault to get the page and enter it in the physical 2484 * map. 2485 */ 2486 for (va = start; va < end; va += PAGE_SIZE) { 2487 rv = vm_fault(map, va, wire_prot, fault_flags); 2488 if (rv) { 2489 while (va > start) { 2490 va -= PAGE_SIZE; 2491 m = pmap_unwire(pmap, va); 2492 if (m && !fictitious) { 2493 vm_page_busy_wait(m, FALSE, "vmwrpg"); 2494 vm_page_unwire(m, 1); 2495 vm_page_wakeup(m); 2496 } 2497 } 2498 goto done; 2499 } 2500 } 2501 rv = KERN_SUCCESS; 2502 done: 2503 vm_map_lock(map); 2504 2505 return (rv); 2506 } 2507 2508 /* 2509 * Unwire a range of virtual addresses in a map. The map should be 2510 * locked. 2511 */ 2512 void 2513 vm_fault_unwire(vm_map_t map, vm_map_entry_t entry) 2514 { 2515 boolean_t fictitious; 2516 vm_offset_t start; 2517 vm_offset_t end; 2518 vm_offset_t va; 2519 pmap_t pmap; 2520 vm_page_t m; 2521 2522 pmap = vm_map_pmap(map); 2523 start = entry->ba.start; 2524 end = entry->ba.end; 2525 fictitious = entry->ba.object && 2526 ((entry->ba.object->type == OBJT_DEVICE) || 2527 (entry->ba.object->type == OBJT_MGTDEVICE)); 2528 if (entry->eflags & MAP_ENTRY_KSTACK) 2529 start += PAGE_SIZE; 2530 2531 /* 2532 * Since the pages are wired down, we must be able to get their 2533 * mappings from the physical map system. 2534 */ 2535 for (va = start; va < end; va += PAGE_SIZE) { 2536 m = pmap_unwire(pmap, va); 2537 if (m && !fictitious) { 2538 vm_page_busy_wait(m, FALSE, "vmwrpg"); 2539 vm_page_unwire(m, 1); 2540 vm_page_wakeup(m); 2541 } 2542 } 2543 } 2544 2545 /* 2546 * Simulate write faults to bring all data into the head object, return 2547 * KERN_SUCCESS on success (which should be always unless the system runs 2548 * out of memory). 2549 * 2550 * The caller will handle destroying the backing_ba's. 
2551 */ 2552 int 2553 vm_fault_collapse(vm_map_t map, vm_map_entry_t entry) 2554 { 2555 struct faultstate fs; 2556 vm_ooffset_t scan; 2557 vm_pindex_t pindex; 2558 vm_object_t object; 2559 int rv; 2560 int all_shadowed; 2561 2562 bzero(&fs, sizeof(fs)); 2563 object = entry->ba.object; 2564 2565 fs.first_prot = entry->max_protection | /* optional VM_PROT_EXECUTE */ 2566 VM_PROT_READ | VM_PROT_WRITE | VM_PROT_OVERRIDE_WRITE; 2567 fs.fault_flags = VM_FAULT_NORMAL; 2568 fs.map = map; 2569 fs.entry = entry; 2570 fs.lookup_still_valid = -1; /* leave map atomically locked */ 2571 fs.first_ba = &entry->ba; 2572 fs.first_ba_held = -1; /* leave object held */ 2573 2574 /* fs.hardfault */ 2575 2576 vm_object_hold(object); 2577 rv = KERN_SUCCESS; 2578 2579 scan = entry->ba.start; 2580 all_shadowed = 1; 2581 2582 while (scan < entry->ba.end) { 2583 pindex = OFF_TO_IDX(entry->ba.offset + (scan - entry->ba.start)); 2584 2585 if (vm_page_lookup(object, pindex)) { 2586 scan += PAGE_SIZE; 2587 continue; 2588 } 2589 2590 all_shadowed = 0; 2591 fs.ba = fs.first_ba; 2592 fs.prot = fs.first_prot; 2593 2594 rv = vm_fault_object(&fs, pindex, fs.first_prot, 1); 2595 if (rv == KERN_TRY_AGAIN) 2596 continue; 2597 if (rv != KERN_SUCCESS) 2598 break; 2599 vm_page_flag_set(fs.m, PG_REFERENCED); 2600 vm_page_activate(fs.m); 2601 vm_page_wakeup(fs.m); 2602 scan += PAGE_SIZE; 2603 } 2604 KKASSERT(entry->ba.object == object); 2605 vm_object_drop(object); 2606 2607 /* 2608 * If the fronting object did not have every page we have to clear 2609 * the pmap range due to the pages being changed so we can fault-in 2610 * the proper pages. 2611 */ 2612 if (all_shadowed == 0) 2613 pmap_remove(map->pmap, entry->ba.start, entry->ba.end); 2614 2615 return rv; 2616 } 2617 2618 /* 2619 * Copy all of the pages from one map entry to another. If the source 2620 * is wired down we just use vm_page_lookup(). If not we use 2621 * vm_fault_object(). 2622 * 2623 * The source and destination maps must be locked for write. 2624 * The source and destination maps token must be held 2625 * 2626 * No other requirements. 2627 * 2628 * XXX do segment optimization 2629 */ 2630 void 2631 vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map, 2632 vm_map_entry_t dst_entry, vm_map_entry_t src_entry) 2633 { 2634 vm_object_t dst_object; 2635 vm_object_t src_object; 2636 vm_ooffset_t dst_offset; 2637 vm_ooffset_t src_offset; 2638 vm_prot_t prot; 2639 vm_offset_t vaddr; 2640 vm_page_t dst_m; 2641 vm_page_t src_m; 2642 2643 src_object = src_entry->ba.object; 2644 src_offset = src_entry->ba.offset; 2645 2646 /* 2647 * Create the top-level object for the destination entry. (Doesn't 2648 * actually shadow anything - we copy the pages directly.) 2649 */ 2650 vm_map_entry_allocate_object(dst_entry); 2651 dst_object = dst_entry->ba.object; 2652 2653 prot = dst_entry->max_protection; 2654 2655 /* 2656 * Loop through all of the pages in the entry's range, copying each 2657 * one from the source object (it should be there) to the destination 2658 * object. 
2659 */ 2660 vm_object_hold(src_object); 2661 vm_object_hold(dst_object); 2662 2663 for (vaddr = dst_entry->ba.start, dst_offset = 0; 2664 vaddr < dst_entry->ba.end; 2665 vaddr += PAGE_SIZE, dst_offset += PAGE_SIZE) { 2666 2667 /* 2668 * Allocate a page in the destination object 2669 */ 2670 do { 2671 dst_m = vm_page_alloc(dst_object, 2672 OFF_TO_IDX(dst_offset), 2673 VM_ALLOC_NORMAL); 2674 if (dst_m == NULL) { 2675 vm_wait(0); 2676 } 2677 } while (dst_m == NULL); 2678 2679 /* 2680 * Find the page in the source object, and copy it in. 2681 * (Because the source is wired down, the page will be in 2682 * memory.) 2683 */ 2684 src_m = vm_page_lookup(src_object, 2685 OFF_TO_IDX(dst_offset + src_offset)); 2686 if (src_m == NULL) 2687 panic("vm_fault_copy_wired: page missing"); 2688 2689 vm_page_copy(src_m, dst_m); 2690 2691 /* 2692 * Enter it in the pmap... 2693 */ 2694 pmap_enter(dst_map->pmap, vaddr, dst_m, prot, FALSE, dst_entry); 2695 2696 /* 2697 * Mark it no longer busy, and put it on the active list. 2698 */ 2699 vm_page_activate(dst_m); 2700 vm_page_wakeup(dst_m); 2701 } 2702 vm_object_drop(dst_object); 2703 vm_object_drop(src_object); 2704 } 2705 2706 #if 0 2707 2708 /* 2709 * This routine checks around the requested page for other pages that 2710 * might be able to be faulted in. This routine brackets the viable 2711 * pages for the pages to be paged in. 2712 * 2713 * Inputs: 2714 * m, rbehind, rahead 2715 * 2716 * Outputs: 2717 * marray (array of vm_page_t), reqpage (index of requested page) 2718 * 2719 * Return value: 2720 * number of pages in marray 2721 */ 2722 static int 2723 vm_fault_additional_pages(vm_page_t m, int rbehind, int rahead, 2724 vm_page_t *marray, int *reqpage) 2725 { 2726 int i,j; 2727 vm_object_t object; 2728 vm_pindex_t pindex, startpindex, endpindex, tpindex; 2729 vm_page_t rtm; 2730 int cbehind, cahead; 2731 2732 object = m->object; 2733 pindex = m->pindex; 2734 2735 /* 2736 * we don't fault-ahead for device pager 2737 */ 2738 if ((object->type == OBJT_DEVICE) || 2739 (object->type == OBJT_MGTDEVICE)) { 2740 *reqpage = 0; 2741 marray[0] = m; 2742 return 1; 2743 } 2744 2745 /* 2746 * if the requested page is not available, then give up now 2747 */ 2748 if (!vm_pager_has_page(object, pindex, &cbehind, &cahead)) { 2749 *reqpage = 0; /* not used by caller, fix compiler warn */ 2750 return 0; 2751 } 2752 2753 if ((cbehind == 0) && (cahead == 0)) { 2754 *reqpage = 0; 2755 marray[0] = m; 2756 return 1; 2757 } 2758 2759 if (rahead > cahead) { 2760 rahead = cahead; 2761 } 2762 2763 if (rbehind > cbehind) { 2764 rbehind = cbehind; 2765 } 2766 2767 /* 2768 * Do not do any readahead if we have insufficient free memory. 2769 * 2770 * XXX code was broken disabled before and has instability 2771 * with this conditonal fixed, so shortcut for now. 2772 */ 2773 if (burst_fault == 0 || vm_page_count_severe()) { 2774 marray[0] = m; 2775 *reqpage = 0; 2776 return 1; 2777 } 2778 2779 /* 2780 * scan backward for the read behind pages -- in memory 2781 * 2782 * Assume that if the page is not found an interrupt will not 2783 * create it. Theoretically interrupts can only remove (busy) 2784 * pages, not create new associations. 
2785 */ 2786 if (pindex > 0) { 2787 if (rbehind > pindex) { 2788 rbehind = pindex; 2789 startpindex = 0; 2790 } else { 2791 startpindex = pindex - rbehind; 2792 } 2793 2794 vm_object_hold(object); 2795 for (tpindex = pindex; tpindex > startpindex; --tpindex) { 2796 if (vm_page_lookup(object, tpindex - 1)) 2797 break; 2798 } 2799 2800 i = 0; 2801 while (tpindex < pindex) { 2802 rtm = vm_page_alloc(object, tpindex, VM_ALLOC_SYSTEM | 2803 VM_ALLOC_NULL_OK); 2804 if (rtm == NULL) { 2805 for (j = 0; j < i; j++) { 2806 vm_page_free(marray[j]); 2807 } 2808 vm_object_drop(object); 2809 marray[0] = m; 2810 *reqpage = 0; 2811 return 1; 2812 } 2813 marray[i] = rtm; 2814 ++i; 2815 ++tpindex; 2816 } 2817 vm_object_drop(object); 2818 } else { 2819 i = 0; 2820 } 2821 2822 /* 2823 * Assign requested page 2824 */ 2825 marray[i] = m; 2826 *reqpage = i; 2827 ++i; 2828 2829 /* 2830 * Scan forwards for read-ahead pages 2831 */ 2832 tpindex = pindex + 1; 2833 endpindex = tpindex + rahead; 2834 if (endpindex > object->size) 2835 endpindex = object->size; 2836 2837 vm_object_hold(object); 2838 while (tpindex < endpindex) { 2839 if (vm_page_lookup(object, tpindex)) 2840 break; 2841 rtm = vm_page_alloc(object, tpindex, VM_ALLOC_SYSTEM | 2842 VM_ALLOC_NULL_OK); 2843 if (rtm == NULL) 2844 break; 2845 marray[i] = rtm; 2846 ++i; 2847 ++tpindex; 2848 } 2849 vm_object_drop(object); 2850 2851 return (i); 2852 } 2853 2854 #endif 2855 2856 /* 2857 * vm_prefault() provides a quick way of clustering pagefaults into a 2858 * process's address space. It is a "cousin" of pmap_object_init_pt, 2859 * except it runs at page fault time instead of mmap time. 2860 * 2861 * vm.fast_fault Enables pre-faulting zero-fill pages 2862 * 2863 * vm.prefault_pages Number of pages (1/2 negative, 1/2 positive) to 2864 * prefault. Scan stops in either direction when 2865 * a page is found to already exist. 2866 * 2867 * This code used to be per-platform pmap_prefault(). It is now 2868 * machine-independent and enhanced to also pre-fault zero-fill pages 2869 * (see vm.fast_fault) as well as make them writable, which greatly 2870 * reduces the number of page faults programs incur. 2871 * 2872 * Application performance when pre-faulting zero-fill pages is heavily 2873 * dependent on the application. Very tiny applications like /bin/echo 2874 * lose a little performance while applications of any appreciable size 2875 * gain performance. Prefaulting multiple pages also reduces SMP 2876 * congestion and can improve SMP performance significantly. 2877 * 2878 * NOTE! prot may allow writing but this only applies to the top level 2879 * object. If we wind up mapping a page extracted from a backing 2880 * object we have to make sure it is read-only. 2881 * 2882 * NOTE! The caller has already handled any COW operations on the 2883 * vm_map_entry via the normal fault code. Do NOT call this 2884 * shortcut unless the normal fault code has run on this entry. 2885 * 2886 * The related map must be locked. 2887 * No other requirements. 2888 */ 2889 __read_mostly static int vm_prefault_pages = 8; 2890 SYSCTL_INT(_vm, OID_AUTO, prefault_pages, CTLFLAG_RW, &vm_prefault_pages, 0, 2891 "Maximum number of pages to pre-fault"); 2892 __read_mostly static int vm_fast_fault = 1; 2893 SYSCTL_INT(_vm, OID_AUTO, fast_fault, CTLFLAG_RW, &vm_fast_fault, 0, 2894 "Burst fault zero-fill regions"); 2895 2896 /* 2897 * Set PG_NOSYNC if the map entry indicates so, but only if the page 2898 * is not already dirty by other means.
This will prevent passive 2899 * filesystem syncing as well as 'sync' from writing out the page. 2900 */ 2901 static void 2902 vm_set_nosync(vm_page_t m, vm_map_entry_t entry) 2903 { 2904 if (entry->eflags & MAP_ENTRY_NOSYNC) { 2905 if (m->dirty == 0) 2906 vm_page_flag_set(m, PG_NOSYNC); 2907 } else { 2908 vm_page_flag_clear(m, PG_NOSYNC); 2909 } 2910 } 2911 2912 static void 2913 vm_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry, int prot, 2914 int fault_flags) 2915 { 2916 vm_map_backing_t ba; /* first ba */ 2917 struct lwp *lp; 2918 vm_page_t m; 2919 vm_offset_t addr; 2920 vm_pindex_t index; 2921 vm_pindex_t pindex; 2922 vm_object_t object; 2923 int pprot; 2924 int i; 2925 int noneg; 2926 int nopos; 2927 int maxpages; 2928 2929 /* 2930 * Get stable max count value, disabled if set to 0 2931 */ 2932 maxpages = vm_prefault_pages; 2933 cpu_ccfence(); 2934 if (maxpages <= 0) 2935 return; 2936 2937 /* 2938 * We do not currently prefault mappings that use virtual page 2939 * tables. We do not prefault foreign pmaps. 2940 */ 2941 if (entry->maptype != VM_MAPTYPE_NORMAL) 2942 return; 2943 lp = curthread->td_lwp; 2944 if (lp == NULL || (pmap != vmspace_pmap(lp->lwp_vmspace))) 2945 return; 2946 2947 /* 2948 * Limit pre-fault count to 1024 pages. 2949 */ 2950 if (maxpages > 1024) 2951 maxpages = 1024; 2952 2953 ba = &entry->ba; 2954 object = entry->ba.object; 2955 KKASSERT(object != NULL); 2956 2957 /* 2958 * NOTE: VM_FAULT_DIRTY allowed later so must hold object exclusively 2959 * now (or do something more complex XXX). 2960 */ 2961 vm_object_hold(object); 2962 2963 noneg = 0; 2964 nopos = 0; 2965 for (i = 0; i < maxpages; ++i) { 2966 vm_object_t lobject; 2967 vm_object_t nobject; 2968 vm_map_backing_t last_ba; /* last ba */ 2969 vm_map_backing_t next_ba; /* last ba */ 2970 int allocated = 0; 2971 int error; 2972 2973 /* 2974 * This can eat a lot of time on a heavily contended 2975 * machine so yield on the tick if needed. 2976 */ 2977 if ((i & 7) == 7) 2978 lwkt_yield(); 2979 2980 /* 2981 * Calculate the page to pre-fault, stopping the scan in 2982 * each direction separately if the limit is reached. 2983 */ 2984 if (i & 1) { 2985 if (noneg) 2986 continue; 2987 addr = addra - ((i + 1) >> 1) * PAGE_SIZE; 2988 } else { 2989 if (nopos) 2990 continue; 2991 addr = addra + ((i + 2) >> 1) * PAGE_SIZE; 2992 } 2993 if (addr < entry->ba.start) { 2994 noneg = 1; 2995 if (noneg && nopos) 2996 break; 2997 continue; 2998 } 2999 if (addr >= entry->ba.end) { 3000 nopos = 1; 3001 if (noneg && nopos) 3002 break; 3003 continue; 3004 } 3005 3006 /* 3007 * Skip pages already mapped, and stop scanning in that 3008 * direction. When the scan terminates in both directions 3009 * we are done. 3010 */ 3011 if (pmap_prefault_ok(pmap, addr) == 0) { 3012 if (i & 1) 3013 noneg = 1; 3014 else 3015 nopos = 1; 3016 if (noneg && nopos) 3017 break; 3018 continue; 3019 } 3020 3021 /* 3022 * Follow the backing layers to obtain the page to be mapped 3023 * into the pmap. 3024 * 3025 * If we reach the terminal object without finding a page 3026 * and we determine it would be advantageous, then allocate 3027 * a zero-fill page for the base object. The base object 3028 * is guaranteed to be OBJT_DEFAULT for this case. 3029 * 3030 * In order to not have to check the pager via *haspage*() 3031 * we stop if any non-default object is encountered. e.g. 3032 * a vnode or swap object would stop the loop. 
3033 */ 3034 index = ((addr - entry->ba.start) + entry->ba.offset) >> 3035 PAGE_SHIFT; 3036 last_ba = ba; 3037 lobject = object; 3038 pindex = index; 3039 pprot = prot; 3040 3041 /*vm_object_hold(lobject); implied */ 3042 3043 while ((m = vm_page_lookup_busy_try(lobject, pindex, 3044 TRUE, &error)) == NULL) { 3045 if (lobject->type != OBJT_DEFAULT) 3046 break; 3047 if ((next_ba = last_ba->backing_ba) == NULL) { 3048 if (vm_fast_fault == 0) 3049 break; 3050 if ((prot & VM_PROT_WRITE) == 0 || 3051 vm_page_count_min(0)) { 3052 break; 3053 } 3054 3055 /* 3056 * NOTE: Allocated from base object 3057 */ 3058 m = vm_page_alloc(object, index, 3059 VM_ALLOC_NORMAL | 3060 VM_ALLOC_ZERO | 3061 VM_ALLOC_USE_GD | 3062 VM_ALLOC_NULL_OK); 3063 if (m == NULL) 3064 break; 3065 allocated = 1; 3066 pprot = prot; 3067 /* lobject = object .. not needed */ 3068 break; 3069 } 3070 if (next_ba->offset & PAGE_MASK) 3071 break; 3072 nobject = next_ba->object; 3073 vm_object_hold(nobject); 3074 pindex -= last_ba->offset >> PAGE_SHIFT; 3075 pindex += next_ba->offset >> PAGE_SHIFT; 3076 if (last_ba != ba) { 3077 vm_object_lock_swap(); 3078 vm_object_drop(lobject); 3079 } 3080 lobject = nobject; 3081 last_ba = next_ba; 3082 pprot &= ~VM_PROT_WRITE; 3083 } 3084 3085 /* 3086 * NOTE: A non-NULL (m) will be associated with lobject if 3087 * it was found there, otherwise it is probably a 3088 * zero-fill page associated with the base object. 3089 * 3090 * Give-up if no page is available. 3091 */ 3092 if (m == NULL) { 3093 if (last_ba != ba) 3094 vm_object_drop(lobject); 3095 break; 3096 } 3097 3098 /* 3099 * The object must be marked dirty if we are mapping a 3100 * writable page. m->object is either lobject or object, 3101 * both of which are still held. Do this before we 3102 * potentially drop the object. 3103 */ 3104 if (pprot & VM_PROT_WRITE) 3105 vm_object_set_writeable_dirty(m->object); 3106 3107 /* 3108 * Do not conditionalize on PG_RAM. If pages are present in 3109 * the VM system we assume optimal caching. If caching is 3110 * not optimal the I/O gravy train will be restarted when we 3111 * hit an unavailable page. We do not want to try to restart 3112 * the gravy train now because we really don't know how much 3113 * of the object has been cached. The cost for restarting 3114 * the gravy train should be low (since accesses will likely 3115 * be I/O bound anyway). 3116 */ 3117 if (last_ba != ba) 3118 vm_object_drop(lobject); 3119 3120 /* 3121 * Enter the page into the pmap if appropriate. If we had 3122 * allocated the page we have to place it on a queue. If not 3123 * we just have to make sure it isn't on the cache queue 3124 * (pages on the cache queue are not allowed to be mapped). 3125 */ 3126 if (allocated) { 3127 /* 3128 * Page must be zerod. 
3129 */ 3130 vm_page_zero_fill(m); 3131 mycpu->gd_cnt.v_zfod++; 3132 m->valid = VM_PAGE_BITS_ALL; 3133 3134 /* 3135 * Handle dirty page case 3136 */ 3137 if (pprot & VM_PROT_WRITE) 3138 vm_set_nosync(m, entry); 3139 pmap_enter(pmap, addr, m, pprot, 0, entry); 3140 mycpu->gd_cnt.v_vm_faults++; 3141 if (curthread->td_lwp) 3142 ++curthread->td_lwp->lwp_ru.ru_minflt; 3143 vm_page_deactivate(m); 3144 if (pprot & VM_PROT_WRITE) { 3145 /*vm_object_set_writeable_dirty(m->object);*/ 3146 vm_set_nosync(m, entry); 3147 if (fault_flags & VM_FAULT_DIRTY) { 3148 vm_page_dirty(m); 3149 /*XXX*/ 3150 swap_pager_unswapped(m); 3151 } 3152 } 3153 vm_page_wakeup(m); 3154 } else if (error) { 3155 /* couldn't busy page, no wakeup */ 3156 } else if ( 3157 ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) && 3158 (m->flags & PG_FICTITIOUS) == 0) { 3159 /* 3160 * A fully valid page not undergoing soft I/O can 3161 * be immediately entered into the pmap. 3162 */ 3163 if ((m->queue - m->pc) == PQ_CACHE) 3164 vm_page_deactivate(m); 3165 if (pprot & VM_PROT_WRITE) { 3166 /*vm_object_set_writeable_dirty(m->object);*/ 3167 vm_set_nosync(m, entry); 3168 if (fault_flags & VM_FAULT_DIRTY) { 3169 vm_page_dirty(m); 3170 /*XXX*/ 3171 swap_pager_unswapped(m); 3172 } 3173 } 3174 if (pprot & VM_PROT_WRITE) 3175 vm_set_nosync(m, entry); 3176 pmap_enter(pmap, addr, m, pprot, 0, entry); 3177 mycpu->gd_cnt.v_vm_faults++; 3178 if (curthread->td_lwp) 3179 ++curthread->td_lwp->lwp_ru.ru_minflt; 3180 vm_page_wakeup(m); 3181 } else { 3182 vm_page_wakeup(m); 3183 } 3184 } 3185 vm_object_drop(object); 3186 } 3187 3188 /* 3189 * Object can be held shared 3190 */ 3191 static void 3192 vm_prefault_quick(pmap_t pmap, vm_offset_t addra, 3193 vm_map_entry_t entry, int prot, int fault_flags) 3194 { 3195 struct lwp *lp; 3196 vm_page_t m; 3197 vm_offset_t addr; 3198 vm_pindex_t pindex; 3199 vm_object_t object; 3200 int i; 3201 int noneg; 3202 int nopos; 3203 int maxpages; 3204 3205 /* 3206 * Get stable max count value, disabled if set to 0 3207 */ 3208 maxpages = vm_prefault_pages; 3209 cpu_ccfence(); 3210 if (maxpages <= 0) 3211 return; 3212 3213 /* 3214 * We do not currently prefault mappings that use virtual page 3215 * tables. We do not prefault foreign pmaps. 3216 */ 3217 if (entry->maptype != VM_MAPTYPE_NORMAL) 3218 return; 3219 lp = curthread->td_lwp; 3220 if (lp == NULL || (pmap != vmspace_pmap(lp->lwp_vmspace))) 3221 return; 3222 object = entry->ba.object; 3223 if (entry->ba.backing_ba != NULL) 3224 return; 3225 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object)); 3226 3227 /* 3228 * Limit pre-fault count to 1024 pages. 3229 */ 3230 if (maxpages > 1024) 3231 maxpages = 1024; 3232 3233 noneg = 0; 3234 nopos = 0; 3235 for (i = 0; i < maxpages; ++i) { 3236 int error; 3237 3238 /* 3239 * Calculate the page to pre-fault, stopping the scan in 3240 * each direction separately if the limit is reached. 3241 */ 3242 if (i & 1) { 3243 if (noneg) 3244 continue; 3245 addr = addra - ((i + 1) >> 1) * PAGE_SIZE; 3246 } else { 3247 if (nopos) 3248 continue; 3249 addr = addra + ((i + 2) >> 1) * PAGE_SIZE; 3250 } 3251 if (addr < entry->ba.start) { 3252 noneg = 1; 3253 if (noneg && nopos) 3254 break; 3255 continue; 3256 } 3257 if (addr >= entry->ba.end) { 3258 nopos = 1; 3259 if (noneg && nopos) 3260 break; 3261 continue; 3262 } 3263 3264 /* 3265 * Follow the VM object chain to obtain the page to be mapped 3266 * into the pmap. This version of the prefault code only 3267 * works with terminal objects. 3268 * 3269 * The page must already exist. 
If we encounter a problem 3270 * we stop here. 3271 * 3272 * WARNING! We cannot call swap_pager_unswapped() or insert 3273 * a new vm_page with a shared token. 3274 */ 3275 pindex = ((addr - entry->ba.start) + entry->ba.offset) >> 3276 PAGE_SHIFT; 3277 3278 /* 3279 * Skip pages already mapped, and stop scanning in that 3280 * direction. When the scan terminates in both directions 3281 * we are done. 3282 */ 3283 if (pmap_prefault_ok(pmap, addr) == 0) { 3284 if (i & 1) 3285 noneg = 1; 3286 else 3287 nopos = 1; 3288 if (noneg && nopos) 3289 break; 3290 continue; 3291 } 3292 3293 /* 3294 * Shortcut the read-only mapping case using the far more 3295 * efficient vm_page_lookup_sbusy_try() function. This 3296 * allows us to acquire the page soft-busied only which 3297 * is especially nice for concurrent execs of the same 3298 * program. 3299 * 3300 * The lookup function also validates page suitability 3301 * (all valid bits set, and not fictitious). 3302 * 3303 * If the page is in PQ_CACHE we have to fall through 3304 * and hard-busy it so we can move it out of PQ_CACHE. 3305 */ 3306 if ((prot & VM_PROT_WRITE) == 0) { 3307 m = vm_page_lookup_sbusy_try(object, pindex, 3308 0, PAGE_SIZE); 3309 if (m == NULL) 3310 break; 3311 if ((m->queue - m->pc) != PQ_CACHE) { 3312 pmap_enter(pmap, addr, m, prot, 0, entry); 3313 mycpu->gd_cnt.v_vm_faults++; 3314 if (curthread->td_lwp) 3315 ++curthread->td_lwp->lwp_ru.ru_minflt; 3316 vm_page_sbusy_drop(m); 3317 continue; 3318 } 3319 vm_page_sbusy_drop(m); 3320 } 3321 3322 /* 3323 * Fall back to the normal vm_page lookup code. This code 3324 * hard-busies the page. Not only that, but the page 3325 * can remain in that state for a significant period 3326 * of time due to pmap_enter()'s overhead. 3327 */ 3328 m = vm_page_lookup_busy_try(object, pindex, TRUE, &error); 3329 if (m == NULL || error) 3330 break; 3331 3332 /* 3333 * Stop if the page cannot be trivially entered into the 3334 * pmap. 3335 */ 3336 if (((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) || 3337 (m->flags & PG_FICTITIOUS) || 3338 ((m->flags & PG_SWAPPED) && 3339 (prot & VM_PROT_WRITE) && 3340 (fault_flags & VM_FAULT_DIRTY))) { 3341 vm_page_wakeup(m); 3342 break; 3343 } 3344 3345 /* 3346 * Enter the page into the pmap. The object might be held 3347 * shared so we can't do any (serious) modifying operation 3348 * on it. 3349 */ 3350 if ((m->queue - m->pc) == PQ_CACHE) 3351 vm_page_deactivate(m); 3352 if (prot & VM_PROT_WRITE) { 3353 vm_object_set_writeable_dirty(m->object); 3354 vm_set_nosync(m, entry); 3355 if (fault_flags & VM_FAULT_DIRTY) { 3356 vm_page_dirty(m); 3357 /* can't happen due to conditional above */ 3358 /* swap_pager_unswapped(m); */ 3359 } 3360 } 3361 pmap_enter(pmap, addr, m, prot, 0, entry); 3362 mycpu->gd_cnt.v_vm_faults++; 3363 if (curthread->td_lwp) 3364 ++curthread->td_lwp->lwp_ru.ru_minflt; 3365 vm_page_wakeup(m); 3366 } 3367 } 3368
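/*
 * Illustrative, standalone sketch (guarded by #if 0 so it is not part of
 * the kernel build): demonstrates the order in which vm_prefault() and
 * vm_prefault_quick() generate candidate addresses around the faulting
 * address 'addra', using the same offset arithmetic as the loops above.
 * Even iterations probe forward and odd iterations probe backward, so the
 * scan alternates +1, -1, +2, -2, ... pages and never revisits the
 * faulting page itself.  PAGE_SIZE_SKETCH and the sample fault address are
 * stand-ins chosen only for this example; compiled as a normal userland
 * program it simply prints the visit order for an 8-page window.
 */
#if 0
#include <stdio.h>

#define PAGE_SIZE_SKETCH	4096UL

int
main(void)
{
	unsigned long addra = 0x100000UL;	/* hypothetical fault address */
	unsigned long addr;
	int maxpages = 8;		/* mirrors the vm.prefault_pages default */
	int i;

	for (i = 0; i < maxpages; ++i) {
		if (i & 1) {
			/* odd i: probe backward from the fault address */
			addr = addra - ((i + 1) >> 1) * PAGE_SIZE_SKETCH;
		} else {
			/* even i: probe forward from the fault address */
			addr = addra + ((i + 2) >> 1) * PAGE_SIZE_SKETCH;
		}
		printf("i=%d -> %+ld page(s): 0x%lx\n", i,
		       ((long)addr - (long)addra) / (long)PAGE_SIZE_SKETCH,
		       addr);
	}
	return 0;
}
#endif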