/*
 * Copyright (c) 2003-2020 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * ---
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * ---
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * Page fault handling module.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <sys/vkernel.h>
#include <sys/lock.h>
#include <sys/sysctl.h>

#include <cpu/lwbuf.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>

#include <vm/vm_page2.h>

#define VM_FAULT_MAX_QUICK	16

struct faultstate {
	vm_page_t mary[VM_FAULT_MAX_QUICK];
	vm_map_backing_t ba;
	vm_prot_t prot;
	vm_page_t first_m;
	vm_map_backing_t first_ba;
	vm_prot_t first_prot;
	vm_map_t map;
	vm_map_entry_t entry;
	int lookup_still_valid;	/* 0=inv 1=valid/rel -1=valid/atomic */
	int hardfault;
	int fault_flags;
	int shared;
	int msoftonly;
	int first_shared;
	int wflags;
	int first_ba_held;	/* 0=unlocked 1=locked/rel -1=lock/atomic */
	struct vnode *vp;
};

__read_mostly static int debug_fault = 0;
SYSCTL_INT(_vm, OID_AUTO, debug_fault, CTLFLAG_RW, &debug_fault, 0, "");
__read_mostly static int debug_cluster = 0;
SYSCTL_INT(_vm, OID_AUTO, debug_cluster, CTLFLAG_RW, &debug_cluster, 0, "");
#if 0
static int virtual_copy_enable = 1;
SYSCTL_INT(_vm, OID_AUTO, virtual_copy_enable, CTLFLAG_RW,
	   &virtual_copy_enable, 0, "");
#endif
__read_mostly int vm_shared_fault = 1;
TUNABLE_INT("vm.shared_fault", &vm_shared_fault);
SYSCTL_INT(_vm, OID_AUTO, shared_fault, CTLFLAG_RW,
	   &vm_shared_fault, 0, "Allow shared token on vm_object");
__read_mostly static int vm_fault_bypass_count = 1;
TUNABLE_INT("vm.fault_bypass", &vm_fault_bypass_count);
SYSCTL_INT(_vm, OID_AUTO, fault_bypass, CTLFLAG_RW,
	   &vm_fault_bypass_count, 0, "Allow fast vm_fault shortcut");

/*
 * Define here for debugging ioctls.  Note that these are globals, so
 * they will cause a ton of cache line bouncing.  Only use for debugging
 * purposes.
 */
/*#define VM_FAULT_QUICK_DEBUG */
#ifdef VM_FAULT_QUICK_DEBUG
static long vm_fault_bypass_success_count = 0;
SYSCTL_LONG(_vm, OID_AUTO, fault_bypass_success_count, CTLFLAG_RW,
	    &vm_fault_bypass_success_count, 0, "");
static long vm_fault_bypass_failure_count1 = 0;
SYSCTL_LONG(_vm, OID_AUTO, fault_bypass_failure_count1, CTLFLAG_RW,
	    &vm_fault_bypass_failure_count1, 0, "");
static long vm_fault_bypass_failure_count2 = 0;
SYSCTL_LONG(_vm, OID_AUTO, fault_bypass_failure_count2, CTLFLAG_RW,
	    &vm_fault_bypass_failure_count2, 0, "");
static long vm_fault_bypass_failure_count3 = 0;
SYSCTL_LONG(_vm, OID_AUTO, fault_bypass_failure_count3, CTLFLAG_RW,
	    &vm_fault_bypass_failure_count3, 0, "");
static long vm_fault_bypass_failure_count4 = 0;
SYSCTL_LONG(_vm, OID_AUTO, fault_bypass_failure_count4, CTLFLAG_RW,
	    &vm_fault_bypass_failure_count4, 0, "");
#endif

static int vm_fault_bypass(struct faultstate *fs, vm_pindex_t first_pindex,
			vm_pindex_t first_count, int *mextcountp,
			vm_prot_t fault_type);
static int vm_fault_object(struct faultstate *, vm_pindex_t, vm_prot_t, int);
static void vm_set_nosync(vm_page_t m, vm_map_entry_t entry);
static void vm_prefault(pmap_t pmap, vm_offset_t addra,
			vm_map_entry_t entry, int prot, int fault_flags);
static void vm_prefault_quick(pmap_t pmap, vm_offset_t addra,
			vm_map_entry_t entry, int prot, int fault_flags);

static __inline void
release_page(struct faultstate *fs)
{
	vm_page_deactivate(fs->mary[0]);
	vm_page_wakeup(fs->mary[0]);
	fs->mary[0] = NULL;
}

static __inline void
unlock_map(struct faultstate *fs)
{
	if (fs->ba != fs->first_ba)
		vm_object_drop(fs->ba->object);
	if (fs->first_ba && fs->first_ba_held == 1) {
		vm_object_drop(fs->first_ba->object);
		fs->first_ba_held = 0;
		fs->first_ba = NULL;
	}
	fs->ba = NULL;

	/*
	 * NOTE: If lookup_still_valid == -1 the map is assumed to be locked
	 *	 and caller expects it to remain locked atomically.
	 */
	if (fs->lookup_still_valid == 1 && fs->map) {
		vm_map_lookup_done(fs->map, fs->entry, 0);
		fs->lookup_still_valid = 0;
		fs->entry = NULL;
	}
}

/*
 * Clean up after a successful call to vm_fault_object() so another call
 * to vm_fault_object() can be made.
 */
static void
cleanup_fault(struct faultstate *fs)
{
	/*
	 * We allocated a junk page for a COW operation that did
	 * not occur, so the page must be freed.
	 */
	if (fs->ba != fs->first_ba) {
		KKASSERT(fs->first_shared == 0);

		/*
		 * first_m could be completely valid and we got here
		 * because of a PG_RAM, don't mistakenly free it!
		 */
		if ((fs->first_m->valid & VM_PAGE_BITS_ALL) ==
		    VM_PAGE_BITS_ALL) {
			vm_page_wakeup(fs->first_m);
		} else {
			vm_page_free(fs->first_m);
		}
		vm_object_pip_wakeup(fs->ba->object);
		fs->first_m = NULL;

		/*
		 * Reset fs->ba without calling unlock_map(), so we need a
		 * little duplication.
		 */
		vm_object_drop(fs->ba->object);
		fs->ba = fs->first_ba;
	}
}

static void
unlock_things(struct faultstate *fs)
{
	cleanup_fault(fs);
	unlock_map(fs);
	if (fs->vp != NULL) {
		vput(fs->vp);
		fs->vp = NULL;
	}
}

#if 0
/*
 * Virtual copy tests.  Used by the fault code to determine if a
 * page can be moved from an orphan vm_object into its shadow
 * instead of copying its contents.
 */
static __inline int
virtual_copy_test(struct faultstate *fs)
{
	/*
	 * Must be holding exclusive locks
	 */
	if (fs->first_shared || fs->shared || virtual_copy_enable == 0)
		return 0;

	/*
	 * Map, if present, has not changed
	 */
	if (fs->map && fs->map_generation != fs->map->timestamp)
		return 0;

	/*
	 * No refs, except us
	 */
	if (fs->ba->object->ref_count != 1)
		return 0;

	/*
	 * No one else can look this object up
	 */
	if (fs->ba->object->handle != NULL)
		return 0;

	/*
	 * No other ways to look the object up
	 */
	if (fs->ba->object->type != OBJT_DEFAULT &&
	    fs->ba->object->type != OBJT_SWAP)
		return 0;

	/*
	 * We don't chase down the shadow chain
	 */
	if (fs->ba != fs->first_ba->backing_ba)
		return 0;

	return 1;
}

static __inline int
virtual_copy_ok(struct faultstate *fs)
{
	if (virtual_copy_test(fs)) {
		/*
		 * Grab the lock and re-test changeable items.
		 */
		if (fs->lookup_still_valid == 0 && fs->map) {
			if (lockmgr(&fs->map->lock, LK_EXCLUSIVE|LK_NOWAIT))
				return 0;
			fs->lookup_still_valid = 1;
			if (virtual_copy_test(fs)) {
				fs->map_generation = ++fs->map->timestamp;
				return 1;
			}
			fs->lookup_still_valid = 0;
			lockmgr(&fs->map->lock, LK_RELEASE);
		}
	}
	return 0;
}
#endif

/*
 * TRYPAGER
 *
 * Determine if the pager for the current object *might* contain the page.
 *
 * We only need to try the pager if this is not a default object (default
 * objects are zero-fill and have no real pager), and if we are not taking
 * a wiring fault or if the FS entry is wired.
 */
#define TRYPAGER(fs)	\
		(fs->ba->object->type != OBJT_DEFAULT &&		\
		 (((fs->fault_flags & VM_FAULT_WIRE_MASK) == 0) ||	\
		  (fs->wflags & FW_WIRED)))

/*
 * vm_fault:
 *
 * Handle a page fault occurring at the given address, requiring the given
 * permissions, in the map specified.  If successful, the page is inserted
 * into the associated physical map.
 *
 * NOTE: The given address should be truncated to the proper page address.
 *
 * KERN_SUCCESS is returned if the page fault is handled; otherwise,
 * a standard error specifying why the fault is fatal is returned.
 *
 * The map in question must be referenced, and remains so.
 * The caller may hold no locks.
 * No other requirements.
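 *
 * The fault is retried from RetryFault whenever vm_fault_object()
 * returns KERN_TRY_AGAIN, e.g. after a shared->exclusive object lock
 * promotion or after waiting for memory in a low-memory situation.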
 */
int
vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type, int fault_flags)
{
	vm_pindex_t first_pindex;
	vm_pindex_t first_count;
	struct faultstate fs;
	struct lwp *lp;
	struct proc *p;
	thread_t td;
	int mextcount;
	int growstack;
	int retry = 0;
	int inherit_prot;
	int result;
	int n;

	inherit_prot = fault_type & VM_PROT_NOSYNC;
	fs.hardfault = 0;
	fs.fault_flags = fault_flags;
	fs.vp = NULL;
	fs.shared = vm_shared_fault;
	fs.first_shared = vm_shared_fault;
	growstack = 1;

	/*
	 * vm_map interactions
	 */
	td = curthread;
	if ((lp = td->td_lwp) != NULL)
		lp->lwp_flags |= LWP_PAGING;

RetryFault:
	/*
	 * vm_fault_bypass() can shortcut us.
	 */
	fs.msoftonly = 0;
	fs.first_ba_held = 0;
	mextcount = 1;

	/*
	 * Find the vm_map_entry representing the backing store and resolve
	 * the top level object and page index.  This may have the side
	 * effect of executing a copy-on-write on the map entry,
	 * creating a shadow object, or splitting an anonymous entry for
	 * performance, but will not COW any actual VM pages.
	 *
	 * On success fs.map is left read-locked and various other fields
	 * are initialized but not otherwise referenced or locked.
	 *
	 * NOTE! vm_map_lookup will try to upgrade the fault_type to
	 *	 VM_FAULT_WRITE if the map entry is a virtual page table
	 *	 and also writable, so we can set the 'A'accessed bit in
	 *	 the virtual page table entry.
	 */
	fs.map = map;
	result = vm_map_lookup(&fs.map, vaddr, fault_type,
			       &fs.entry, &fs.first_ba,
			       &first_pindex, &first_count,
			       &fs.first_prot, &fs.wflags);

	/*
	 * If the lookup failed or the map protections are incompatible,
	 * the fault generally fails.
	 *
	 * The failure could be due to TDF_NOFAULT if vm_map_lookup()
	 * tried to do a COW fault.
	 *
	 * If the caller is trying to do a user wiring we have more work
	 * to do.
	 */
	if (result != KERN_SUCCESS) {
		if (result == KERN_FAILURE_NOFAULT) {
			result = KERN_FAILURE;
			goto done;
		}
		if (result != KERN_PROTECTION_FAILURE ||
		    (fs.fault_flags & VM_FAULT_WIRE_MASK) != VM_FAULT_USER_WIRE)
		{
			if (result == KERN_INVALID_ADDRESS && growstack &&
			    map != &kernel_map && curproc != NULL) {
				result = vm_map_growstack(map, vaddr);
				if (result == KERN_SUCCESS) {
					growstack = 0;
					++retry;
					goto RetryFault;
				}
				result = KERN_FAILURE;
			}
			goto done;
		}

		/*
		 * If we are user-wiring a r/w segment, and it is COW, then
		 * we need to do the COW operation.  Note that we don't
		 * currently COW RO sections now, because it is NOT desirable
		 * to COW .text.  We simply keep .text from ever being COW'ed
		 * and take the heat that one cannot debug wired .text sections.
		 *
		 * XXX Try to allow the above by specifying OVERRIDE_WRITE.
		 */
		result = vm_map_lookup(&fs.map, vaddr,
				       VM_PROT_READ|VM_PROT_WRITE|
				       VM_PROT_OVERRIDE_WRITE,
				       &fs.entry, &fs.first_ba,
				       &first_pindex, &first_count,
				       &fs.first_prot, &fs.wflags);
		if (result != KERN_SUCCESS) {
			/* could also be KERN_FAILURE_NOFAULT */
			result = KERN_FAILURE;
			goto done;
		}

		/*
		 * If we don't COW now, on a user wire, the user will never
		 * be able to write to the mapping.  If we don't make this
		 * restriction, the bookkeeping would be nearly impossible.
		 *
		 * XXX We have a shared lock, this will have a MP race but
		 * I don't see how it can hurt anything.
		 */
		if ((fs.entry->protection & VM_PROT_WRITE) == 0) {
			atomic_clear_char(&fs.entry->max_protection,
					  VM_PROT_WRITE);
		}
	}

	/*
	 * fs.map is read-locked
	 *
	 * Misc checks.  Save the map generation number to detect races.
	 */
	fs.lookup_still_valid = 1;
	fs.first_m = NULL;
	fs.ba = fs.first_ba;		/* so unlock_things() works */
	fs.prot = fs.first_prot;	/* default (used by uksmap) */

	if (fs.entry->eflags & (MAP_ENTRY_NOFAULT | MAP_ENTRY_KSTACK)) {
		if (fs.entry->eflags & MAP_ENTRY_NOFAULT) {
			panic("vm_fault: fault on nofault entry, addr: %p",
			      (void *)vaddr);
		}
		if ((fs.entry->eflags & MAP_ENTRY_KSTACK) &&
		    vaddr >= fs.entry->ba.start &&
		    vaddr < fs.entry->ba.start + PAGE_SIZE) {
			panic("vm_fault: fault on stack guard, addr: %p",
			      (void *)vaddr);
		}
	}

	/*
	 * A user-kernel shared map has no VM object and bypasses
	 * everything.  We execute the uksmap function with a temporary
	 * fictitious vm_page.  The address is directly mapped with no
	 * management.
	 */
	if (fs.entry->maptype == VM_MAPTYPE_UKSMAP) {
		struct vm_page fakem;

		bzero(&fakem, sizeof(fakem));
		fakem.pindex = first_pindex;
		fakem.flags = PG_FICTITIOUS | PG_UNQUEUED;
		fakem.busy_count = PBUSY_LOCKED;
		fakem.valid = VM_PAGE_BITS_ALL;
		fakem.pat_mode = VM_MEMATTR_DEFAULT;
		if (fs.entry->ba.uksmap(&fs.entry->ba, UKSMAPOP_FAULT,
					fs.entry->aux.dev, &fakem)) {
			result = KERN_FAILURE;
			unlock_things(&fs);
			goto done2;
		}
		pmap_enter(fs.map->pmap, vaddr, &fakem, fs.prot | inherit_prot,
			   (fs.wflags & FW_WIRED), fs.entry);
		goto done_success;
	}

	/*
	 * A system map entry may return a NULL object.  No object means
	 * no pager means an unrecoverable kernel fault.
	 */
	if (fs.first_ba == NULL) {
		panic("vm_fault: unrecoverable fault at %p in entry %p",
		      (void *)vaddr, fs.entry);
	}

	/*
	 * Fail here if not a trivial anonymous page fault and TDF_NOFAULT
	 * is set.
	 *
	 * Unfortunately a deadlock can occur if we are forced to page-in
	 * from swap, but diving all the way into the vm_pager_get_page()
	 * function to find out is too much.  Just check the object type.
	 *
	 * The deadlock is a CAM deadlock on a busy VM page when trying
	 * to finish an I/O if another process gets stuck in
	 * vop_helper_read_shortcut() due to a swap fault.
	 */
	if ((td->td_flags & TDF_NOFAULT) &&
	    (retry ||
	     fs.first_ba->object->type == OBJT_VNODE ||
	     fs.first_ba->object->type == OBJT_SWAP ||
	     fs.first_ba->backing_ba)) {
		result = KERN_FAILURE;
		unlock_things(&fs);
		goto done2;
	}

	/*
	 * If the entry is wired we cannot change the page protection.
	 */
	if (fs.wflags & FW_WIRED)
		fault_type = fs.first_prot;

	/*
	 * We generally want to avoid unnecessary exclusive modes on backing
	 * and terminal objects because this can seriously interfere with
	 * heavily fork()'d processes (particularly /bin/sh scripts).
	 *
	 * However, we also want to avoid unnecessary retries due to needed
	 * shared->exclusive promotion for common faults.  Exclusive mode is
	 * always needed if any page insertion, rename, or free occurs in an
	 * object (and also indirectly if any I/O is done).
	 *
	 * The main issue here is going to be fs.first_shared.  If the
	 * first_object has a backing object which isn't shadowed and the
	 * process is single-threaded we might as well use an exclusive
	 * lock/chain right off the bat.
	 */
#if 0
	/* WORK IN PROGRESS, CODE REMOVED */
	if (fs.first_shared && fs.first_object->backing_object &&
	    LIST_EMPTY(&fs.first_object->shadow_head) &&
	    td->td_proc && td->td_proc->p_nthreads == 1) {
		fs.first_shared = 0;
	}
#endif

	/*
	 * VM_FAULT_UNSWAP - swap_pager_unswapped() needs an exclusive object
	 * VM_FAULT_DIRTY - may require swap_pager_unswapped() later, but
	 * we can try shared first.
	 */
	if (fault_flags & VM_FAULT_UNSWAP)
		fs.first_shared = 0;

	/*
	 * Try to shortcut the entire mess and run the fault lockless.
	 * This will burst in multiple pages via fs->mary[].
	 */
	if (vm_fault_bypass_count &&
	    vm_fault_bypass(&fs, first_pindex, first_count,
			    &mextcount, fault_type) == KERN_SUCCESS) {
		fault_flags &= ~VM_FAULT_BURST;
		goto success;
	}

	/*
	 * Exclusive heuristic (alloc page vs page exists)
	 */
	if (fs.first_ba->flags & VM_MAP_BACK_EXCL_HEUR)
		fs.first_shared = 0;

	/*
	 * Obtain a top-level object lock, shared or exclusive depending
	 * on fs.first_shared.  If a shared lock winds up being insufficient
	 * we will retry with an exclusive lock.
	 *
	 * The vnode pager lock is always shared.
	 */
	if (fs.first_shared)
		vm_object_hold_shared(fs.first_ba->object);
	else
		vm_object_hold(fs.first_ba->object);
	if (fs.vp == NULL)
		fs.vp = vnode_pager_lock(fs.first_ba);
	fs.first_ba_held = 1;

	/*
	 * The page we want is at (first_object, first_pindex).
	 *
	 * Now we have the actual (object, pindex), fault in the page.  If
	 * vm_fault_object() fails it will unlock and deallocate the FS
	 * data.  If it succeeds everything remains locked and fs->ba->object
	 * will have an additional PIP count if fs->ba != fs->first_ba.
	 *
	 * vm_fault_object will set fs->prot for the pmap operation.  It is
	 * allowed to set VM_PROT_WRITE if fault_type == VM_PROT_READ if the
	 * page can be safely written.  However, it will force a read-only
	 * mapping for a read fault if the memory is managed by a virtual
	 * page table.
	 *
	 * If the fault code uses the shared object lock shortcut
	 * we must not try to burst (we can't allocate VM pages).
	 */
	result = vm_fault_object(&fs, first_pindex, fault_type, 1);

	if (debug_fault > 0) {
		--debug_fault;
		kprintf("VM_FAULT result %d addr=%jx type=%02x flags=%02x "
			"fs.m=%p fs.prot=%02x fs.wflags=%02x fs.entry=%p\n",
			result, (intmax_t)vaddr, fault_type, fault_flags,
			fs.mary[0], fs.prot, fs.wflags, fs.entry);
	}

	if (result == KERN_TRY_AGAIN) {
		++retry;
		goto RetryFault;
	}
	if (result != KERN_SUCCESS) {
		goto done;
	}

success:
	/*
	 * On success vm_fault_object() does not unlock or deallocate, and fs.m
	 * will contain a busied page.  It does drop fs->ba if appropriate.
	 *
	 * Enter the page into the pmap and do pmap-related adjustments.
	 *
	 * WARNING! Soft-busied fs.m's can only be manipulated in limited
	 *	    ways.
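	 *
	 * NOTE: mextcount is normally 1.  It is greater than 1 only when
	 *	 vm_fault_bypass() burst extra soft-busied pages into
	 *	 fs.mary[]; each page then gets its own pmap_enter() below.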
	 */
	KKASSERT(fs.lookup_still_valid != 0);
	vm_page_flag_set(fs.mary[0], PG_REFERENCED);

	for (n = 0; n < mextcount; ++n) {
		pmap_enter(fs.map->pmap, vaddr + (n << PAGE_SHIFT),
			   fs.mary[n], fs.prot | inherit_prot,
			   fs.wflags & FW_WIRED, fs.entry);
	}

	/*
	 * If the page is not wired down, then put it where the pageout daemon
	 * can find it.
	 *
	 * NOTE: We cannot safely wire, unwire, or adjust queues for a
	 *	 soft-busied page.
	 */
	for (n = 0; n < mextcount; ++n) {
		if (fs.msoftonly) {
			KKASSERT(fs.mary[n]->busy_count & PBUSY_MASK);
			KKASSERT((fs.fault_flags & VM_FAULT_WIRE_MASK) == 0);
			vm_page_sbusy_drop(fs.mary[n]);
		} else {
			if (fs.fault_flags & VM_FAULT_WIRE_MASK) {
				if (fs.wflags & FW_WIRED)
					vm_page_wire(fs.mary[n]);
				else
					vm_page_unwire(fs.mary[n], 1);
			} else {
				vm_page_activate(fs.mary[n]);
			}
			KKASSERT(fs.mary[n]->busy_count & PBUSY_LOCKED);
			vm_page_wakeup(fs.mary[n]);
		}
	}

	/*
	 * Burst in a few more pages if possible.  The fs.map should still
	 * be locked.  To avoid interlocking against a vnode->getblk
	 * operation we had to be sure to unbusy our primary vm_page above
	 * first.
	 *
	 * A normal burst can continue down backing store, only execute
	 * if we are holding an exclusive lock, otherwise the exclusive
	 * locks the burst code gets might cause excessive SMP collisions.
	 *
	 * A quick burst can be utilized when there is no backing object
	 * (i.e. a shared file mmap).
	 */
	if ((fault_flags & VM_FAULT_BURST) &&
	    (fs.fault_flags & VM_FAULT_WIRE_MASK) == 0 &&
	    (fs.wflags & FW_WIRED) == 0) {
		if (fs.first_shared == 0 && fs.shared == 0) {
			vm_prefault(fs.map->pmap, vaddr,
				    fs.entry, fs.prot, fault_flags);
		} else {
			vm_prefault_quick(fs.map->pmap, vaddr,
					  fs.entry, fs.prot, fault_flags);
		}
	}

done_success:
	/*
	 * Unlock everything, and return
	 */
	unlock_things(&fs);

	mycpu->gd_cnt.v_vm_faults++;
	if (td->td_lwp) {
		if (fs.hardfault) {
			++td->td_lwp->lwp_ru.ru_majflt;
		} else {
			++td->td_lwp->lwp_ru.ru_minflt;
		}
	}

	/*vm_object_deallocate(fs.first_ba->object);*/
	/*fs.m = NULL; */

	result = KERN_SUCCESS;
done:
	if (fs.first_ba && fs.first_ba->object && fs.first_ba_held == 1) {
		vm_object_drop(fs.first_ba->object);
		fs.first_ba_held = 0;
	}
done2:
	if (lp)
		lp->lwp_flags &= ~LWP_PAGING;

#if !defined(NO_SWAPPING)
	/*
	 * Check the process RSS limit and force deactivation and
	 * (asynchronous) paging if necessary.  This is a complex operation,
	 * only do it for direct user-mode faults, for now.
	 *
	 * To reduce overhead implement approximately a ~16MB hysteresis.
	 */
	p = td->td_proc;
	if ((fault_flags & VM_FAULT_USERMODE) && lp &&
	    p->p_limit && map->pmap && vm_pageout_memuse_mode >= 1 &&
	    map != &kernel_map) {
		vm_pindex_t limit;
		vm_pindex_t size;

		limit = OFF_TO_IDX(qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
					p->p_rlimit[RLIMIT_RSS].rlim_max));
		size = pmap_resident_tlnw_count(map->pmap);
		if (limit >= 0 && size > 4096 && size - 4096 >= limit) {
			vm_pageout_map_deactivate_pages(map, limit);
		}
	}
#endif

	if (result != KERN_SUCCESS && debug_fault < 0) {
		kprintf("VM_FAULT %d:%d (%s) result %d "
			"addr=%jx type=%02x flags=%02x "
			"fs.m=%p fs.prot=%02x fs.wflags=%02x fs.entry=%p\n",
			(curthread->td_proc ? curthread->td_proc->p_pid : -1),
			(curthread->td_lwp ? curthread->td_lwp->lwp_tid : -1),
			curthread->td_comm,
			result,
			(intmax_t)vaddr, fault_type, fault_flags,
			fs.mary[0], fs.prot, fs.wflags, fs.entry);
		while (debug_fault < 0 && (debug_fault & 1))
			tsleep(&debug_fault, 0, "DEBUG", hz);
	}

	return (result);
}

/*
 * Attempt a lockless vm_fault() shortcut.  The stars have to align for this
 * to work.  But if it does we can get our page only soft-busied and not
 * have to touch the vm_object or vnode locks at all.
 */
static
int
vm_fault_bypass(struct faultstate *fs, vm_pindex_t first_pindex,
		vm_pindex_t first_count, int *mextcountp,
		vm_prot_t fault_type)
{
	vm_page_t m;
	vm_object_t obj;	/* NOT LOCKED */
	int n;
	int nlim;

	/*
	 * Don't waste time if the object is only being used by one vm_map.
	 */
	obj = fs->first_ba->object;
#if 0
	if (obj->flags & OBJ_ONEMAPPING)
		return KERN_FAILURE;
#endif

	/*
	 * This will try to wire/unwire a page, which can't be done with
	 * a soft-busied page.
	 */
	if (fs->fault_flags & VM_FAULT_WIRE_MASK)
		return KERN_FAILURE;

	/*
	 * Ok, try to get the vm_page quickly via the hash table.  The
	 * page will be soft-busied on success (NOT hard-busied).
	 */
	m = vm_page_hash_get(obj, first_pindex);
	if (m == NULL) {
#ifdef VM_FAULT_QUICK_DEBUG
		++vm_fault_bypass_failure_count2;
#endif
		return KERN_FAILURE;
	}
	if ((obj->flags & OBJ_DEAD) ||
	    m->valid != VM_PAGE_BITS_ALL ||
	    m->queue - m->pc != PQ_ACTIVE ||
	    (m->flags & PG_SWAPPED)) {
		vm_page_sbusy_drop(m);
#ifdef VM_FAULT_QUICK_DEBUG
		++vm_fault_bypass_failure_count3;
#endif
		return KERN_FAILURE;
	}

	/*
	 * The page is already fully valid, ACTIVE, and is not PG_SWAPPED.
	 *
	 * Don't map the page writable when emulating the dirty bit, a
	 * fault must be taken for proper emulation (vkernel).
	 */
	if (curthread->td_lwp && curthread->td_lwp->lwp_vmspace &&
	    pmap_emulate_ad_bits(&curthread->td_lwp->lwp_vmspace->vm_pmap)) {
		if ((fault_type & VM_PROT_WRITE) == 0)
			fs->prot &= ~VM_PROT_WRITE;
	}

	/*
	 * If this is a write fault the object and the page must already
	 * be writable.  Since we don't hold an object lock and only a
	 * soft-busy on the page, we cannot manipulate the object or
	 * the page state (other than the page queue).
	 */
	if (fs->prot & VM_PROT_WRITE) {
		if ((obj->flags & (OBJ_WRITEABLE | OBJ_MIGHTBEDIRTY)) !=
		    (OBJ_WRITEABLE | OBJ_MIGHTBEDIRTY) ||
		    m->dirty != VM_PAGE_BITS_ALL) {
			vm_page_sbusy_drop(m);
#ifdef VM_FAULT_QUICK_DEBUG
			++vm_fault_bypass_failure_count4;
#endif
			return KERN_FAILURE;
		}
		vm_set_nosync(m, fs->entry);
	}

	/*
	 * Set page and potentially burst in more
	 *
	 * Even though we are only soft-busied we can still move pages
	 * around in the normal queue(s).  The soft-busy prevents the
	 * page from being removed from the object, etc (normal operation).
	 *
	 * However, in this fast path it is excessively important to avoid
	 * any hard locks, so we use a special passive version of activate.
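	 *
	 * The same criteria (fully valid, PQ_ACTIVE, not PG_SWAPPED, and
	 * writability when VM_PROT_WRITE) are re-checked for each extra
	 * page burst in below.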
	 */
	fs->msoftonly = 1;
	fs->mary[0] = m;
	vm_page_soft_activate(m);

	if (vm_fault_bypass_count > 1) {
		nlim = vm_fault_bypass_count;
		if (nlim > VM_FAULT_MAX_QUICK)		/* array limit(+1) */
			nlim = VM_FAULT_MAX_QUICK;
		if (nlim > first_count)			/* user limit */
			nlim = first_count;

		for (n = 1; n < nlim; ++n) {
			m = vm_page_hash_get(obj, first_pindex + n);
			if (m == NULL)
				break;
			if (m->valid != VM_PAGE_BITS_ALL ||
			    m->queue - m->pc != PQ_ACTIVE ||
			    (m->flags & PG_SWAPPED)) {
				vm_page_sbusy_drop(m);
				break;
			}
			if (fs->prot & VM_PROT_WRITE) {
				if ((obj->flags & (OBJ_WRITEABLE |
						   OBJ_MIGHTBEDIRTY)) !=
				    (OBJ_WRITEABLE | OBJ_MIGHTBEDIRTY) ||
				    m->dirty != VM_PAGE_BITS_ALL) {
					vm_page_sbusy_drop(m);
					break;
				}
			}
			vm_page_soft_activate(m);
			fs->mary[n] = m;
		}
		*mextcountp = n;
	}

#ifdef VM_FAULT_QUICK_DEBUG
	++vm_fault_bypass_success_count;
#endif

	return KERN_SUCCESS;
}

/*
 * Fault in the specified virtual address in the current process map,
 * returning a held VM page or NULL.  See vm_fault_page() for more
 * information.
 *
 * No requirements.
 */
vm_page_t
vm_fault_page_quick(vm_offset_t va, vm_prot_t fault_type,
		    int *errorp, int *busyp)
{
	struct lwp *lp = curthread->td_lwp;
	vm_page_t m;

	m = vm_fault_page(&lp->lwp_vmspace->vm_map, va,
			  fault_type, VM_FAULT_NORMAL,
			  errorp, busyp);
	return(m);
}

/*
 * Fault in the specified virtual address in the specified map, doing all
 * necessary manipulation of the object store and all necessary I/O.  Return
 * a held VM page or NULL, and set *errorp.  The related pmap is not
 * updated.
 *
 * If busyp is not NULL then *busyp will be set to TRUE if this routine
 * decides to return a busied page (aka VM_PROT_WRITE), or FALSE if it
 * does not (VM_PROT_WRITE not specified or busyp is NULL).  If busyp is
 * NULL the returned page is only held.
 *
 * If the caller has no intention of writing to the page's contents, busyp
 * can be passed as NULL along with VM_PROT_WRITE to force a COW operation
 * without busying the page.
 *
 * The returned page will also be marked PG_REFERENCED.
 *
 * If the page cannot be faulted writable and VM_PROT_WRITE was specified, an
 * error will be returned.
 *
 * No requirements.
 */
vm_page_t
vm_fault_page(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
	      int fault_flags, int *errorp, int *busyp)
{
	vm_pindex_t first_pindex;
	vm_pindex_t first_count;
	struct faultstate fs;
	int result;
	int retry;
	int growstack;
	int didcow;
	vm_prot_t orig_fault_type = fault_type;

	retry = 0;
	didcow = 0;
	fs.hardfault = 0;
	fs.fault_flags = fault_flags;
	KKASSERT((fault_flags & VM_FAULT_WIRE_MASK) == 0);

	/*
	 * Dive the pmap (concurrency possible).  If we find the
	 * appropriate page we can terminate early and quickly.
	 *
	 * This works great for normal programs but will always return
	 * NULL for host lookups of vkernel maps in VMM mode.
	 *
	 * NOTE: pmap_fault_page_quick() might not busy the page.  If
	 *	 VM_PROT_WRITE is set in fault_type and pmap_fault_page_quick()
	 *	 returns non-NULL, it will safely dirty the returned vm_page_t
	 *	 for us.  We cannot safely dirty it here (it might not be
	 *	 busy).
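	 *
	 *	 On success we simply return the page here; none of the
	 *	 faultstate setup or object/vnode locking below is needed.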
	 */
	fs.mary[0] = pmap_fault_page_quick(map->pmap, vaddr, fault_type, busyp);
	if (fs.mary[0]) {
		*errorp = 0;
		return(fs.mary[0]);
	}

	/*
	 * Otherwise take a concurrency hit and do a formal page
	 * fault.
	 */
	fs.vp = NULL;
	fs.shared = vm_shared_fault;
	fs.first_shared = vm_shared_fault;
	fs.msoftonly = 0;
	growstack = 1;

	/*
	 * VM_FAULT_UNSWAP - swap_pager_unswapped() needs an exclusive object
	 * VM_FAULT_DIRTY - may require swap_pager_unswapped() later, but
	 * we can try shared first.
	 */
	if (fault_flags & VM_FAULT_UNSWAP) {
		fs.first_shared = 0;
	}

RetryFault:
	/*
	 * Find the vm_map_entry representing the backing store and resolve
	 * the top level object and page index.  This may have the side
	 * effect of executing a copy-on-write on the map entry and/or
	 * creating a shadow object, but will not COW any actual VM pages.
	 *
	 * On success fs.map is left read-locked and various other fields
	 * are initialized but not otherwise referenced or locked.
	 *
	 * NOTE! vm_map_lookup will upgrade the fault_type to VM_FAULT_WRITE
	 *	 if the map entry is a virtual page table and also writable,
	 *	 so we can set the 'A'accessed bit in the virtual page table
	 *	 entry.
	 */
	fs.map = map;
	fs.first_ba_held = 0;
	result = vm_map_lookup(&fs.map, vaddr, fault_type,
			       &fs.entry, &fs.first_ba,
			       &first_pindex, &first_count,
			       &fs.first_prot, &fs.wflags);

	if (result != KERN_SUCCESS) {
		if (result == KERN_FAILURE_NOFAULT) {
			*errorp = KERN_FAILURE;
			fs.mary[0] = NULL;
			goto done;
		}
		if (result != KERN_PROTECTION_FAILURE ||
		    (fs.fault_flags & VM_FAULT_WIRE_MASK) != VM_FAULT_USER_WIRE)
		{
			if (result == KERN_INVALID_ADDRESS && growstack &&
			    map != &kernel_map && curproc != NULL) {
				result = vm_map_growstack(map, vaddr);
				if (result == KERN_SUCCESS) {
					growstack = 0;
					++retry;
					goto RetryFault;
				}
				result = KERN_FAILURE;
			}
			fs.mary[0] = NULL;
			*errorp = result;
			goto done;
		}

		/*
		 * If we are user-wiring a r/w segment, and it is COW, then
		 * we need to do the COW operation.  Note that we don't
		 * currently COW RO sections now, because it is NOT desirable
		 * to COW .text.  We simply keep .text from ever being COW'ed
		 * and take the heat that one cannot debug wired .text sections.
		 */
		result = vm_map_lookup(&fs.map, vaddr,
				       VM_PROT_READ|VM_PROT_WRITE|
				       VM_PROT_OVERRIDE_WRITE,
				       &fs.entry, &fs.first_ba,
				       &first_pindex, &first_count,
				       &fs.first_prot, &fs.wflags);
		if (result != KERN_SUCCESS) {
			/* could also be KERN_FAILURE_NOFAULT */
			*errorp = KERN_FAILURE;
			fs.mary[0] = NULL;
			goto done;
		}

		/*
		 * If we don't COW now, on a user wire, the user will never
		 * be able to write to the mapping.  If we don't make this
		 * restriction, the bookkeeping would be nearly impossible.
		 *
		 * XXX We have a shared lock, this will have a MP race but
		 * I don't see how it can hurt anything.
		 */
		if ((fs.entry->protection & VM_PROT_WRITE) == 0) {
			atomic_clear_char(&fs.entry->max_protection,
					  VM_PROT_WRITE);
		}
	}

	/*
	 * fs.map is read-locked
	 *
	 * Misc checks.  Save the map generation number to detect races.
	 */
	fs.lookup_still_valid = 1;
	fs.first_m = NULL;
	fs.ba = fs.first_ba;

	if (fs.entry->eflags & MAP_ENTRY_NOFAULT) {
		panic("vm_fault: fault on nofault entry, addr: %lx",
		      (u_long)vaddr);
	}

	/*
	 * A user-kernel shared map has no VM object and bypasses
	 * everything.  We execute the uksmap function with a temporary
	 * fictitious vm_page.  The address is directly mapped with no
	 * management.
	 */
	if (fs.entry->maptype == VM_MAPTYPE_UKSMAP) {
		struct vm_page fakem;

		bzero(&fakem, sizeof(fakem));
		fakem.pindex = first_pindex;
		fakem.flags = PG_FICTITIOUS | PG_UNQUEUED;
		fakem.busy_count = PBUSY_LOCKED;
		fakem.valid = VM_PAGE_BITS_ALL;
		fakem.pat_mode = VM_MEMATTR_DEFAULT;
		if (fs.entry->ba.uksmap(&fs.entry->ba, UKSMAPOP_FAULT,
					fs.entry->aux.dev, &fakem)) {
			*errorp = KERN_FAILURE;
			fs.mary[0] = NULL;
			unlock_things(&fs);
			goto done2;
		}
		fs.mary[0] = PHYS_TO_VM_PAGE(fakem.phys_addr);
		vm_page_hold(fs.mary[0]);
		if (busyp)
			*busyp = 0;	/* don't need to busy R or W */
		unlock_things(&fs);
		*errorp = 0;
		goto done;
	}

	/*
	 * A system map entry may return a NULL object.  No object means
	 * no pager means an unrecoverable kernel fault.
	 */
	if (fs.first_ba == NULL) {
		panic("vm_fault: unrecoverable fault at %p in entry %p",
		      (void *)vaddr, fs.entry);
	}

	/*
	 * Fail here if not a trivial anonymous page fault and TDF_NOFAULT
	 * is set.
	 *
	 * Unfortunately a deadlock can occur if we are forced to page-in
	 * from swap, but diving all the way into the vm_pager_get_page()
	 * function to find out is too much.  Just check the object type.
	 */
	if ((curthread->td_flags & TDF_NOFAULT) &&
	    (retry ||
	     fs.first_ba->object->type == OBJT_VNODE ||
	     fs.first_ba->object->type == OBJT_SWAP ||
	     fs.first_ba->backing_ba)) {
		*errorp = KERN_FAILURE;
		unlock_things(&fs);
		fs.mary[0] = NULL;
		goto done2;
	}

	/*
	 * If the entry is wired we cannot change the page protection.
	 */
	if (fs.wflags & FW_WIRED)
		fault_type = fs.first_prot;

	/*
	 * Make a reference to this object to prevent its disposal while we
	 * are messing with it.  Once we have the reference, the map is free
	 * to be diddled.  Since objects reference their shadows (and copies),
	 * they will stay around as well.
	 *
	 * The reference should also prevent an unexpected collapse of the
	 * parent that might move pages from the current object into the
	 * parent unexpectedly, resulting in corruption.
	 *
	 * Bump the paging-in-progress count to prevent size changes (e.g.
	 * truncation operations) during I/O.  This must be done after
	 * obtaining the vnode lock in order to avoid possible deadlocks.
	 */
	if (fs.first_ba->flags & VM_MAP_BACK_EXCL_HEUR)
		fs.first_shared = 0;

	if (fs.first_shared)
		vm_object_hold_shared(fs.first_ba->object);
	else
		vm_object_hold(fs.first_ba->object);
	fs.first_ba_held = 1;
	if (fs.vp == NULL)
		fs.vp = vnode_pager_lock(fs.first_ba);	/* shared */

	/*
	 * The page we want is at (first_object, first_pindex).
	 *
	 * Now we have the actual (object, pindex), fault in the page.  If
	 * vm_fault_object() fails it will unlock and deallocate the FS
	 * data.  If it succeeds everything remains locked and fs->ba->object
	 * will have an additional PIP count if fs->ba != fs->first_ba.
	 */
	fs.mary[0] = NULL;
	result = vm_fault_object(&fs, first_pindex, fault_type, 1);

	if (result == KERN_TRY_AGAIN) {
		KKASSERT(fs.first_ba_held == 0);
		++retry;
		didcow |= fs.wflags & FW_DIDCOW;
		goto RetryFault;
	}
	if (result != KERN_SUCCESS) {
		*errorp = result;
		fs.mary[0] = NULL;
		goto done;
	}

	if ((orig_fault_type & VM_PROT_WRITE) &&
	    (fs.prot & VM_PROT_WRITE) == 0) {
		*errorp = KERN_PROTECTION_FAILURE;
		unlock_things(&fs);
		fs.mary[0] = NULL;
		goto done;
	}

	/*
	 * Generally speaking we don't want to update the pmap because
	 * this routine can be called many times for situations that do
	 * not require updating the pmap, not to mention the page might
	 * already be in the pmap.
	 *
	 * However, if our vm_map_lookup() results in a COW, we need to
	 * at least remove the pte from the pmap to guarantee proper
	 * visibility of modifications made to the process.  For example,
	 * modifications made by vkernel uiocopy/related routines and
	 * modifications made by ptrace().
	 */
	vm_page_flag_set(fs.mary[0], PG_REFERENCED);
#if 0
	pmap_enter(fs.map->pmap, vaddr, fs.mary[0], fs.prot,
		   fs.wflags & FW_WIRED, NULL);
	mycpu->gd_cnt.v_vm_faults++;
	if (curthread->td_lwp)
		++curthread->td_lwp->lwp_ru.ru_minflt;
#endif
	if ((fs.wflags | didcow) & FW_DIDCOW) {
		pmap_remove(fs.map->pmap,
			    vaddr & ~PAGE_MASK,
			    (vaddr & ~PAGE_MASK) + PAGE_SIZE);
	}

	/*
	 * On success vm_fault_object() does not unlock or deallocate, and
	 * fs.mary[0] will contain a busied page.  So we must unlock here
	 * after having messed with the pmap.
	 */
	unlock_things(&fs);

	/*
	 * Return a held page.  We are not doing any pmap manipulation so do
	 * not set PG_MAPPED.  However, adjust the page flags according to
	 * the fault type because the caller may not use a managed pmapping
	 * (so we don't want to lose the fact that the page will be dirtied
	 * if a write fault was specified).
	 */
	if (fault_type & VM_PROT_WRITE)
		vm_page_dirty(fs.mary[0]);
	vm_page_activate(fs.mary[0]);

	if (curthread->td_lwp) {
		if (fs.hardfault) {
			curthread->td_lwp->lwp_ru.ru_majflt++;
		} else {
			curthread->td_lwp->lwp_ru.ru_minflt++;
		}
	}

	/*
	 * Unlock everything, and return the held or busied page.
	 */
	if (busyp) {
		if (fault_type & VM_PROT_WRITE) {
			vm_page_dirty(fs.mary[0]);
			*busyp = 1;
		} else {
			*busyp = 0;
			vm_page_hold(fs.mary[0]);
			vm_page_wakeup(fs.mary[0]);
		}
	} else {
		vm_page_hold(fs.mary[0]);
		vm_page_wakeup(fs.mary[0]);
	}
	/*vm_object_deallocate(fs.first_ba->object);*/
	*errorp = 0;

done:
	KKASSERT(fs.first_ba_held == 0);
done2:
	return(fs.mary[0]);
}

/*
 * Fault in the specified (object,offset), dirty the returned page as
 * needed.  If the requested fault_type cannot be satisfied, NULL is
 * returned and an error is set.
 *
 * A held (but not busied) page is returned.
 *
 * The passed in object must be held as specified by the shared
 * argument.
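 *
 * *sharedp is updated on return to reflect whether the object lock was
 * still shared (it is cleared if the fault had to upgrade to an
 * exclusive lock).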
 */
vm_page_t
vm_fault_object_page(vm_object_t object, vm_ooffset_t offset,
		     vm_prot_t fault_type, int fault_flags,
		     int *sharedp, int *errorp)
{
	int result;
	vm_pindex_t first_pindex;
	vm_pindex_t first_count;
	struct faultstate fs;
	struct vm_map_entry entry;

	/*
	 * Since we aren't actually faulting the page into a
	 * pmap we can just fake the entry.ba.
	 */
	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
	bzero(&entry, sizeof(entry));
	entry.maptype = VM_MAPTYPE_NORMAL;
	entry.protection = entry.max_protection = fault_type;
	entry.ba.backing_ba = NULL;
	entry.ba.object = object;
	entry.ba.offset = 0;

	fs.hardfault = 0;
	fs.fault_flags = fault_flags;
	fs.map = NULL;
	fs.shared = vm_shared_fault;
	fs.first_shared = *sharedp;
	fs.msoftonly = 0;
	fs.vp = NULL;
	fs.first_ba_held = -1;	/* object held across call, prevent drop */
	KKASSERT((fault_flags & VM_FAULT_WIRE_MASK) == 0);

	/*
	 * VM_FAULT_UNSWAP - swap_pager_unswapped() needs an exclusive object
	 * VM_FAULT_DIRTY - may require swap_pager_unswapped() later, but
	 * we can try shared first.
	 */
	if (fs.first_shared && (fault_flags & VM_FAULT_UNSWAP)) {
		fs.first_shared = 0;
		vm_object_upgrade(object);
	}

	/*
	 * Retry loop as needed (typically for shared->exclusive transitions)
	 */
RetryFault:
	*sharedp = fs.first_shared;
	first_pindex = OFF_TO_IDX(offset);
	first_count = 1;
	fs.first_ba = &entry.ba;
	fs.ba = fs.first_ba;
	fs.entry = &entry;
	fs.first_prot = fault_type;
	fs.wflags = 0;

	/*
	 * Make a reference to this object to prevent its disposal while we
	 * are messing with it.  Once we have the reference, the map is free
	 * to be diddled.  Since objects reference their shadows (and copies),
	 * they will stay around as well.
	 *
	 * The reference should also prevent an unexpected collapse of the
	 * parent that might move pages from the current object into the
	 * parent unexpectedly, resulting in corruption.
	 *
	 * Bump the paging-in-progress count to prevent size changes (e.g.
	 * truncation operations) during I/O.  This must be done after
	 * obtaining the vnode lock in order to avoid possible deadlocks.
	 */
	if (fs.vp == NULL)
		fs.vp = vnode_pager_lock(fs.first_ba);

	fs.lookup_still_valid = 1;
	fs.first_m = NULL;

	/*
	 * Now we have the actual (object, pindex), fault in the page.  If
	 * vm_fault_object() fails it will unlock and deallocate the FS
	 * data.  If it succeeds everything remains locked and fs->ba->object
	 * will have an additional PIP count if fs->ba != fs->first_ba.
	 *
	 * On KERN_TRY_AGAIN vm_fault_object() leaves fs.first_ba intact.
	 * We may have to upgrade its lock to handle the requested fault.
	 */
	result = vm_fault_object(&fs, first_pindex, fault_type, 0);

	if (result == KERN_TRY_AGAIN) {
		if (fs.first_shared == 0 && *sharedp)
			vm_object_upgrade(object);
		goto RetryFault;
	}
	if (result != KERN_SUCCESS) {
		*errorp = result;
		return(NULL);
	}

	if ((fault_type & VM_PROT_WRITE) && (fs.prot & VM_PROT_WRITE) == 0) {
		*errorp = KERN_PROTECTION_FAILURE;
		unlock_things(&fs);
		return(NULL);
	}

	/*
	 * On success vm_fault_object() does not unlock or deallocate, so we
	 * do it here.  Note that the returned fs.m will be busied.
	 */
	unlock_things(&fs);

	/*
	 * Return a held page.  We are not doing any pmap manipulation so do
	 * not set PG_MAPPED.  However, adjust the page flags according to
	 * the fault type because the caller may not use a managed pmapping
	 * (so we don't want to lose the fact that the page will be dirtied
	 * if a write fault was specified).
	 */
	vm_page_hold(fs.mary[0]);
	vm_page_activate(fs.mary[0]);
	if ((fault_type & VM_PROT_WRITE) || (fault_flags & VM_FAULT_DIRTY))
		vm_page_dirty(fs.mary[0]);
	if (fault_flags & VM_FAULT_UNSWAP)
		swap_pager_unswapped(fs.mary[0]);

	/*
	 * Indicate that the page was accessed.
	 */
	vm_page_flag_set(fs.mary[0], PG_REFERENCED);

	if (curthread->td_lwp) {
		if (fs.hardfault) {
			curthread->td_lwp->lwp_ru.ru_majflt++;
		} else {
			curthread->td_lwp->lwp_ru.ru_minflt++;
		}
	}

	/*
	 * Unlock everything, and return the held page.
	 */
	vm_page_wakeup(fs.mary[0]);
	/*vm_object_deallocate(fs.first_ba->object);*/

	*errorp = 0;
	return(fs.mary[0]);
}

/*
 * This is the core of the vm_fault code.
 *
 * Do all operations required to fault-in (fs.first_ba->object, pindex).
 * Run through the backing store as necessary and do required COW or virtual
 * copy operations.  The caller has already fully resolved the vm_map_entry
 * and, if appropriate, has created a copy-on-write layer.  All we need to
 * do is iterate the object chain.
 *
 * On failure (fs) is unlocked and deallocated and the caller may return or
 * retry depending on the failure code.  On success (fs) is NOT unlocked or
 * deallocated, fs.mary[0] will contain a resolved, busied page, and fs.ba's
 * object will have an additional PIP count if it is not equal to
 * fs.first_ba.
 *
 * If locks based on fs->first_shared or fs->shared are insufficient,
 * clear the appropriate field(s) and return RETRY.  COWs require that
 * first_shared be 0, while page allocations (or frees) require that
 * shared be 0.  Renames require that both be 0.
 *
 * NOTE! fs->[first_]shared might be set with VM_FAULT_DIRTY also set.
 *	 We will have to retry with it exclusive if the vm_page is
 *	 PG_SWAPPED.
 *
 * fs->first_ba->object must be held on call.
 */
static
int
vm_fault_object(struct faultstate *fs, vm_pindex_t first_pindex,
		vm_prot_t fault_type, int allow_nofault)
{
	vm_map_backing_t next_ba;
	vm_pindex_t pindex;
	int error;

	ASSERT_LWKT_TOKEN_HELD(vm_object_token(fs->first_ba->object));
	fs->prot = fs->first_prot;
	pindex = first_pindex;
	KKASSERT(fs->ba == fs->first_ba);

	vm_object_pip_add(fs->first_ba->object, 1);

	/*
	 * If a read fault occurs we try to upgrade the page protection
	 * and make it also writable if possible.  There are three cases
	 * where we cannot make the page mapping writable:
	 *
	 * (1) The mapping is read-only or the VM object is read-only,
	 *     fs->prot above will simply not have VM_PROT_WRITE set.
	 *
	 * (2) If the VM page is read-only or copy-on-write, upgrading would
	 *     just result in an unnecessary COW fault.
	 *
	 * (3) If the pmap specifically requests A/M bit emulation, downgrade
	 *     here.
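	 *
	 *     (A/M bit emulation is used by the vkernel; see the matching
	 *     check in vm_fault_bypass().)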
	 */
	if (curthread->td_lwp && curthread->td_lwp->lwp_vmspace &&
	    pmap_emulate_ad_bits(&curthread->td_lwp->lwp_vmspace->vm_pmap)) {
		if ((fault_type & VM_PROT_WRITE) == 0)
			fs->prot &= ~VM_PROT_WRITE;
	}

	/* vm_object_hold(fs->ba->object); implied b/c ba == first_ba */

	for (;;) {
		/*
		 * If the object is dead, we stop here
		 */
		if (fs->ba->object->flags & OBJ_DEAD) {
			vm_object_pip_wakeup(fs->first_ba->object);
			unlock_things(fs);
			return (KERN_PROTECTION_FAILURE);
		}

		/*
		 * See if the page is resident.  Wait/Retry if the page is
		 * busy (lots of stuff may have changed so we can't continue
		 * in that case).
		 *
		 * We can theoretically allow the soft-busy case on a read
		 * fault if the page is marked valid, but since such
		 * pages are typically already pmap'd, putting that
		 * special case in might be more effort than it is
		 * worth.  We cannot under any circumstances mess
		 * around with a vm_page_t->busy page except, perhaps,
		 * to pmap it.
		 */
		fs->mary[0] = vm_page_lookup_busy_try(fs->ba->object, pindex,
						      TRUE, &error);
		if (error) {
			vm_object_pip_wakeup(fs->first_ba->object);
			unlock_things(fs);
			vm_page_sleep_busy(fs->mary[0], TRUE, "vmpfw");
			mycpu->gd_cnt.v_intrans++;
			fs->mary[0] = NULL;
			return (KERN_TRY_AGAIN);
		}
		if (fs->mary[0]) {
			/*
			 * The page is busied for us.
			 *
			 * If reactivating a page from PQ_CACHE we may have
			 * to rate-limit.
			 */
			int queue = fs->mary[0]->queue;
			vm_page_unqueue_nowakeup(fs->mary[0]);

			if ((queue - fs->mary[0]->pc) == PQ_CACHE &&
			    vm_paging_severe()) {
				vm_page_activate(fs->mary[0]);
				vm_page_wakeup(fs->mary[0]);
				fs->mary[0] = NULL;
				vm_object_pip_wakeup(fs->first_ba->object);
				unlock_things(fs);
				if (allow_nofault == 0 ||
				    (curthread->td_flags & TDF_NOFAULT) == 0) {
					thread_t td;

					vm_wait_pfault();
					td = curthread;
					if (td->td_proc && (td->td_proc->p_flags & P_LOWMEMKILL))
						return (KERN_PROTECTION_FAILURE);
				}
				return (KERN_TRY_AGAIN);
			}

			/*
			 * If it still isn't completely valid (readable),
			 * or if a read-ahead-mark is set on the VM page,
			 * jump to readrest, else we found the page and
			 * can return.
			 *
			 * We can release the spl once we have marked the
			 * page busy.
			 */
			if (fs->mary[0]->object != &kernel_object) {
				if ((fs->mary[0]->valid & VM_PAGE_BITS_ALL) !=
				    VM_PAGE_BITS_ALL) {
					goto readrest;
				}
				if (fs->mary[0]->flags & PG_RAM) {
					if (debug_cluster)
						kprintf("R");
					vm_page_flag_clear(fs->mary[0], PG_RAM);
					goto readrest;
				}
			}
			atomic_clear_int(&fs->first_ba->flags,
					 VM_MAP_BACK_EXCL_HEUR);
			break; /* break to PAGE HAS BEEN FOUND */
		}

		/*
		 * Page is not resident.  If this is the search termination
		 * or the pager might contain the page, allocate a new page.
		 */
		if (TRYPAGER(fs) || fs->ba == fs->first_ba) {
			/*
			 * If this is a SWAP object we can use the shared
			 * lock to check existence of a swap block.  If
			 * there isn't one we can skip to the next object.
			 *
			 * However, if this is the first object we allocate
			 * a page now just in case we need to copy to it
			 * later.
			 */
			if (fs->ba != fs->first_ba &&
			    fs->ba->object->type == OBJT_SWAP) {
				if (swap_pager_haspage_locked(fs->ba->object,
							      pindex) == 0) {
					goto next;
				}
			}

			/*
			 * Allocating, must be exclusive.
			 */
			atomic_set_int(&fs->first_ba->flags,
				       VM_MAP_BACK_EXCL_HEUR);
			if (fs->ba == fs->first_ba && fs->first_shared) {
				fs->first_shared = 0;
				vm_object_pip_wakeup(fs->first_ba->object);
				unlock_things(fs);
				return (KERN_TRY_AGAIN);
			}
			if (fs->ba != fs->first_ba && fs->shared) {
				fs->first_shared = 0;
				fs->shared = 0;
				vm_object_pip_wakeup(fs->first_ba->object);
				unlock_things(fs);
				return (KERN_TRY_AGAIN);
			}

			/*
			 * If the page is beyond the object size we fail
			 */
			if (pindex >= fs->ba->object->size) {
				vm_object_pip_wakeup(fs->first_ba->object);
				unlock_things(fs);
				return (KERN_PROTECTION_FAILURE);
			}

			/*
			 * Allocate a new page for this object/offset pair.
			 *
			 * It is possible for the allocation to race, so
			 * handle the case.
			 *
			 * Does not apply to OBJT_MGTDEVICE (e.g. gpu / drm
			 * subsystem).  For OBJT_MGTDEVICE the pages are not
			 * indexed in the VM object at all but instead directly
			 * entered into the pmap.
			 */
			fs->mary[0] = NULL;
			if (fs->ba->object->type == OBJT_MGTDEVICE)
				goto readrest;

			if (!vm_paging_severe()) {
				fs->mary[0] = vm_page_alloc(fs->ba->object,
				    pindex,
				    ((fs->vp || fs->ba->backing_ba) ?
					VM_ALLOC_NULL_OK | VM_ALLOC_NORMAL :
					VM_ALLOC_NULL_OK | VM_ALLOC_NORMAL |
					VM_ALLOC_USE_GD | VM_ALLOC_ZERO));
			}
			if (fs->mary[0] == NULL) {
				vm_object_pip_wakeup(fs->first_ba->object);
				unlock_things(fs);
				if (allow_nofault == 0 ||
				    (curthread->td_flags & TDF_NOFAULT) == 0) {
					thread_t td;

					vm_wait_pfault();
					td = curthread;
					if (td->td_proc && (td->td_proc->p_flags & P_LOWMEMKILL))
						return (KERN_PROTECTION_FAILURE);
				}
				return (KERN_TRY_AGAIN);
			}

			/*
			 * Fall through to readrest.  We have a new page which
			 * will have to be paged (since m->valid will be 0).
			 */
		}

readrest:
		/*
		 * We have found an invalid or partially valid page, a
		 * page with a read-ahead mark which might be partially or
		 * fully valid (and maybe dirty too), or we have allocated
		 * a new page.
		 *
		 * Attempt to fault-in the page if there is a chance that the
		 * pager has it, and potentially fault in additional pages
		 * at the same time.
		 *
		 * If TRYPAGER is true then fs.mary[0] will be non-NULL and
		 * busied for us.
		 */
		if (TRYPAGER(fs)) {
			u_char behavior = vm_map_entry_behavior(fs->entry);
			vm_object_t object;
			vm_page_t first_m;
			int seqaccess;
			int rv;

			if (behavior == MAP_ENTRY_BEHAV_RANDOM)
				seqaccess = 0;
			else
				seqaccess = -1;

			/*
			 * Doing I/O may synchronously insert additional
			 * pages so we can't be shared at this point either.
			 *
			 * NOTE: We can't free fs->mary[0] here in the
			 *	 allocated case (fs->ba != fs->first_ba) as
			 *	 this would require an exclusively locked
			 *	 VM object.
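			 *
			 *	 Instead the page is deactivated and woken
			 *	 up and the whole fault is retried with the
			 *	 lock(s) exclusive (KERN_TRY_AGAIN below).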
1801 */ 1802 if (fs->ba == fs->first_ba && fs->first_shared) { 1803 if (fs->mary[0]) { 1804 vm_page_deactivate(fs->mary[0]); 1805 vm_page_wakeup(fs->mary[0]); 1806 fs->mary[0]= NULL; 1807 } 1808 fs->first_shared = 0; 1809 vm_object_pip_wakeup(fs->first_ba->object); 1810 unlock_things(fs); 1811 return (KERN_TRY_AGAIN); 1812 } 1813 if (fs->ba != fs->first_ba && fs->shared) { 1814 if (fs->mary[0]) { 1815 vm_page_deactivate(fs->mary[0]); 1816 vm_page_wakeup(fs->mary[0]); 1817 fs->mary[0] = NULL; 1818 } 1819 fs->first_shared = 0; 1820 fs->shared = 0; 1821 vm_object_pip_wakeup(fs->first_ba->object); 1822 unlock_things(fs); 1823 return (KERN_TRY_AGAIN); 1824 } 1825 1826 object = fs->ba->object; 1827 first_m = NULL; 1828 1829 /* object is held, no more access to entry or ba's */ 1830 1831 /* 1832 * Acquire the page data. We still hold object 1833 * and the page has been BUSY's. 1834 * 1835 * We own the page, but we must re-issue the lookup 1836 * because the pager may have replaced it (for example, 1837 * in order to enter a fictitious page into the 1838 * object). In this situation the pager will have 1839 * cleaned up the old page and left the new one 1840 * busy for us. 1841 * 1842 * If we got here through a PG_RAM read-ahead 1843 * mark the page may be partially dirty and thus 1844 * not freeable. Don't bother checking to see 1845 * if the pager has the page because we can't free 1846 * it anyway. We have to depend on the get_page 1847 * operation filling in any gaps whether there is 1848 * backing store or not. 1849 * 1850 * We must dispose of the page (fs->mary[0]) and also 1851 * possibly first_m (the fronting layer). If 1852 * this is a write fault leave the page intact 1853 * because we will probably have to copy fs->mary[0] 1854 * to fs->first_m on the retry. If this is a 1855 * read fault we probably won't need the page. 1856 * 1857 * For OBJT_MGTDEVICE (and eventually all types), 1858 * fs->mary[0] is not pre-allocated and may be set 1859 * to a vm_page (busied for us) without being inserted 1860 * into the object. In this case we want to return 1861 * the vm_page directly so the caller can issue the 1862 * pmap_enter(). 1863 */ 1864 rv = vm_pager_get_page(object, pindex, 1865 &fs->mary[0], seqaccess); 1866 1867 if (rv == VM_PAGER_OK) { 1868 ++fs->hardfault; 1869 if (object->type == OBJT_MGTDEVICE) { 1870 break; 1871 } 1872 1873 fs->mary[0] = vm_page_lookup(object, pindex); 1874 if (fs->mary[0]) { 1875 vm_page_activate(fs->mary[0]); 1876 vm_page_wakeup(fs->mary[0]); 1877 fs->mary[0] = NULL; 1878 } 1879 1880 if (fs->mary[0]) { 1881 /* NOT REACHED */ 1882 /* have page */ 1883 break; 1884 } 1885 vm_object_pip_wakeup(fs->first_ba->object); 1886 unlock_things(fs); 1887 return (KERN_TRY_AGAIN); 1888 } 1889 1890 /* 1891 * If the pager doesn't have the page, continue on 1892 * to the next object. Retain the vm_page if this 1893 * is the first object, we may need to copy into 1894 * it later. 1895 */ 1896 if (rv == VM_PAGER_FAIL) { 1897 if (fs->ba != fs->first_ba) { 1898 if (fs->mary[0]) { 1899 vm_page_free(fs->mary[0]); 1900 fs->mary[0] = NULL; 1901 } 1902 } 1903 goto next; 1904 } 1905 1906 /* 1907 * Remove the bogus page (which does not exist at this 1908 * object/offset). 1909 * 1910 * Also wake up any other process that may want to bring 1911 * in this page. 1912 * 1913 * If this is the top-level object, we must leave the 1914 * busy page to prevent another process from rushing 1915 * past us, and inserting the page in that object at 1916 * the same time that we are. 
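			 *
			 * (On a hard pager error the code below logs it for
			 *  VM_PAGER_ERROR, releases the page with
			 *  vnode_pager_freepage() -- it may have been wired
			 *  by the buffer cache during the pagein and cannot
			 *  simply be freed -- and converts the pager status
			 *  into a KERN_* error for the caller.)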
1917 */ 1918 if (rv == VM_PAGER_ERROR) { 1919 if (curproc) { 1920 kprintf("vm_fault: pager read error, " 1921 "pid %d (%s)\n", 1922 curproc->p_pid, 1923 curproc->p_comm); 1924 } else { 1925 kprintf("vm_fault: pager read error, " 1926 "thread %p (%s)\n", 1927 curthread, 1928 curthread->td_comm); 1929 } 1930 } 1931 1932 /* 1933 * I/O error or data outside pager's range. 1934 */ 1935 if (fs->mary[0]) { 1936 vnode_pager_freepage(fs->mary[0]); 1937 fs->mary[0] = NULL; 1938 } 1939 if (first_m) { 1940 vm_page_free(first_m); 1941 first_m = NULL; /* safety */ 1942 } 1943 vm_object_pip_wakeup(object); 1944 unlock_things(fs); 1945 1946 switch(rv) { 1947 case VM_PAGER_ERROR: 1948 return (KERN_FAILURE); 1949 case VM_PAGER_BAD: 1950 return (KERN_PROTECTION_FAILURE); 1951 default: 1952 return (KERN_PROTECTION_FAILURE); 1953 } 1954 1955 #if 0 1956 /* 1957 * Data outside the range of the pager or an I/O error 1958 * 1959 * The page may have been wired during the pagein, 1960 * e.g. by the buffer cache, and cannot simply be 1961 * freed. Call vnode_pager_freepage() to deal with it. 1962 * 1963 * The object is not held shared so we can safely 1964 * free the page. 1965 */ 1966 if (fs->ba != fs->first_ba) { 1967 1968 /* 1969 * XXX - we cannot just fall out at this 1970 * point, m has been freed and is invalid! 1971 */ 1972 } 1973 1974 /* 1975 * XXX - the check for kernel_map is a kludge to work 1976 * around having the machine panic on a kernel space 1977 * fault w/ I/O error. 1978 */ 1979 if (((fs->map != &kernel_map) && 1980 (rv == VM_PAGER_ERROR)) || (rv == VM_PAGER_BAD)) { 1981 if (fs->m) { 1982 /* from just above */ 1983 KKASSERT(fs->first_shared == 0); 1984 vnode_pager_freepage(fs->m); 1985 fs->m = NULL; 1986 } 1987 /* NOT REACHED */ 1988 } 1989 #endif 1990 } 1991 1992 next: 1993 /* 1994 * We get here if the object has a default pager (or unwiring) 1995 * or the pager doesn't have the page. 1996 * 1997 * fs->first_m will be used for the COW unless we find a 1998 * deeper page to be mapped read-only, in which case the 1999 * unlock*(fs) will free first_m. 2000 */ 2001 if (fs->ba == fs->first_ba) 2002 fs->first_m = fs->mary[0]; 2003 2004 /* 2005 * Move on to the next object. The chain lock should prevent 2006 * the backing_object from getting ripped out from under us. 2007 * 2008 * The object lock for the next object is governed by 2009 * fs->shared. 2010 */ 2011 next_ba = fs->ba->backing_ba; 2012 if (next_ba == NULL) { 2013 /* 2014 * If there's no object left, fill the page in the top 2015 * object with zeros. 2016 */ 2017 if (fs->ba != fs->first_ba) { 2018 vm_object_pip_wakeup(fs->ba->object); 2019 vm_object_drop(fs->ba->object); 2020 fs->ba = fs->first_ba; 2021 pindex = first_pindex; 2022 fs->mary[0] = fs->first_m; 2023 } 2024 fs->first_m = NULL; 2025 2026 /* 2027 * Zero the page and mark it valid. 2028 */ 2029 vm_page_zero_fill(fs->mary[0]); 2030 mycpu->gd_cnt.v_zfod++; 2031 fs->mary[0]->valid = VM_PAGE_BITS_ALL; 2032 break; /* break to PAGE HAS BEEN FOUND */ 2033 } 2034 2035 if (fs->shared) 2036 vm_object_hold_shared(next_ba->object); 2037 else 2038 vm_object_hold(next_ba->object); 2039 KKASSERT(next_ba == fs->ba->backing_ba); 2040 pindex -= OFF_TO_IDX(fs->ba->offset); 2041 pindex += OFF_TO_IDX(next_ba->offset); 2042 2043 if (fs->ba != fs->first_ba) { 2044 vm_object_pip_wakeup(fs->ba->object); 2045 vm_object_lock_swap(); /* flip ba/next_ba */ 2046 vm_object_drop(fs->ba->object); 2047 } 2048 fs->ba = next_ba; 2049 vm_object_pip_add(next_ba->object, 1); 2050 } 2051 2052 /* 2053 * PAGE HAS BEEN FOUND. 
[Loop invariant still holds -- the object lock 2054 * is held.] 2055 * 2056 * object still held. 2057 * vm_map may not be locked (determined by fs->lookup_still_valid) 2058 * 2059 * local shared variable may be different from fs->shared. 2060 * 2061 * If the page is being written, but isn't already owned by the 2062 * top-level object, we have to copy it into a new page owned by the 2063 * top-level object. 2064 */ 2065 KASSERT((fs->mary[0]->busy_count & PBUSY_LOCKED) != 0, 2066 ("vm_fault: not busy after main loop")); 2067 2068 if (fs->ba != fs->first_ba) { 2069 /* 2070 * We only really need to copy if we want to write it. 2071 */ 2072 if (fault_type & VM_PROT_WRITE) { 2073 #if 0 2074 /* CODE REFACTOR IN PROGRESS, REMOVE OPTIMIZATION */ 2075 /* 2076 * This allows pages to be virtually copied from a 2077 * backing_object into the first_object, where the 2078 * backing object has no other refs to it, and cannot 2079 * gain any more refs. Instead of a bcopy, we just 2080 * move the page from the backing object to the 2081 * first object. Note that we must mark the page 2082 * dirty in the first object so that it will go out 2083 * to swap when needed. 2084 */ 2085 if (virtual_copy_ok(fs)) { 2086 /* 2087 * (first_m) and (m) are both busied. We have 2088 * move (m) into (first_m)'s object/pindex 2089 * in an atomic fashion, then free (first_m). 2090 * 2091 * first_object is held so second remove 2092 * followed by the rename should wind 2093 * up being atomic. vm_page_free() might 2094 * block so we don't do it until after the 2095 * rename. 2096 */ 2097 vm_page_protect(fs->first_m, VM_PROT_NONE); 2098 vm_page_remove(fs->first_m); 2099 vm_page_rename(fs->mary[0], 2100 fs->first_ba->object, 2101 first_pindex); 2102 vm_page_free(fs->first_m); 2103 fs->first_m = fs->mary[0]; 2104 fs->mary[0] = NULL; 2105 mycpu->gd_cnt.v_cow_optim++; 2106 } else 2107 #endif 2108 { 2109 /* 2110 * Oh, well, lets copy it. 2111 * 2112 * We used to unmap the original page here 2113 * because vm_fault_page() didn't and this 2114 * would cause havoc for the umtx*() code 2115 * and the procfs code. 2116 * 2117 * This is no longer necessary. The 2118 * vm_fault_page() routine will now unmap the 2119 * page after a COW, and the umtx code will 2120 * recover on its own. 2121 */ 2122 /* 2123 * NOTE: Since fs->mary[0] is a backing page, 2124 * it is read-only, so there isn't any 2125 * copy race vs writers. 2126 */ 2127 KKASSERT(fs->first_shared == 0); 2128 vm_page_copy(fs->mary[0], fs->first_m); 2129 /* pmap_remove_specific( 2130 &curthread->td_lwp->lwp_vmspace->vm_pmap, 2131 fs->mary[0]); */ 2132 } 2133 2134 /* 2135 * We no longer need the old page or object. 2136 */ 2137 if (fs->mary[0]) 2138 release_page(fs); 2139 2140 /* 2141 * fs->ba != fs->first_ba due to above conditional 2142 */ 2143 vm_object_pip_wakeup(fs->ba->object); 2144 vm_object_drop(fs->ba->object); 2145 fs->ba = fs->first_ba; 2146 2147 /* 2148 * Only use the new page below... 2149 */ 2150 mycpu->gd_cnt.v_cow_faults++; 2151 fs->mary[0] = fs->first_m; 2152 pindex = first_pindex; 2153 } else { 2154 /* 2155 * If it wasn't a write fault avoid having to copy 2156 * the page by mapping it read-only from backing 2157 * store. The process is not allowed to modify 2158 * backing pages. 2159 */ 2160 fs->prot &= ~VM_PROT_WRITE; 2161 } 2162 } 2163 2164 /* 2165 * Relock the map if necessary, then check the generation count. 2166 * relock_map() will update fs->timestamp to account for the 2167 * relocking if necessary. 
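	 *
	 * (The relock path itself is currently compiled out below; the map
	 *  is expected to stay locked across the object traversal, which is
	 *  what the KKASSERT on fs->lookup_still_valid checks.)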
2168 * 2169 * If the count has changed after relocking then all sorts of 2170 * crap may have happened and we have to retry. 2171 * 2172 * NOTE: The relock_map() can fail due to a deadlock against 2173 * the vm_page we are holding BUSY. 2174 */ 2175 KKASSERT(fs->lookup_still_valid != 0); 2176 #if 0 2177 if (fs->lookup_still_valid == 0 && fs->map) { 2178 if (relock_map(fs) || 2179 fs->map->timestamp != fs->map_generation) { 2180 release_page(fs); 2181 vm_object_pip_wakeup(fs->first_ba->object); 2182 unlock_things(fs); 2183 return (KERN_TRY_AGAIN); 2184 } 2185 } 2186 #endif 2187 2188 /* 2189 * If the fault is a write, we know that this page is being 2190 * written NOW so dirty it explicitly to save on pmap_is_modified() 2191 * calls later. 2192 * 2193 * If this is a NOSYNC mmap we do not want to set PG_NOSYNC 2194 * if the page is already dirty to prevent data written with 2195 * the expectation of being synced from not being synced. 2196 * Likewise if this entry does not request NOSYNC then make 2197 * sure the page isn't marked NOSYNC. Applications sharing 2198 * data should use the same flags to avoid ping ponging. 2199 * 2200 * Also tell the backing pager, if any, that it should remove 2201 * any swap backing since the page is now dirty. 2202 */ 2203 vm_page_activate(fs->mary[0]); 2204 if (fs->prot & VM_PROT_WRITE) { 2205 vm_object_set_writeable_dirty(fs->first_ba->object); 2206 vm_set_nosync(fs->mary[0], fs->entry); 2207 if (fs->fault_flags & VM_FAULT_DIRTY) { 2208 vm_page_dirty(fs->mary[0]); 2209 if (fs->mary[0]->flags & PG_SWAPPED) { 2210 /* 2211 * If the page is swapped out we have to call 2212 * swap_pager_unswapped() which requires an 2213 * exclusive object lock. If we are shared, 2214 * we must clear the shared flag and retry. 2215 */ 2216 if ((fs->ba == fs->first_ba && 2217 fs->first_shared) || 2218 (fs->ba != fs->first_ba && fs->shared)) { 2219 vm_page_wakeup(fs->mary[0]); 2220 fs->mary[0] = NULL; 2221 if (fs->ba == fs->first_ba) 2222 fs->first_shared = 0; 2223 else 2224 fs->shared = 0; 2225 vm_object_pip_wakeup( 2226 fs->first_ba->object); 2227 unlock_things(fs); 2228 return (KERN_TRY_AGAIN); 2229 } 2230 swap_pager_unswapped(fs->mary[0]); 2231 } 2232 } 2233 } 2234 2235 /* 2236 * We found our page at backing layer ba. Leave the layer state 2237 * intact. 2238 */ 2239 2240 vm_object_pip_wakeup(fs->first_ba->object); 2241 #if 0 2242 if (fs->ba != fs->first_ba) 2243 vm_object_drop(fs->ba->object); 2244 #endif 2245 2246 /* 2247 * Page had better still be busy. We are still locked up and 2248 * fs->ba->object will have another PIP reference for the case 2249 * where fs->ba != fs->first_ba. 2250 */ 2251 KASSERT(fs->mary[0]->busy_count & PBUSY_LOCKED, 2252 ("vm_fault: page %p not busy!", fs->mary[0])); 2253 2254 /* 2255 * Sanity check: page must be completely valid or it is not fit to 2256 * map into user space. vm_pager_get_pages() ensures this. 2257 */ 2258 if (fs->mary[0]->valid != VM_PAGE_BITS_ALL) { 2259 vm_page_zero_invalid(fs->mary[0], TRUE); 2260 kprintf("Warning: page %p partially invalid on fault\n", 2261 fs->mary[0]); 2262 } 2263 2264 return (KERN_SUCCESS); 2265 } 2266 2267 /* 2268 * Wire down a range of virtual addresses in a map. The entry in question 2269 * should be marked in-transition and the map must be locked. We must 2270 * release the map temporarily while faulting-in the page to avoid a 2271 * deadlock. Note that the entry may be clipped while we are blocked but 2272 * will never be freed. 2273 * 2274 * map must be locked on entry. 
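 *
 * (For a user wire the simulated fault only asks for VM_PROT_READ and
 *  uses VM_FAULT_USER_WIRE, while a kernel wire demands read|write via
 *  VM_FAULT_CHANGE_WIRING; on any failure the pages wired so far are
 *  backed out below before returning.)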
2275 */ 2276 int 2277 vm_fault_wire(vm_map_t map, vm_map_entry_t entry, 2278 boolean_t user_wire, int kmflags) 2279 { 2280 boolean_t fictitious; 2281 vm_offset_t start; 2282 vm_offset_t end; 2283 vm_offset_t va; 2284 pmap_t pmap; 2285 int rv; 2286 int wire_prot; 2287 int fault_flags; 2288 vm_page_t m; 2289 2290 if (user_wire) { 2291 wire_prot = VM_PROT_READ; 2292 fault_flags = VM_FAULT_USER_WIRE; 2293 } else { 2294 wire_prot = VM_PROT_READ | VM_PROT_WRITE; 2295 fault_flags = VM_FAULT_CHANGE_WIRING; 2296 } 2297 if (kmflags & KM_NOTLBSYNC) 2298 wire_prot |= VM_PROT_NOSYNC; 2299 2300 pmap = vm_map_pmap(map); 2301 start = entry->ba.start; 2302 end = entry->ba.end; 2303 2304 switch(entry->maptype) { 2305 case VM_MAPTYPE_NORMAL: 2306 fictitious = entry->ba.object && 2307 ((entry->ba.object->type == OBJT_DEVICE) || 2308 (entry->ba.object->type == OBJT_MGTDEVICE)); 2309 break; 2310 case VM_MAPTYPE_UKSMAP: 2311 fictitious = TRUE; 2312 break; 2313 default: 2314 fictitious = FALSE; 2315 break; 2316 } 2317 2318 if (entry->eflags & MAP_ENTRY_KSTACK) 2319 start += PAGE_SIZE; 2320 map->timestamp++; 2321 vm_map_unlock(map); 2322 2323 /* 2324 * We simulate a fault to get the page and enter it in the physical 2325 * map. 2326 */ 2327 for (va = start; va < end; va += PAGE_SIZE) { 2328 rv = vm_fault(map, va, wire_prot, fault_flags); 2329 if (rv) { 2330 while (va > start) { 2331 va -= PAGE_SIZE; 2332 m = pmap_unwire(pmap, va); 2333 if (m && !fictitious) { 2334 vm_page_busy_wait(m, FALSE, "vmwrpg"); 2335 vm_page_unwire(m, 1); 2336 vm_page_wakeup(m); 2337 } 2338 } 2339 goto done; 2340 } 2341 } 2342 rv = KERN_SUCCESS; 2343 done: 2344 vm_map_lock(map); 2345 2346 return (rv); 2347 } 2348 2349 /* 2350 * Unwire a range of virtual addresses in a map. The map should be 2351 * locked. 2352 */ 2353 void 2354 vm_fault_unwire(vm_map_t map, vm_map_entry_t entry) 2355 { 2356 boolean_t fictitious; 2357 vm_offset_t start; 2358 vm_offset_t end; 2359 vm_offset_t va; 2360 pmap_t pmap; 2361 vm_page_t m; 2362 2363 pmap = vm_map_pmap(map); 2364 start = entry->ba.start; 2365 end = entry->ba.end; 2366 fictitious = entry->ba.object && 2367 ((entry->ba.object->type == OBJT_DEVICE) || 2368 (entry->ba.object->type == OBJT_MGTDEVICE)); 2369 if (entry->eflags & MAP_ENTRY_KSTACK) 2370 start += PAGE_SIZE; 2371 2372 /* 2373 * Since the pages are wired down, we must be able to get their 2374 * mappings from the physical map system. 2375 */ 2376 for (va = start; va < end; va += PAGE_SIZE) { 2377 m = pmap_unwire(pmap, va); 2378 if (m && !fictitious) { 2379 vm_page_busy_wait(m, FALSE, "vmwrpg"); 2380 vm_page_unwire(m, 1); 2381 vm_page_wakeup(m); 2382 } 2383 } 2384 } 2385 2386 /* 2387 * Simulate write faults to bring all data into the head object, return 2388 * KERN_SUCCESS on success (which should be always unless the system runs 2389 * out of memory). 2390 * 2391 * The caller will handle destroying the backing_ba's. 
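 *
 * (The loop below walks the entry a page at a time: pages already
 *  resident in the head object are skipped, everything else is pulled
 *  up via vm_fault_object() with VM_PROT_OVERRIDE_WRITE so a private
 *  copy lands in entry->ba.object.  If any page had to be copied, the
 *  pmap range is removed afterwards so stale mappings of the backing
 *  pages cannot linger.)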
2392 */ 2393 int 2394 vm_fault_collapse(vm_map_t map, vm_map_entry_t entry) 2395 { 2396 struct faultstate fs; 2397 vm_ooffset_t scan; 2398 vm_pindex_t pindex; 2399 vm_object_t object; 2400 int rv; 2401 int all_shadowed; 2402 2403 bzero(&fs, sizeof(fs)); 2404 object = entry->ba.object; 2405 2406 fs.first_prot = entry->max_protection | /* optional VM_PROT_EXECUTE */ 2407 VM_PROT_READ | VM_PROT_WRITE | VM_PROT_OVERRIDE_WRITE; 2408 fs.fault_flags = VM_FAULT_NORMAL; 2409 fs.map = map; 2410 fs.entry = entry; 2411 fs.lookup_still_valid = -1; /* leave map atomically locked */ 2412 fs.first_ba = &entry->ba; 2413 fs.first_ba_held = -1; /* leave object held */ 2414 2415 /* fs.hardfault */ 2416 2417 vm_object_hold(object); 2418 rv = KERN_SUCCESS; 2419 2420 scan = entry->ba.start; 2421 all_shadowed = 1; 2422 2423 while (scan < entry->ba.end) { 2424 pindex = OFF_TO_IDX(entry->ba.offset + (scan - entry->ba.start)); 2425 2426 if (vm_page_lookup(object, pindex)) { 2427 scan += PAGE_SIZE; 2428 continue; 2429 } 2430 2431 all_shadowed = 0; 2432 fs.ba = fs.first_ba; 2433 fs.prot = fs.first_prot; 2434 2435 rv = vm_fault_object(&fs, pindex, fs.first_prot, 1); 2436 if (rv == KERN_TRY_AGAIN) 2437 continue; 2438 if (rv != KERN_SUCCESS) 2439 break; 2440 vm_page_flag_set(fs.mary[0], PG_REFERENCED); 2441 vm_page_activate(fs.mary[0]); 2442 vm_page_wakeup(fs.mary[0]); 2443 scan += PAGE_SIZE; 2444 } 2445 KKASSERT(entry->ba.object == object); 2446 vm_object_drop(object); 2447 2448 /* 2449 * If the fronting object did not have every page we have to clear 2450 * the pmap range due to the pages being changed so we can fault-in 2451 * the proper pages. 2452 */ 2453 if (all_shadowed == 0) 2454 pmap_remove(map->pmap, entry->ba.start, entry->ba.end); 2455 2456 return rv; 2457 } 2458 2459 /* 2460 * Copy all of the pages from one map entry to another. If the source 2461 * is wired down we just use vm_page_lookup(). If not we use 2462 * vm_fault_object(). 2463 * 2464 * The source and destination maps must be locked for write. 2465 * The source and destination maps token must be held 2466 * 2467 * No other requirements. 2468 * 2469 * XXX do segment optimization 2470 */ 2471 void 2472 vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map, 2473 vm_map_entry_t dst_entry, vm_map_entry_t src_entry) 2474 { 2475 vm_object_t dst_object; 2476 vm_object_t src_object; 2477 vm_ooffset_t dst_offset; 2478 vm_ooffset_t src_offset; 2479 vm_prot_t prot; 2480 vm_offset_t vaddr; 2481 vm_page_t dst_m; 2482 vm_page_t src_m; 2483 2484 src_object = src_entry->ba.object; 2485 src_offset = src_entry->ba.offset; 2486 2487 /* 2488 * Create the top-level object for the destination entry. (Doesn't 2489 * actually shadow anything - we copy the pages directly.) 2490 */ 2491 vm_map_entry_allocate_object(dst_entry); 2492 dst_object = dst_entry->ba.object; 2493 2494 prot = dst_entry->max_protection; 2495 2496 /* 2497 * Loop through all of the pages in the entry's range, copying each 2498 * one from the source object (it should be there) to the destination 2499 * object. 
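 *
 * (vm_page_alloc() may transiently fail under memory pressure, so the
 *  allocation below retries around vm_wait(0).  The source page is
 *  expected to be resident because the source entry is wired, hence
 *  the panic if the lookup fails.)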
2500 */ 2501 vm_object_hold(src_object); 2502 vm_object_hold(dst_object); 2503 2504 for (vaddr = dst_entry->ba.start, dst_offset = 0; 2505 vaddr < dst_entry->ba.end; 2506 vaddr += PAGE_SIZE, dst_offset += PAGE_SIZE) { 2507 2508 /* 2509 * Allocate a page in the destination object 2510 */ 2511 do { 2512 dst_m = vm_page_alloc(dst_object, 2513 OFF_TO_IDX(dst_offset), 2514 VM_ALLOC_NORMAL); 2515 if (dst_m == NULL) { 2516 vm_wait(0); 2517 } 2518 } while (dst_m == NULL); 2519 2520 /* 2521 * Find the page in the source object, and copy it in. 2522 * (Because the source is wired down, the page will be in 2523 * memory.) 2524 */ 2525 src_m = vm_page_lookup(src_object, 2526 OFF_TO_IDX(dst_offset + src_offset)); 2527 if (src_m == NULL) 2528 panic("vm_fault_copy_wired: page missing"); 2529 2530 vm_page_copy(src_m, dst_m); 2531 2532 /* 2533 * Enter it in the pmap... 2534 */ 2535 pmap_enter(dst_map->pmap, vaddr, dst_m, prot, FALSE, dst_entry); 2536 2537 /* 2538 * Mark it no longer busy, and put it on the active list. 2539 */ 2540 vm_page_activate(dst_m); 2541 vm_page_wakeup(dst_m); 2542 } 2543 vm_object_drop(dst_object); 2544 vm_object_drop(src_object); 2545 } 2546 2547 #if 0 2548 2549 /* 2550 * This routine checks around the requested page for other pages that 2551 * might be able to be faulted in. This routine brackets the viable 2552 * pages for the pages to be paged in. 2553 * 2554 * Inputs: 2555 * m, rbehind, rahead 2556 * 2557 * Outputs: 2558 * marray (array of vm_page_t), reqpage (index of requested page) 2559 * 2560 * Return value: 2561 * number of pages in marray 2562 */ 2563 static int 2564 vm_fault_additional_pages(vm_page_t m, int rbehind, int rahead, 2565 vm_page_t *marray, int *reqpage) 2566 { 2567 int i,j; 2568 vm_object_t object; 2569 vm_pindex_t pindex, startpindex, endpindex, tpindex; 2570 vm_page_t rtm; 2571 int cbehind, cahead; 2572 2573 object = m->object; 2574 pindex = m->pindex; 2575 2576 /* 2577 * we don't fault-ahead for device pager 2578 */ 2579 if ((object->type == OBJT_DEVICE) || 2580 (object->type == OBJT_MGTDEVICE)) { 2581 *reqpage = 0; 2582 marray[0] = m; 2583 return 1; 2584 } 2585 2586 /* 2587 * if the requested page is not available, then give up now 2588 */ 2589 if (!vm_pager_has_page(object, pindex, &cbehind, &cahead)) { 2590 *reqpage = 0; /* not used by caller, fix compiler warn */ 2591 return 0; 2592 } 2593 2594 if ((cbehind == 0) && (cahead == 0)) { 2595 *reqpage = 0; 2596 marray[0] = m; 2597 return 1; 2598 } 2599 2600 if (rahead > cahead) { 2601 rahead = cahead; 2602 } 2603 2604 if (rbehind > cbehind) { 2605 rbehind = cbehind; 2606 } 2607 2608 /* 2609 * Do not do any readahead if we have insufficient free memory. 2610 * 2611 * XXX code was broken disabled before and has instability 2612 * with this conditonal fixed, so shortcut for now. 2613 */ 2614 if (burst_fault == 0 || vm_page_count_severe()) { 2615 marray[0] = m; 2616 *reqpage = 0; 2617 return 1; 2618 } 2619 2620 /* 2621 * scan backward for the read behind pages -- in memory 2622 * 2623 * Assume that if the page is not found an interrupt will not 2624 * create it. Theoretically interrupts can only remove (busy) 2625 * pages, not create new associations. 
2626 */ 2627 if (pindex > 0) { 2628 if (rbehind > pindex) { 2629 rbehind = pindex; 2630 startpindex = 0; 2631 } else { 2632 startpindex = pindex - rbehind; 2633 } 2634 2635 vm_object_hold(object); 2636 for (tpindex = pindex; tpindex > startpindex; --tpindex) { 2637 if (vm_page_lookup(object, tpindex - 1)) 2638 break; 2639 } 2640 2641 i = 0; 2642 while (tpindex < pindex) { 2643 rtm = vm_page_alloc(object, tpindex, VM_ALLOC_SYSTEM | 2644 VM_ALLOC_NULL_OK); 2645 if (rtm == NULL) { 2646 for (j = 0; j < i; j++) { 2647 vm_page_free(marray[j]); 2648 } 2649 vm_object_drop(object); 2650 marray[0] = m; 2651 *reqpage = 0; 2652 return 1; 2653 } 2654 marray[i] = rtm; 2655 ++i; 2656 ++tpindex; 2657 } 2658 vm_object_drop(object); 2659 } else { 2660 i = 0; 2661 } 2662 2663 /* 2664 * Assign requested page 2665 */ 2666 marray[i] = m; 2667 *reqpage = i; 2668 ++i; 2669 2670 /* 2671 * Scan forwards for read-ahead pages 2672 */ 2673 tpindex = pindex + 1; 2674 endpindex = tpindex + rahead; 2675 if (endpindex > object->size) 2676 endpindex = object->size; 2677 2678 vm_object_hold(object); 2679 while (tpindex < endpindex) { 2680 if (vm_page_lookup(object, tpindex)) 2681 break; 2682 rtm = vm_page_alloc(object, tpindex, VM_ALLOC_SYSTEM | 2683 VM_ALLOC_NULL_OK); 2684 if (rtm == NULL) 2685 break; 2686 marray[i] = rtm; 2687 ++i; 2688 ++tpindex; 2689 } 2690 vm_object_drop(object); 2691 2692 return (i); 2693 } 2694 2695 #endif 2696 2697 /* 2698 * vm_prefault() provides a quick way of clustering pagefaults into a 2699 * processes address space. It is a "cousin" of pmap_object_init_pt, 2700 * except it runs at page fault time instead of mmap time. 2701 * 2702 * vm.fast_fault Enables pre-faulting zero-fill pages 2703 * 2704 * vm.prefault_pages Number of pages (1/2 negative, 1/2 positive) to 2705 * prefault. Scan stops in either direction when 2706 * a page is found to already exist. 2707 * 2708 * This code used to be per-platform pmap_prefault(). It is now 2709 * machine-independent and enhanced to also pre-fault zero-fill pages 2710 * (see vm.fast_fault) as well as make them writable, which greatly 2711 * reduces the number of page faults programs incur. 2712 * 2713 * Application performance when pre-faulting zero-fill pages is heavily 2714 * dependent on the application. Very tiny applications like /bin/echo 2715 * lose a little performance while applications of any appreciable size 2716 * gain performance. Prefaulting multiple pages also reduces SMP 2717 * congestion and can improve SMP performance significantly. 2718 * 2719 * NOTE! prot may allow writing but this only applies to the top level 2720 * object. If we wind up mapping a page extracted from a backing 2721 * object we have to make sure it is read-only. 2722 * 2723 * NOTE! The caller has already handled any COW operations on the 2724 * vm_map_entry via the normal fault code. Do NOT call this 2725 * shortcut unless the normal fault code has run on this entry. 2726 * 2727 * The related map must be locked. 2728 * No other requirements. 2729 */ 2730 __read_mostly static int vm_prefault_pages = 8; 2731 SYSCTL_INT(_vm, OID_AUTO, prefault_pages, CTLFLAG_RW, &vm_prefault_pages, 0, 2732 "Maximum number of pages to pre-fault"); 2733 __read_mostly static int vm_fast_fault = 1; 2734 SYSCTL_INT(_vm, OID_AUTO, fast_fault, CTLFLAG_RW, &vm_fast_fault, 0, 2735 "Burst fault zero-fill regions"); 2736 2737 /* 2738 * Set PG_NOSYNC if the map entry indicates so, but only if the page 2739 * is not already dirty by other means. 
This will prevent passive 2740 * filesystem syncing as well as 'sync' from writing out the page. 2741 */ 2742 static void 2743 vm_set_nosync(vm_page_t m, vm_map_entry_t entry) 2744 { 2745 if (entry->eflags & MAP_ENTRY_NOSYNC) { 2746 if (m->dirty == 0) 2747 vm_page_flag_set(m, PG_NOSYNC); 2748 } else { 2749 vm_page_flag_clear(m, PG_NOSYNC); 2750 } 2751 } 2752 2753 static void 2754 vm_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry, int prot, 2755 int fault_flags) 2756 { 2757 vm_map_backing_t ba; /* first ba */ 2758 struct lwp *lp; 2759 vm_page_t m; 2760 vm_offset_t addr; 2761 vm_pindex_t index; 2762 vm_pindex_t pindex; 2763 vm_object_t object; 2764 int pprot; 2765 int i; 2766 int noneg; 2767 int nopos; 2768 int maxpages; 2769 2770 /* 2771 * Get stable max count value, disabled if set to 0 2772 */ 2773 maxpages = vm_prefault_pages; 2774 cpu_ccfence(); 2775 if (maxpages <= 0) 2776 return; 2777 2778 /* 2779 * We do not currently prefault mappings that use virtual page 2780 * tables. We do not prefault foreign pmaps. 2781 */ 2782 if (entry->maptype != VM_MAPTYPE_NORMAL) 2783 return; 2784 lp = curthread->td_lwp; 2785 if (lp == NULL || (pmap != vmspace_pmap(lp->lwp_vmspace))) 2786 return; 2787 2788 /* 2789 * Limit pre-fault count to 1024 pages. 2790 */ 2791 if (maxpages > 1024) 2792 maxpages = 1024; 2793 2794 ba = &entry->ba; 2795 object = entry->ba.object; 2796 KKASSERT(object != NULL); 2797 2798 /* 2799 * NOTE: VM_FAULT_DIRTY allowed later so must hold object exclusively 2800 * now (or do something more complex XXX). 2801 */ 2802 vm_object_hold(object); 2803 2804 noneg = 0; 2805 nopos = 0; 2806 for (i = 0; i < maxpages; ++i) { 2807 vm_object_t lobject; 2808 vm_object_t nobject; 2809 vm_map_backing_t last_ba; /* last ba */ 2810 vm_map_backing_t next_ba; /* last ba */ 2811 int allocated = 0; 2812 int error; 2813 2814 /* 2815 * This can eat a lot of time on a heavily contended 2816 * machine so yield on the tick if needed. 2817 */ 2818 if ((i & 7) == 7) 2819 lwkt_yield(); 2820 2821 /* 2822 * Calculate the page to pre-fault, stopping the scan in 2823 * each direction separately if the limit is reached. 2824 */ 2825 if (i & 1) { 2826 if (noneg) 2827 continue; 2828 addr = addra - ((i + 1) >> 1) * PAGE_SIZE; 2829 } else { 2830 if (nopos) 2831 continue; 2832 addr = addra + ((i + 2) >> 1) * PAGE_SIZE; 2833 } 2834 if (addr < entry->ba.start) { 2835 noneg = 1; 2836 if (noneg && nopos) 2837 break; 2838 continue; 2839 } 2840 if (addr >= entry->ba.end) { 2841 nopos = 1; 2842 if (noneg && nopos) 2843 break; 2844 continue; 2845 } 2846 2847 /* 2848 * Skip pages already mapped, and stop scanning in that 2849 * direction. When the scan terminates in both directions 2850 * we are done. 2851 */ 2852 if (pmap_prefault_ok(pmap, addr) == 0) { 2853 if (i & 1) 2854 noneg = 1; 2855 else 2856 nopos = 1; 2857 if (noneg && nopos) 2858 break; 2859 continue; 2860 } 2861 2862 /* 2863 * Follow the backing layers to obtain the page to be mapped 2864 * into the pmap. 2865 * 2866 * If we reach the terminal object without finding a page 2867 * and we determine it would be advantageous, then allocate 2868 * a zero-fill page for the base object. The base object 2869 * is guaranteed to be OBJT_DEFAULT for this case. 2870 * 2871 * In order to not have to check the pager via *haspage*() 2872 * we stop if any non-default object is encountered. e.g. 2873 * a vnode or swap object would stop the loop. 
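		 *
		 * (Note that pprot is stripped of VM_PROT_WRITE each time
		 *  the walk descends a backing layer, so a page found in a
		 *  backing object is only ever entered read-only here;
		 *  writing it must still take the normal COW fault path.)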
2874 */ 2875 index = ((addr - entry->ba.start) + entry->ba.offset) >> 2876 PAGE_SHIFT; 2877 last_ba = ba; 2878 lobject = object; 2879 pindex = index; 2880 pprot = prot; 2881 2882 /*vm_object_hold(lobject); implied */ 2883 2884 while ((m = vm_page_lookup_busy_try(lobject, pindex, 2885 TRUE, &error)) == NULL) { 2886 if (lobject->type != OBJT_DEFAULT) 2887 break; 2888 if ((next_ba = last_ba->backing_ba) == NULL) { 2889 if (vm_fast_fault == 0) 2890 break; 2891 if ((prot & VM_PROT_WRITE) == 0 || 2892 vm_paging_min()) { 2893 break; 2894 } 2895 2896 /* 2897 * NOTE: Allocated from base object 2898 */ 2899 m = vm_page_alloc(object, index, 2900 VM_ALLOC_NORMAL | 2901 VM_ALLOC_ZERO | 2902 VM_ALLOC_USE_GD | 2903 VM_ALLOC_NULL_OK); 2904 if (m == NULL) 2905 break; 2906 allocated = 1; 2907 pprot = prot; 2908 /* lobject = object .. not needed */ 2909 break; 2910 } 2911 if (next_ba->offset & PAGE_MASK) 2912 break; 2913 nobject = next_ba->object; 2914 vm_object_hold(nobject); 2915 pindex -= last_ba->offset >> PAGE_SHIFT; 2916 pindex += next_ba->offset >> PAGE_SHIFT; 2917 if (last_ba != ba) { 2918 vm_object_lock_swap(); 2919 vm_object_drop(lobject); 2920 } 2921 lobject = nobject; 2922 last_ba = next_ba; 2923 pprot &= ~VM_PROT_WRITE; 2924 } 2925 2926 /* 2927 * NOTE: A non-NULL (m) will be associated with lobject if 2928 * it was found there, otherwise it is probably a 2929 * zero-fill page associated with the base object. 2930 * 2931 * Give-up if no page is available. 2932 */ 2933 if (m == NULL) { 2934 if (last_ba != ba) 2935 vm_object_drop(lobject); 2936 break; 2937 } 2938 2939 /* 2940 * The object must be marked dirty if we are mapping a 2941 * writable page. Note that (m) does not have to be 2942 * entered into the object, so use lobject or object 2943 * as appropriate instead of m->object. 2944 * 2945 * Do this before we potentially drop the object. 2946 */ 2947 if (pprot & VM_PROT_WRITE) { 2948 vm_object_set_writeable_dirty( 2949 (allocated ? object : lobject)); 2950 } 2951 2952 /* 2953 * Do not conditionalize on PG_RAM. If pages are present in 2954 * the VM system we assume optimal caching. If caching is 2955 * not optimal the I/O gravy train will be restarted when we 2956 * hit an unavailable page. We do not want to try to restart 2957 * the gravy train now because we really don't know how much 2958 * of the object has been cached. The cost for restarting 2959 * the gravy train should be low (since accesses will likely 2960 * be I/O bound anyway). 2961 */ 2962 if (last_ba != ba) 2963 vm_object_drop(lobject); 2964 2965 /* 2966 * Enter the page into the pmap if appropriate. If we had 2967 * allocated the page we have to place it on a queue. If not 2968 * we just have to make sure it isn't on the cache queue 2969 * (pages on the cache queue are not allowed to be mapped). 2970 * 2971 * When allocated is TRUE, m corresponds to object, 2972 * not lobject. 2973 */ 2974 if (allocated) { 2975 /* 2976 * Page must be zerod. 
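			 *
			 * (vm_page_zero_fill() below supplies the page
			 *  contents; the page is then marked fully valid,
			 *  the zero-fill is accounted in v_zfod, and the
			 *  page is deactivated rather than activated since
			 *  a prefaulted page is speculative.)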
2977 */ 2978 vm_page_zero_fill(m); 2979 mycpu->gd_cnt.v_zfod++; 2980 m->valid = VM_PAGE_BITS_ALL; 2981 2982 /* 2983 * Handle dirty page case 2984 */ 2985 if (pprot & VM_PROT_WRITE) 2986 vm_set_nosync(m, entry); 2987 pmap_enter(pmap, addr, m, pprot, 0, entry); 2988 #if 0 2989 /* REMOVE ME, a burst counts as one fault */ 2990 mycpu->gd_cnt.v_vm_faults++; 2991 if (curthread->td_lwp) 2992 ++curthread->td_lwp->lwp_ru.ru_minflt; 2993 #endif 2994 vm_page_deactivate(m); 2995 if (pprot & VM_PROT_WRITE) { 2996 /*vm_object_set_writeable_dirty(object);*/ 2997 vm_set_nosync(m, entry); 2998 if (fault_flags & VM_FAULT_DIRTY) { 2999 vm_page_dirty(m); 3000 /*XXX*/ 3001 swap_pager_unswapped(m); 3002 } 3003 } 3004 vm_page_wakeup(m); 3005 } else if (error) { 3006 /* couldn't busy page, no wakeup */ 3007 } else if ( 3008 ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) && 3009 (m->flags & PG_FICTITIOUS) == 0) { 3010 /* 3011 * A fully valid page not undergoing soft I/O can 3012 * be immediately entered into the pmap. 3013 * 3014 * When allocated is false, m corresponds to lobject. 3015 */ 3016 if ((m->queue - m->pc) == PQ_CACHE) 3017 vm_page_deactivate(m); 3018 if (pprot & VM_PROT_WRITE) { 3019 /*vm_object_set_writeable_dirty(lobject);*/ 3020 vm_set_nosync(m, entry); 3021 if (fault_flags & VM_FAULT_DIRTY) { 3022 vm_page_dirty(m); 3023 /*XXX*/ 3024 swap_pager_unswapped(m); 3025 } 3026 } 3027 if (pprot & VM_PROT_WRITE) 3028 vm_set_nosync(m, entry); 3029 pmap_enter(pmap, addr, m, pprot, 0, entry); 3030 #if 0 3031 /* REMOVE ME, a burst counts as one fault */ 3032 mycpu->gd_cnt.v_vm_faults++; 3033 if (curthread->td_lwp) 3034 ++curthread->td_lwp->lwp_ru.ru_minflt; 3035 #endif 3036 vm_page_wakeup(m); 3037 } else { 3038 vm_page_wakeup(m); 3039 } 3040 } 3041 vm_object_drop(object); 3042 } 3043 3044 /* 3045 * Object can be held shared 3046 */ 3047 static void 3048 vm_prefault_quick(pmap_t pmap, vm_offset_t addra, 3049 vm_map_entry_t entry, int prot, int fault_flags) 3050 { 3051 struct lwp *lp; 3052 vm_page_t m; 3053 vm_offset_t addr; 3054 vm_pindex_t pindex; 3055 vm_object_t object; 3056 int i; 3057 int noneg; 3058 int nopos; 3059 int maxpages; 3060 3061 /* 3062 * Get stable max count value, disabled if set to 0 3063 */ 3064 maxpages = vm_prefault_pages; 3065 cpu_ccfence(); 3066 if (maxpages <= 0) 3067 return; 3068 3069 /* 3070 * We do not currently prefault mappings that use virtual page 3071 * tables. We do not prefault foreign pmaps. 3072 */ 3073 if (entry->maptype != VM_MAPTYPE_NORMAL) 3074 return; 3075 lp = curthread->td_lwp; 3076 if (lp == NULL || (pmap != vmspace_pmap(lp->lwp_vmspace))) 3077 return; 3078 object = entry->ba.object; 3079 if (entry->ba.backing_ba != NULL) 3080 return; 3081 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object)); 3082 3083 /* 3084 * Limit pre-fault count to 1024 pages. 3085 */ 3086 if (maxpages > 1024) 3087 maxpages = 1024; 3088 3089 noneg = 0; 3090 nopos = 0; 3091 for (i = 0; i < maxpages; ++i) { 3092 int error; 3093 3094 /* 3095 * Calculate the page to pre-fault, stopping the scan in 3096 * each direction separately if the limit is reached. 
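		 *
		 * For example, with addra as the faulting address the scan
		 * visits (in units of PAGE_SIZE):
		 *
		 *	i = 0	addra + 1
		 *	i = 1	addra - 1
		 *	i = 2	addra + 2
		 *	i = 3	addra - 2
		 *	...
		 *
		 * i.e. it spreads outward from the fault address,
		 * alternating directions and skipping addra itself.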
3097 */ 3098 if (i & 1) { 3099 if (noneg) 3100 continue; 3101 addr = addra - ((i + 1) >> 1) * PAGE_SIZE; 3102 } else { 3103 if (nopos) 3104 continue; 3105 addr = addra + ((i + 2) >> 1) * PAGE_SIZE; 3106 } 3107 if (addr < entry->ba.start) { 3108 noneg = 1; 3109 if (noneg && nopos) 3110 break; 3111 continue; 3112 } 3113 if (addr >= entry->ba.end) { 3114 nopos = 1; 3115 if (noneg && nopos) 3116 break; 3117 continue; 3118 } 3119 3120 /* 3121 * Follow the VM object chain to obtain the page to be mapped 3122 * into the pmap. This version of the prefault code only 3123 * works with terminal objects. 3124 * 3125 * The page must already exist. If we encounter a problem 3126 * we stop here. 3127 * 3128 * WARNING! We cannot call swap_pager_unswapped() or insert 3129 * a new vm_page with a shared token. 3130 */ 3131 pindex = ((addr - entry->ba.start) + entry->ba.offset) >> 3132 PAGE_SHIFT; 3133 3134 /* 3135 * Skip pages already mapped, and stop scanning in that 3136 * direction. When the scan terminates in both directions 3137 * we are done. 3138 */ 3139 if (pmap_prefault_ok(pmap, addr) == 0) { 3140 if (i & 1) 3141 noneg = 1; 3142 else 3143 nopos = 1; 3144 if (noneg && nopos) 3145 break; 3146 continue; 3147 } 3148 3149 /* 3150 * Shortcut the read-only mapping case using the far more 3151 * efficient vm_page_lookup_sbusy_try() function. This 3152 * allows us to acquire the page soft-busied only which 3153 * is especially nice for concurrent execs of the same 3154 * program. 3155 * 3156 * The lookup function also validates page suitability 3157 * (all valid bits set, and not fictitious). 3158 * 3159 * If the page is in PQ_CACHE we have to fall-through 3160 * and hard-busy it so we can move it out of PQ_CACHE. 3161 */ 3162 if ((prot & VM_PROT_WRITE) == 0) { 3163 m = vm_page_lookup_sbusy_try(object, pindex, 3164 0, PAGE_SIZE); 3165 if (m == NULL) 3166 break; 3167 if ((m->queue - m->pc) != PQ_CACHE) { 3168 pmap_enter(pmap, addr, m, prot, 0, entry); 3169 #if 0 3170 /* REMOVE ME, a burst counts as one fault */ 3171 mycpu->gd_cnt.v_vm_faults++; 3172 if (curthread->td_lwp) 3173 ++curthread->td_lwp->lwp_ru.ru_minflt; 3174 #endif 3175 vm_page_sbusy_drop(m); 3176 continue; 3177 } 3178 vm_page_sbusy_drop(m); 3179 } 3180 3181 /* 3182 * Fallback to normal vm_page lookup code. This code 3183 * hard-busies the page. Not only that, but the page 3184 * can remain in that state for a significant period 3185 * time due to pmap_enter()'s overhead. 3186 */ 3187 m = vm_page_lookup_busy_try(object, pindex, TRUE, &error); 3188 if (m == NULL || error) 3189 break; 3190 3191 /* 3192 * Stop if the page cannot be trivially entered into the 3193 * pmap. 3194 */ 3195 if (((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) || 3196 (m->flags & PG_FICTITIOUS) || 3197 ((m->flags & PG_SWAPPED) && 3198 (prot & VM_PROT_WRITE) && 3199 (fault_flags & VM_FAULT_DIRTY))) { 3200 vm_page_wakeup(m); 3201 break; 3202 } 3203 3204 /* 3205 * Enter the page into the pmap. The object might be held 3206 * shared so we can't do any (serious) modifying operation 3207 * on it. 
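		 *
		 * (swap_pager_unswapped() remains commented out below because
		 *  the PG_SWAPPED + write + VM_FAULT_DIRTY combination was
		 *  already rejected by the check above, and it may not be
		 *  called while the object token is only held shared; see
		 *  the WARNING earlier in this function.)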
		 */
		if ((m->queue - m->pc) == PQ_CACHE)
			vm_page_deactivate(m);
		if (prot & VM_PROT_WRITE) {
			vm_object_set_writeable_dirty(m->object);
			vm_set_nosync(m, entry);
			if (fault_flags & VM_FAULT_DIRTY) {
				vm_page_dirty(m);
				/* can't happen due to conditional above */
				/* swap_pager_unswapped(m); */
			}
		}
		pmap_enter(pmap, addr, m, prot, 0, entry);
#if 0
		/* REMOVE ME, a burst counts as one fault */
		mycpu->gd_cnt.v_vm_faults++;
		if (curthread->td_lwp)
			++curthread->td_lwp->lwp_ru.ru_minflt;
#endif
		vm_page_wakeup(m);
	}
}
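#if 0
/*
 * Illustrative userland sketch (not part of the kernel build, kept in an
 * #if 0 block like the other disabled code above).  It is a minimal,
 * hedged example of the fault paths implemented in this file: the first
 * store into an anonymous MAP_PRIVATE mapping takes the zero-fill path,
 * mlock()/munlock() exercise the wire/unwire helpers, and a store from a
 * forked child takes a copy-on-write fault.  Only standard POSIX calls
 * are used; the exact kernel entry points reached are an assumption.
 */
#include <sys/mman.h>
#include <sys/wait.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	size_t len = 4096;
	char *p;

	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANON, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	p[0] = 1;			/* zero-fill fault */

	if (mlock(p, len) == 0)		/* wire the range */
		munlock(p, len);	/* and unwire it again */

	if (fork() == 0) {
		p[0] = 2;		/* copy-on-write fault in the child */
		_exit(0);
	}
	wait(NULL);
	printf("parent still sees %d\n", p[0]);	/* prints 1 */
	munmap(p, len);
	return 0;
}
#endif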