/*
 * Copyright (c) 2003-2022 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * ---
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
 * IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * ---
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * Page fault handling module.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <sys/vkernel.h>
#include <sys/lock.h>
#include <sys/sysctl.h>

#include <cpu/lwbuf.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>

#include <vm/vm_page2.h>

#define VM_FAULT_MAX_QUICK	16

struct faultstate {
	vm_page_t mary[VM_FAULT_MAX_QUICK];
	vm_map_backing_t ba;
	vm_prot_t prot;
	vm_page_t first_m;
	vm_map_backing_t first_ba;
	vm_prot_t first_prot;
	vm_map_t map;
	vm_map_entry_t entry;
	int lookup_still_valid;	/* 0=inv 1=valid/rel -1=valid/atomic */
	int hardfault;
	int fault_flags;
	int shared;
	int msoftonly;
	int first_shared;
	int wflags;
	int first_ba_held;	/* 0=unlocked 1=locked/rel -1=lock/atomic */
	struct vnode *vp;
};

__read_mostly static int debug_fault = 0;
SYSCTL_INT(_vm, OID_AUTO, debug_fault, CTLFLAG_RW, &debug_fault, 0, "");
__read_mostly static int debug_cluster = 0;
SYSCTL_INT(_vm, OID_AUTO, debug_cluster, CTLFLAG_RW, &debug_cluster, 0, "");
#if 0
static int virtual_copy_enable = 1;
SYSCTL_INT(_vm, OID_AUTO, virtual_copy_enable, CTLFLAG_RW,
	   &virtual_copy_enable, 0, "");
#endif
__read_mostly int vm_shared_fault = 1;
TUNABLE_INT("vm.shared_fault", &vm_shared_fault);
SYSCTL_INT(_vm, OID_AUTO, shared_fault, CTLFLAG_RW,
	   &vm_shared_fault, 0, "Allow shared token on vm_object");
__read_mostly static int vm_fault_bypass_count = 1;
TUNABLE_INT("vm.fault_bypass", &vm_fault_bypass_count);
SYSCTL_INT(_vm, OID_AUTO, fault_bypass, CTLFLAG_RW,
	   &vm_fault_bypass_count, 0, "Allow fast vm_fault shortcut");

/*
 * Define here for debugging ioctls.  Note that these are globals, so
 * they will cause a ton of cache line bouncing.  Only use for debugging
 * purposes.
 */
/*#define VM_FAULT_QUICK_DEBUG */
#ifdef VM_FAULT_QUICK_DEBUG
static long vm_fault_bypass_success_count = 0;
SYSCTL_LONG(_vm, OID_AUTO, fault_bypass_success_count, CTLFLAG_RW,
	    &vm_fault_bypass_success_count, 0, "");
static long vm_fault_bypass_failure_count1 = 0;
SYSCTL_LONG(_vm, OID_AUTO, fault_bypass_failure_count1, CTLFLAG_RW,
	    &vm_fault_bypass_failure_count1, 0, "");
static long vm_fault_bypass_failure_count2 = 0;
SYSCTL_LONG(_vm, OID_AUTO, fault_bypass_failure_count2, CTLFLAG_RW,
	    &vm_fault_bypass_failure_count2, 0, "");
static long vm_fault_bypass_failure_count3 = 0;
SYSCTL_LONG(_vm, OID_AUTO, fault_bypass_failure_count3, CTLFLAG_RW,
	    &vm_fault_bypass_failure_count3, 0, "");
static long vm_fault_bypass_failure_count4 = 0;
SYSCTL_LONG(_vm, OID_AUTO, fault_bypass_failure_count4, CTLFLAG_RW,
	    &vm_fault_bypass_failure_count4, 0, "");
#endif

static int vm_fault_bypass(struct faultstate *fs, vm_pindex_t first_pindex,
			vm_pindex_t first_count, int *mextcountp,
			vm_prot_t fault_type);
static int vm_fault_object(struct faultstate *, vm_pindex_t, vm_prot_t, int);
static void vm_set_nosync(vm_page_t m, vm_map_entry_t entry);
static void vm_prefault(pmap_t pmap, vm_offset_t addra,
			vm_map_entry_t entry, int prot, int fault_flags);
static void vm_prefault_quick(pmap_t pmap, vm_offset_t addra,
			vm_map_entry_t entry, int prot, int fault_flags);

static __inline void
release_page(struct faultstate *fs)
{
	vm_page_deactivate(fs->mary[0]);
	vm_page_wakeup(fs->mary[0]);
	fs->mary[0] = NULL;
}

static __inline void
unlock_map(struct faultstate *fs)
{
	if (fs->ba != fs->first_ba)
		vm_object_drop(fs->ba->object);
	if (fs->first_ba && fs->first_ba_held == 1) {
		vm_object_drop(fs->first_ba->object);
		fs->first_ba_held = 0;
		fs->first_ba = NULL;
	}
	fs->ba = NULL;

	/*
	 * NOTE: If lookup_still_valid == -1 the map is assumed to be locked
	 *	 and caller expects it to remain locked atomically.
	 */
	if (fs->lookup_still_valid == 1 && fs->map) {
		vm_map_lookup_done(fs->map, fs->entry, 0);
		fs->lookup_still_valid = 0;
		fs->entry = NULL;
	}
}

/*
 * Clean up after a successful call to vm_fault_object() so another call
 * to vm_fault_object() can be made.
 */
static void
cleanup_fault(struct faultstate *fs)
{
	/*
	 * We allocated a junk page for a COW operation that did
	 * not occur, the page must be freed.
	 */
	if (fs->ba != fs->first_ba) {
		KKASSERT(fs->first_shared == 0);

		/*
		 * first_m could be completely valid and we got here
		 * because of a PG_RAM, don't mistakenly free it!
		 */
		if ((fs->first_m->valid & VM_PAGE_BITS_ALL) ==
		    VM_PAGE_BITS_ALL) {
			vm_page_wakeup(fs->first_m);
		} else {
			vm_page_free(fs->first_m);
		}
		vm_object_pip_wakeup(fs->ba->object);
		fs->first_m = NULL;

		/*
		 * Reset fs->ba without calling unlock_map(), so we need a
		 * little duplication.
		 */
		vm_object_drop(fs->ba->object);
		fs->ba = fs->first_ba;
	}
}

static void
unlock_things(struct faultstate *fs)
{
	cleanup_fault(fs);
	unlock_map(fs);
	if (fs->vp != NULL) {
		vput(fs->vp);
		fs->vp = NULL;
	}
}

#if 0
/*
 * Virtual copy tests.  Used by the fault code to determine if a
 * page can be moved from an orphan vm_object into its shadow
 * instead of copying its contents.
 */
static __inline int
virtual_copy_test(struct faultstate *fs)
{
	/*
	 * Must be holding exclusive locks
	 */
	if (fs->first_shared || fs->shared || virtual_copy_enable == 0)
		return 0;

	/*
	 * Map, if present, has not changed
	 */
	if (fs->map && fs->map_generation != fs->map->timestamp)
		return 0;

	/*
	 * No refs, except us
	 */
	if (fs->ba->object->ref_count != 1)
		return 0;

	/*
	 * No one else can look this object up
	 */
	if (fs->ba->object->handle != NULL)
		return 0;

	/*
	 * No other ways to look the object up
	 */
	if (fs->ba->object->type != OBJT_DEFAULT &&
	    fs->ba->object->type != OBJT_SWAP)
		return 0;

	/*
	 * We don't chase down the shadow chain
	 */
	if (fs->ba != fs->first_ba->backing_ba)
		return 0;

	return 1;
}

static __inline int
virtual_copy_ok(struct faultstate *fs)
{
	if (virtual_copy_test(fs)) {
		/*
		 * Grab the lock and re-test changeable items.
		 */
		if (fs->lookup_still_valid == 0 && fs->map) {
			if (lockmgr(&fs->map->lock, LK_EXCLUSIVE|LK_NOWAIT))
				return 0;
			fs->lookup_still_valid = 1;
			if (virtual_copy_test(fs)) {
				fs->map_generation = ++fs->map->timestamp;
				return 1;
			}
			fs->lookup_still_valid = 0;
			lockmgr(&fs->map->lock, LK_RELEASE);
		}
	}
	return 0;
}
#endif

/*
 * TRYPAGER
 *
 * Determine if the pager for the current object *might* contain the page.
 *
 * We only need to try the pager if this is not a default object (default
 * objects are zero-fill and have no real pager), and if we are not taking
 * a wiring fault or if the FS entry is wired.
 */
#define TRYPAGER(fs)	\
		(fs->ba->object->type != OBJT_DEFAULT &&		\
		 (((fs->fault_flags & VM_FAULT_WIRE_MASK) == 0) ||	\
		  (fs->wflags & FW_WIRED)))

/*
 * vm_fault:
 *
 * Handle a page fault occurring at the given address, requiring the given
 * permissions, in the map specified.  If successful, the page is inserted
 * into the associated physical map.
 *
 * NOTE: The given address should be truncated to the proper page address.
 *
 * KERN_SUCCESS is returned if the page fault is handled; otherwise,
 * a standard error specifying why the fault is fatal is returned.
 *
 * The map in question must be referenced, and remains so.
 * The caller may hold no locks.
 * No other requirements.
 */
int
vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type, int fault_flags)
{
	vm_pindex_t first_pindex;
	vm_pindex_t first_count;
	struct faultstate fs;
	struct lwp *lp;
	struct proc *p;
	thread_t td;
	int mextcount;
	int growstack;
	int retry = 0;
	int inherit_prot;
	int result;
	int n;

	inherit_prot = fault_type & VM_PROT_NOSYNC;
	fs.hardfault = 0;
	fs.fault_flags = fault_flags;
	fs.vp = NULL;
	fs.shared = vm_shared_fault;
	fs.first_shared = vm_shared_fault;
	growstack = 1;

	/*
	 * vm_map interactions
	 */
	td = curthread;
	if ((lp = td->td_lwp) != NULL)
		lp->lwp_flags |= LWP_PAGING;

RetryFault:
	/*
	 * vm_fault_bypass() can shortcut us.
	 */
	fs.msoftonly = 0;
	fs.first_ba_held = 0;
	mextcount = 1;

	/*
	 * Find the vm_map_entry representing the backing store and resolve
	 * the top level object and page index.  This may have the side
	 * effect of executing a copy-on-write on the map entry,
	 * creating a shadow object, or splitting an anonymous entry for
	 * performance, but will not COW any actual VM pages.
	 *
	 * On success fs.map is left read-locked and various other fields
	 * are initialized but not otherwise referenced or locked.
	 *
	 * NOTE!  vm_map_lookup will try to upgrade the fault_type to
	 *	  VM_FAULT_WRITE if the map entry is a virtual page table
	 *	  and also writable, so we can set the 'A' (accessed) bit in
	 *	  the virtual page table entry.
	 */
	fs.map = map;
	result = vm_map_lookup(&fs.map, vaddr, fault_type,
			       &fs.entry, &fs.first_ba,
			       &first_pindex, &first_count,
			       &fs.first_prot, &fs.wflags);

	/*
	 * If the lookup failed or the map protections are incompatible,
	 * the fault generally fails.
	 *
	 * The failure could be due to TDF_NOFAULT if vm_map_lookup()
	 * tried to do a COW fault.
	 *
	 * If the caller is trying to do a user wiring we have more work
	 * to do.
	 */
	if (result != KERN_SUCCESS) {
		if (result == KERN_FAILURE_NOFAULT) {
			result = KERN_FAILURE;
			goto done;
		}
		if (result != KERN_PROTECTION_FAILURE ||
		    (fs.fault_flags & VM_FAULT_WIRE_MASK) != VM_FAULT_USER_WIRE)
		{
			if (result == KERN_INVALID_ADDRESS && growstack &&
			    map != kernel_map && curproc != NULL) {
				result = vm_map_growstack(map, vaddr);
				if (result == KERN_SUCCESS) {
					growstack = 0;
					++retry;
					goto RetryFault;
				}
				result = KERN_FAILURE;
			}
			goto done;
		}

		/*
		 * If we are user-wiring a r/w segment, and it is COW, then
		 * we need to do the COW operation.  Note that we don't
		 * currently COW RO sections now, because it is NOT desirable
		 * to COW .text.  We simply keep .text from ever being COW'ed
		 * and take the heat that one cannot debug wired .text sections.
		 *
		 * XXX Try to allow the above by specifying OVERRIDE_WRITE.
		 */
		result = vm_map_lookup(&fs.map, vaddr,
				       VM_PROT_READ | VM_PROT_WRITE |
				       VM_PROT_OVERRIDE_WRITE,
				       &fs.entry, &fs.first_ba,
				       &first_pindex, &first_count,
				       &fs.first_prot, &fs.wflags);
		if (result != KERN_SUCCESS) {
			/* could also be KERN_FAILURE_NOFAULT */
			result = KERN_FAILURE;
			goto done;
		}

		/*
		 * If we don't COW now, on a user wire, the user will never
		 * be able to write to the mapping.  If we don't make this
		 * restriction, the bookkeeping would be nearly impossible.
501 * 502 * XXX We have a shared lock, this will have a MP race but 503 * I don't see how it can hurt anything. 504 */ 505 if ((fs.first_prot & VM_PROT_WRITE) == 0) { 506 atomic_clear_char(&fs.entry->max_protection, 507 VM_PROT_WRITE); 508 } 509 } 510 511 /* 512 * fs.map is read-locked 513 * 514 * Misc checks. Save the map generation number to detect races. 515 */ 516 fs.lookup_still_valid = 1; 517 fs.first_m = NULL; 518 fs.ba = fs.first_ba; /* so unlock_things() works */ 519 fs.prot = fs.first_prot; /* default (used by uksmap) */ 520 521 if (fs.entry->eflags & (MAP_ENTRY_NOFAULT | MAP_ENTRY_KSTACK)) { 522 if (fs.entry->eflags & MAP_ENTRY_NOFAULT) { 523 panic("vm_fault: fault on nofault entry, addr: %p", 524 (void *)vaddr); 525 } 526 if ((fs.entry->eflags & MAP_ENTRY_KSTACK) && 527 vaddr >= fs.entry->ba.start && 528 vaddr < fs.entry->ba.start + PAGE_SIZE) { 529 panic("vm_fault: fault on stack guard, addr: %p", 530 (void *)vaddr); 531 } 532 } 533 534 /* 535 * A user-kernel shared map has no VM object and bypasses 536 * everything. We execute the uksmap function with a temporary 537 * fictitious vm_page. The address is directly mapped with no 538 * management. 539 */ 540 if (fs.entry->maptype == VM_MAPTYPE_UKSMAP) { 541 struct vm_page fakem; 542 543 bzero(&fakem, sizeof(fakem)); 544 fakem.pindex = first_pindex; 545 fakem.flags = PG_FICTITIOUS | PG_UNQUEUED; 546 fakem.busy_count = PBUSY_LOCKED; 547 fakem.valid = VM_PAGE_BITS_ALL; 548 fakem.pat_mode = VM_MEMATTR_DEFAULT; 549 if (fs.entry->ba.uksmap(&fs.entry->ba, UKSMAPOP_FAULT, 550 fs.entry->aux.dev, &fakem)) { 551 result = KERN_FAILURE; 552 unlock_things(&fs); 553 goto done2; 554 } 555 pmap_enter(fs.map->pmap, vaddr, &fakem, fs.prot | inherit_prot, 556 (fs.wflags & FW_WIRED), fs.entry); 557 goto done_success; 558 } 559 560 /* 561 * A system map entry may return a NULL object. No object means 562 * no pager means an unrecoverable kernel fault. 563 */ 564 if (fs.first_ba == NULL) { 565 panic("vm_fault: unrecoverable fault at %p in entry %p", 566 (void *)vaddr, fs.entry); 567 } 568 569 /* 570 * Fail here if not a trivial anonymous page fault and TDF_NOFAULT 571 * is set. 572 * 573 * Unfortunately a deadlock can occur if we are forced to page-in 574 * from swap, but diving all the way into the vm_pager_get_page() 575 * function to find out is too much. Just check the object type. 576 * 577 * The deadlock is a CAM deadlock on a busy VM page when trying 578 * to finish an I/O if another process gets stuck in 579 * vop_helper_read_shortcut() due to a swap fault. 580 */ 581 if ((td->td_flags & TDF_NOFAULT) && 582 (retry || 583 fs.first_ba->object->type == OBJT_VNODE || 584 fs.first_ba->object->type == OBJT_SWAP || 585 fs.first_ba->backing_ba)) { 586 result = KERN_FAILURE; 587 unlock_things(&fs); 588 goto done2; 589 } 590 591 /* 592 * If the entry is wired the page protection level is limited to 593 * what the vm_map_lookup() allowed us. 594 * 595 * XXX it is unclear if this code is still needed as vm_map_lookup() 596 * no longer prevents protection changes on locked memory. REMOVE 597 * IF WE DETERMINE THAT THIS CODE IS NO LONGER NEEDED. 598 */ 599 if (fs.wflags & FW_WIRED) 600 fault_type = fs.first_prot; 601 602 /* 603 * We generally want to avoid unnecessary exclusive modes on backing 604 * and terminal objects because this can seriously interfere with 605 * heavily fork()'d processes (particularly /bin/sh scripts). 606 * 607 * However, we also want to avoid unnecessary retries due to needed 608 * shared->exclusive promotion for common faults. 
	 * Exclusive mode is always needed if any page insertion, rename,
	 * or free occurs in an object (and also indirectly if any I/O is
	 * done).
	 *
	 * The main issue here is going to be fs.first_shared.  If the
	 * first_object has a backing object which isn't shadowed and the
	 * process is single-threaded we might as well use an exclusive
	 * lock/chain right off the bat.
	 */
#if 0
	/* WORK IN PROGRESS, CODE REMOVED */
	if (fs.first_shared && fs.first_object->backing_object &&
	    LIST_EMPTY(&fs.first_object->shadow_head) &&
	    td->td_proc && td->td_proc->p_nthreads == 1) {
		fs.first_shared = 0;
	}
#endif

	/*
	 * VM_FAULT_UNSWAP - swap_pager_unswapped() needs an exclusive object
	 * VM_FAULT_DIRTY  - may require swap_pager_unswapped() later, but
	 *		     we can try shared first.
	 */
	if (fault_flags & VM_FAULT_UNSWAP)
		fs.first_shared = 0;

	/*
	 * Try to shortcut the entire mess and run the fault lockless.
	 * This will burst in multiple pages via fs->mary[].
	 */
	if (vm_fault_bypass_count &&
	    vm_fault_bypass(&fs, first_pindex, first_count,
			    &mextcount, fault_type) == KERN_SUCCESS) {
		fault_flags &= ~VM_FAULT_BURST;
		goto success;
	}

	/*
	 * Exclusive heuristic (alloc page vs page exists)
	 */
	if (fs.first_ba->flags & VM_MAP_BACK_EXCL_HEUR)
		fs.first_shared = 0;

	/*
	 * Obtain a top-level object lock, shared or exclusive depending
	 * on fs.first_shared.  If a shared lock winds up being insufficient
	 * we will retry with an exclusive lock.
	 *
	 * The vnode pager lock is always shared.
	 */
	if (fs.first_shared)
		vm_object_hold_shared(fs.first_ba->object);
	else
		vm_object_hold(fs.first_ba->object);
	if (fs.vp == NULL)
		fs.vp = vnode_pager_lock(fs.first_ba);
	fs.first_ba_held = 1;

	/*
	 * The page we want is at (first_object, first_pindex).
	 *
	 * Now we have the actual (object, pindex), fault in the page.  If
	 * vm_fault_object() fails it will unlock and deallocate the FS
	 * data.  If it succeeds everything remains locked and fs->ba->object
	 * will have an additional PIP count if fs->ba != fs->first_ba.
	 *
	 * vm_fault_object will set fs->prot for the pmap operation.  It is
	 * allowed to set VM_PROT_WRITE if fault_type == VM_PROT_READ if the
	 * page can be safely written.  However, it will force a read-only
	 * mapping for a read fault if the memory is managed by a virtual
	 * page table.
	 *
	 * If the fault code uses the shared object lock shortcut
	 * we must not try to burst (we can't allocate VM pages).
	 */
	result = vm_fault_object(&fs, first_pindex, fault_type, 1);

	if (debug_fault > 0) {
		--debug_fault;
		kprintf("VM_FAULT result %d addr=%jx type=%02x flags=%02x "
			"fs.m=%p fs.prot=%02x fs.wflags=%02x fs.entry=%p\n",
			result, (intmax_t)vaddr, fault_type, fault_flags,
			fs.mary[0], fs.prot, fs.wflags, fs.entry);
	}

	if (result == KERN_TRY_AGAIN) {
		++retry;
		goto RetryFault;
	}
	if (result != KERN_SUCCESS) {
		goto done;
	}

success:
	/*
	 * On success vm_fault_object() does not unlock or deallocate, and fs.m
	 * will contain a busied page.  It does drop fs->ba if appropriate.
	 *
	 * Enter the page into the pmap and do pmap-related adjustments.
	 *
	 * WARNING! Soft-busied fs.m's can only be manipulated in limited
	 *	    ways.
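	 *
	 * (mextcount is normally 1; vm_fault_bypass() raises it when it
	 * bursts in extra soft-busied pages, in which case the loop below
	 * enters each page at the successive virtual addresses.)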
	 */
	KKASSERT(fs.lookup_still_valid != 0);
	vm_page_flag_set(fs.mary[0], PG_REFERENCED);

	for (n = 0; n < mextcount; ++n) {
		pmap_enter(fs.map->pmap, vaddr + (n << PAGE_SHIFT),
			   fs.mary[n], fs.prot | inherit_prot,
			   fs.wflags & FW_WIRED, fs.entry);
	}

	/*
	 * If the page is not wired down, then put it where the pageout daemon
	 * can find it.
	 *
	 * NOTE: We cannot safely wire, unwire, or adjust queues for a
	 *	 soft-busied page.
	 */
	for (n = 0; n < mextcount; ++n) {
		if (fs.msoftonly) {
			KKASSERT(fs.mary[n]->busy_count & PBUSY_MASK);
			KKASSERT((fs.fault_flags & VM_FAULT_WIRE_MASK) == 0);
			vm_page_sbusy_drop(fs.mary[n]);
		} else {
			if (fs.fault_flags & VM_FAULT_WIRE_MASK) {
				if (fs.wflags & FW_WIRED)
					vm_page_wire(fs.mary[n]);
				else
					vm_page_unwire(fs.mary[n], 1);
			} else {
				vm_page_activate(fs.mary[n]);
			}
			KKASSERT(fs.mary[n]->busy_count & PBUSY_LOCKED);
			vm_page_wakeup(fs.mary[n]);
		}
	}

	/*
	 * Burst in a few more pages if possible.  The fs.map should still
	 * be locked.  To avoid interlocking against a vnode->getblk
	 * operation we had to be sure to unbusy our primary vm_page above
	 * first.
	 *
	 * A normal burst can continue down backing store.  Only execute it
	 * if we are holding an exclusive lock, otherwise the exclusive
	 * locks the burst code gets might cause excessive SMP collisions.
	 *
	 * A quick burst can be utilized when there is no backing object
	 * (i.e. a shared file mmap).
	 */
	if ((fault_flags & VM_FAULT_BURST) &&
	    (fs.fault_flags & VM_FAULT_WIRE_MASK) == 0 &&
	    (fs.wflags & FW_WIRED) == 0) {
		if (fs.first_shared == 0 && fs.shared == 0) {
			vm_prefault(fs.map->pmap, vaddr,
				    fs.entry, fs.prot, fault_flags);
		} else {
			vm_prefault_quick(fs.map->pmap, vaddr,
					  fs.entry, fs.prot, fault_flags);
		}
	}

done_success:
	/*
	 * Unlock everything, and return
	 */
	unlock_things(&fs);

	mycpu->gd_cnt.v_vm_faults++;
	if (td->td_lwp) {
		if (fs.hardfault) {
			++td->td_lwp->lwp_ru.ru_majflt;
		} else {
			++td->td_lwp->lwp_ru.ru_minflt;
		}
	}

	/*vm_object_deallocate(fs.first_ba->object);*/
	/*fs.m = NULL; */

	result = KERN_SUCCESS;
done:
	if (fs.first_ba && fs.first_ba->object && fs.first_ba_held == 1) {
		vm_object_drop(fs.first_ba->object);
		fs.first_ba_held = 0;
	}
done2:
	if (lp)
		lp->lwp_flags &= ~LWP_PAGING;

#if !defined(NO_SWAPPING)
	/*
	 * Check the process RSS limit and force deactivation and
	 * (asynchronous) paging if necessary.  This is a complex operation,
	 * only do it for direct user-mode faults, for now.
	 *
	 * To reduce overhead implement approximately a ~16MB hysteresis.
	 */
	p = td->td_proc;
	if ((fault_flags & VM_FAULT_USERMODE) && lp &&
	    p->p_limit && map->pmap && vm_pageout_memuse_mode >= 1 &&
	    map != kernel_map) {
		vm_pindex_t limit;
		vm_pindex_t size;

		limit = OFF_TO_IDX(qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
					p->p_rlimit[RLIMIT_RSS].rlim_max));
		size = pmap_resident_tlnw_count(map->pmap);
		if (limit >= 0 && size > 4096 && size - 4096 >= limit) {
			vm_pageout_map_deactivate_pages(map, limit);
		}
	}
#endif

	if (result != KERN_SUCCESS && debug_fault < 0) {
		kprintf("VM_FAULT %d:%d (%s) result %d "
			"addr=%jx type=%02x flags=%02x "
			"fs.m=%p fs.prot=%02x fs.wflags=%02x fs.entry=%p\n",
			(curthread->td_proc ? curthread->td_proc->p_pid : -1),
			(curthread->td_lwp ? curthread->td_lwp->lwp_tid : -1),
			curthread->td_comm,
			result,
			(intmax_t)vaddr, fault_type, fault_flags,
			fs.mary[0], fs.prot, fs.wflags, fs.entry);
		while (debug_fault < 0 && (debug_fault & 1))
			tsleep(&debug_fault, 0, "DEBUG", hz);
	}

	return (result);
}

/*
 * Attempt a lockless vm_fault() shortcut.  The stars have to align for this
 * to work.  But if it does we can get our page only soft-busied and not
 * have to touch the vm_object or vnode locks at all.
 */
static
int
vm_fault_bypass(struct faultstate *fs, vm_pindex_t first_pindex,
		vm_pindex_t first_count, int *mextcountp,
		vm_prot_t fault_type)
{
	vm_page_t m;
	vm_object_t obj;	/* NOT LOCKED */
	int n;
	int nlim;

	/*
	 * Don't waste time if the object is only being used by one vm_map.
	 */
	obj = fs->first_ba->object;
#if 0
	if (obj->flags & OBJ_ONEMAPPING)
		return KERN_FAILURE;
#endif

	/*
	 * This will try to wire/unwire a page, which can't be done with
	 * a soft-busied page.
	 */
	if (fs->fault_flags & VM_FAULT_WIRE_MASK)
		return KERN_FAILURE;

	/*
	 * Ok, try to get the vm_page quickly via the hash table.  The
	 * page will be soft-busied on success (NOT hard-busied).
	 */
	m = vm_page_hash_get(obj, first_pindex);
	if (m == NULL) {
#ifdef VM_FAULT_QUICK_DEBUG
		++vm_fault_bypass_failure_count2;
#endif
		return KERN_FAILURE;
	}
	if ((obj->flags & OBJ_DEAD) ||
	    m->valid != VM_PAGE_BITS_ALL ||
	    m->queue - m->pc != PQ_ACTIVE ||
	    (m->flags & PG_SWAPPED)) {
		vm_page_sbusy_drop(m);
#ifdef VM_FAULT_QUICK_DEBUG
		++vm_fault_bypass_failure_count3;
#endif
		return KERN_FAILURE;
	}

	/*
	 * The page is already fully valid, ACTIVE, and is not PG_SWAPPED.
	 *
	 * Don't map the page writable when emulating the dirty bit, a
	 * fault must be taken for proper emulation (vkernel).
	 */
	if (curthread->td_lwp && curthread->td_lwp->lwp_vmspace &&
	    pmap_emulate_ad_bits(&curthread->td_lwp->lwp_vmspace->vm_pmap)) {
		if ((fault_type & VM_PROT_WRITE) == 0)
			fs->prot &= ~VM_PROT_WRITE;
	}

	/*
	 * If this is a write fault the object and the page must already
	 * be writable.  Since we don't hold an object lock and only a
	 * soft-busy on the page, we cannot manipulate the object or
	 * the page state (other than the page queue).
	 */
	if (fs->prot & VM_PROT_WRITE) {
		if ((obj->flags & (OBJ_WRITEABLE | OBJ_MIGHTBEDIRTY)) !=
		    (OBJ_WRITEABLE | OBJ_MIGHTBEDIRTY) ||
		    m->dirty != VM_PAGE_BITS_ALL) {
			vm_page_sbusy_drop(m);
#ifdef VM_FAULT_QUICK_DEBUG
			++vm_fault_bypass_failure_count4;
#endif
			return KERN_FAILURE;
		}
		vm_set_nosync(m, fs->entry);
	}

	/*
	 * Set page and potentially burst in more
	 *
	 * Even though we are only soft-busied we can still move pages
	 * around in the normal queue(s).  The soft-busy prevents the
	 * page from being removed from the object, etc (normal operation).
	 *
	 * However, in this fast path it is excessively important to avoid
	 * any hard locks, so we use a special passive version of activate.
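	 *
	 * (The caller detects this case via fs->msoftonly and releases the
	 * pages with vm_page_sbusy_drop() instead of vm_page_wakeup().)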
	 */
	fs->msoftonly = 1;
	fs->mary[0] = m;
	vm_page_soft_activate(m);

	if (vm_fault_bypass_count > 1) {
		nlim = vm_fault_bypass_count;
		if (nlim > VM_FAULT_MAX_QUICK)		/* array limit(+1) */
			nlim = VM_FAULT_MAX_QUICK;
		if (nlim > first_count)			/* user limit */
			nlim = first_count;

		for (n = 1; n < nlim; ++n) {
			m = vm_page_hash_get(obj, first_pindex + n);
			if (m == NULL)
				break;
			if (m->valid != VM_PAGE_BITS_ALL ||
			    m->queue - m->pc != PQ_ACTIVE ||
			    (m->flags & PG_SWAPPED)) {
				vm_page_sbusy_drop(m);
				break;
			}
			if (fs->prot & VM_PROT_WRITE) {
				if ((obj->flags & (OBJ_WRITEABLE |
						   OBJ_MIGHTBEDIRTY)) !=
				    (OBJ_WRITEABLE | OBJ_MIGHTBEDIRTY) ||
				    m->dirty != VM_PAGE_BITS_ALL) {
					vm_page_sbusy_drop(m);
					break;
				}
			}
			vm_page_soft_activate(m);
			fs->mary[n] = m;
		}
		*mextcountp = n;
	}

#ifdef VM_FAULT_QUICK_DEBUG
	++vm_fault_bypass_success_count;
#endif

	return KERN_SUCCESS;
}

/*
 * Fault in the specified virtual address in the current process map,
 * returning a held VM page or NULL.  See vm_fault_page() for more
 * information.
 *
 * No requirements.
 */
vm_page_t
vm_fault_page_quick(vm_offset_t va, vm_prot_t fault_type,
		    int *errorp, int *busyp)
{
	struct lwp *lp = curthread->td_lwp;
	vm_page_t m;

	m = vm_fault_page(&lp->lwp_vmspace->vm_map, va,
			  fault_type, VM_FAULT_NORMAL,
			  errorp, busyp);
	return(m);
}

/*
 * Fault in the specified virtual address in the specified map, doing all
 * necessary manipulation of the object store and all necessary I/O.  Return
 * a held VM page or NULL, and set *errorp.  The related pmap is not
 * updated.
 *
 * If busyp is not NULL then *busyp will be set to TRUE if this routine
 * decides to return a busied page (aka VM_PROT_WRITE), or FALSE if it
 * does not (VM_PROT_WRITE not specified or busyp is NULL).  If busyp is
 * NULL the returned page is only held.
 *
 * If the caller has no intention of writing to the page's contents, busyp
 * can be passed as NULL along with VM_PROT_WRITE to force a COW operation
 * without busying the page.
 *
 * The returned page will also be marked PG_REFERENCED.
 *
 * If the page cannot be faulted writable and VM_PROT_WRITE was specified, an
 * error will be returned.
 *
 * No requirements.
 */
vm_page_t
vm_fault_page(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
	      int fault_flags, int *errorp, int *busyp)
{
	vm_pindex_t first_pindex;
	vm_pindex_t first_count;
	struct faultstate fs;
	int result;
	int retry;
	int growstack;
	int didcow;
	vm_prot_t orig_fault_type = fault_type;

	retry = 0;
	didcow = 0;
	fs.hardfault = 0;
	fs.fault_flags = fault_flags;
	KKASSERT((fault_flags & VM_FAULT_WIRE_MASK) == 0);

	/*
	 * Dive the pmap (concurrency possible).  If we find the
	 * appropriate page we can terminate early and quickly.
	 *
	 * This works great for normal programs but will always return
	 * NULL for host lookups of vkernel maps in VMM mode.
	 *
	 * NOTE: pmap_fault_page_quick() might not busy the page.  If
	 *	 VM_PROT_WRITE is set in fault_type and pmap_fault_page_quick()
	 *	 returns non-NULL, it will safely dirty the returned vm_page_t
	 *	 for us.  We cannot safely dirty it here (it might not be
	 *	 busy).
	 */
	fs.mary[0] = pmap_fault_page_quick(map->pmap, vaddr, fault_type, busyp);
	if (fs.mary[0]) {
		*errorp = 0;
		return(fs.mary[0]);
	}

	/*
	 * Otherwise take a concurrency hit and do a formal page
	 * fault.
	 */
	fs.vp = NULL;
	fs.shared = vm_shared_fault;
	fs.first_shared = vm_shared_fault;
	fs.msoftonly = 0;
	growstack = 1;

	/*
	 * VM_FAULT_UNSWAP - swap_pager_unswapped() needs an exclusive object
	 * VM_FAULT_DIRTY  - may require swap_pager_unswapped() later, but
	 *		     we can try shared first.
	 */
	if (fault_flags & VM_FAULT_UNSWAP) {
		fs.first_shared = 0;
	}

RetryFault:
	/*
	 * Find the vm_map_entry representing the backing store and resolve
	 * the top level object and page index.  This may have the side
	 * effect of executing a copy-on-write on the map entry and/or
	 * creating a shadow object, but will not COW any actual VM pages.
	 *
	 * On success fs.map is left read-locked and various other fields
	 * are initialized but not otherwise referenced or locked.
	 *
	 * NOTE!  vm_map_lookup will upgrade the fault_type to VM_FAULT_WRITE
	 *	  if the map entry is a virtual page table and also writable,
	 *	  so we can set the 'A' (accessed) bit in the virtual page
	 *	  table entry.
	 */
	fs.map = map;
	fs.first_ba_held = 0;
	result = vm_map_lookup(&fs.map, vaddr, fault_type,
			       &fs.entry, &fs.first_ba,
			       &first_pindex, &first_count,
			       &fs.first_prot, &fs.wflags);

	if (result != KERN_SUCCESS) {
		if (result == KERN_FAILURE_NOFAULT) {
			*errorp = KERN_FAILURE;
			fs.mary[0] = NULL;
			goto done;
		}
		if (result != KERN_PROTECTION_FAILURE ||
		    (fs.fault_flags & VM_FAULT_WIRE_MASK) != VM_FAULT_USER_WIRE)
		{
			if (result == KERN_INVALID_ADDRESS && growstack &&
			    map != kernel_map && curproc != NULL) {
				result = vm_map_growstack(map, vaddr);
				if (result == KERN_SUCCESS) {
					growstack = 0;
					++retry;
					goto RetryFault;
				}
				result = KERN_FAILURE;
			}
			fs.mary[0] = NULL;
			*errorp = result;
			goto done;
		}

		/*
		 * If we are user-wiring a r/w segment, and it is COW, then
		 * we need to do the COW operation.  Note that we don't
		 * currently COW RO sections now, because it is NOT desirable
		 * to COW .text.  We simply keep .text from ever being COW'ed
		 * and take the heat that one cannot debug wired .text sections.
		 */
		result = vm_map_lookup(&fs.map, vaddr,
				       VM_PROT_READ | VM_PROT_WRITE |
				       VM_PROT_OVERRIDE_WRITE,
				       &fs.entry, &fs.first_ba,
				       &first_pindex, &first_count,
				       &fs.first_prot, &fs.wflags);
		if (result != KERN_SUCCESS) {
			/* could also be KERN_FAILURE_NOFAULT */
			*errorp = KERN_FAILURE;
			fs.mary[0] = NULL;
			goto done;
		}

		/*
		 * If we don't COW now, on a user wire, the user will never
		 * be able to write to the mapping.  If we don't make this
		 * restriction, the bookkeeping would be nearly impossible.
		 *
		 * XXX We have a shared lock, this will have a MP race but
		 * I don't see how it can hurt anything.
		 */
		if ((fs.first_prot & VM_PROT_WRITE) == 0) {
			atomic_clear_char(&fs.entry->max_protection,
					  VM_PROT_WRITE);
		}
	}

	/*
	 * fs.map is read-locked
	 *
	 * Misc checks.  Save the map generation number to detect races.
	 */
	fs.lookup_still_valid = 1;
	fs.first_m = NULL;
	fs.ba = fs.first_ba;

	if (fs.entry->eflags & MAP_ENTRY_NOFAULT) {
		panic("vm_fault: fault on nofault entry, addr: %lx",
		      (u_long)vaddr);
	}

	/*
	 * A user-kernel shared map has no VM object and bypasses
	 * everything.  We execute the uksmap function with a temporary
	 * fictitious vm_page.  The address is directly mapped with no
	 * management.
	 */
	if (fs.entry->maptype == VM_MAPTYPE_UKSMAP) {
		struct vm_page fakem;

		bzero(&fakem, sizeof(fakem));
		fakem.pindex = first_pindex;
		fakem.flags = PG_FICTITIOUS | PG_UNQUEUED;
		fakem.busy_count = PBUSY_LOCKED;
		fakem.valid = VM_PAGE_BITS_ALL;
		fakem.pat_mode = VM_MEMATTR_DEFAULT;
		if (fs.entry->ba.uksmap(&fs.entry->ba, UKSMAPOP_FAULT,
					fs.entry->aux.dev, &fakem)) {
			*errorp = KERN_FAILURE;
			fs.mary[0] = NULL;
			unlock_things(&fs);
			goto done2;
		}
		fs.mary[0] = PHYS_TO_VM_PAGE(fakem.phys_addr);
		vm_page_hold(fs.mary[0]);
		if (busyp)
			*busyp = 0;	/* don't need to busy R or W */
		unlock_things(&fs);
		*errorp = 0;
		goto done;
	}


	/*
	 * A system map entry may return a NULL object.  No object means
	 * no pager means an unrecoverable kernel fault.
	 */
	if (fs.first_ba == NULL) {
		panic("vm_fault: unrecoverable fault at %p in entry %p",
		      (void *)vaddr, fs.entry);
	}

	/*
	 * Fail here if not a trivial anonymous page fault and TDF_NOFAULT
	 * is set.
	 *
	 * Unfortunately a deadlock can occur if we are forced to page-in
	 * from swap, but diving all the way into the vm_pager_get_page()
	 * function to find out is too much.  Just check the object type.
	 */
	if ((curthread->td_flags & TDF_NOFAULT) &&
	    (retry ||
	     fs.first_ba->object->type == OBJT_VNODE ||
	     fs.first_ba->object->type == OBJT_SWAP ||
	     fs.first_ba->backing_ba)) {
		*errorp = KERN_FAILURE;
		unlock_things(&fs);
		fs.mary[0] = NULL;
		goto done2;
	}

	/*
	 * If the entry is wired the page protection level is limited to
	 * what the vm_map_lookup() allowed us.
	 *
	 * XXX it is unclear if this code is still needed as vm_map_lookup()
	 * no longer prevents protection changes on locked memory.  REMOVE
	 * IF WE DETERMINE THAT THIS CODE IS NO LONGER NEEDED.
	 */
	if (fs.wflags & FW_WIRED)
		fault_type = fs.first_prot;

	/*
	 * Make a reference to this object to prevent its disposal while we
	 * are messing with it.  Once we have the reference, the map is free
	 * to be diddled.  Since objects reference their shadows (and copies),
	 * they will stay around as well.
	 *
	 * The reference should also prevent an unexpected collapse of the
	 * parent that might move pages from the current object into the
	 * parent unexpectedly, resulting in corruption.
	 *
	 * Bump the paging-in-progress count to prevent size changes (e.g.
	 * truncation operations) during I/O.  This must be done after
	 * obtaining the vnode lock in order to avoid possible deadlocks.
	 */
	if (fs.first_ba->flags & VM_MAP_BACK_EXCL_HEUR)
		fs.first_shared = 0;

	if (fs.first_shared)
		vm_object_hold_shared(fs.first_ba->object);
	else
		vm_object_hold(fs.first_ba->object);
	fs.first_ba_held = 1;
	if (fs.vp == NULL)
		fs.vp = vnode_pager_lock(fs.first_ba);	/* shared */

	/*
	 * The page we want is at (first_object, first_pindex).
	 *
	 * Now we have the actual (object, pindex), fault in the page.  If
	 * vm_fault_object() fails it will unlock and deallocate the FS
	 * data.  If it succeeds everything remains locked and fs->ba->object
	 * will have an additional PIP count if fs->ba != fs->first_ba.
	 */
	fs.mary[0] = NULL;
	result = vm_fault_object(&fs, first_pindex, fault_type, 1);

	if (result == KERN_TRY_AGAIN) {
		KKASSERT(fs.first_ba_held == 0);
		++retry;
		didcow |= fs.wflags & FW_DIDCOW;
		goto RetryFault;
	}
	if (result != KERN_SUCCESS) {
		*errorp = result;
		fs.mary[0] = NULL;
		goto done;
	}

	if ((orig_fault_type & VM_PROT_WRITE) &&
	    (fs.prot & VM_PROT_WRITE) == 0) {
		*errorp = KERN_PROTECTION_FAILURE;
		unlock_things(&fs);
		fs.mary[0] = NULL;
		goto done;
	}

	/*
	 * Generally speaking we don't want to update the pmap because
	 * this routine can be called many times for situations that do
	 * not require updating the pmap, not to mention the page might
	 * already be in the pmap.
	 *
	 * However, if our vm_map_lookup() results in a COW, we need to
	 * at least remove the pte from the pmap to guarantee proper
	 * visibility of modifications made to the process.  For example,
	 * modifications made by vkernel uiocopy/related routines and
	 * modifications made by ptrace().
	 */
	vm_page_flag_set(fs.mary[0], PG_REFERENCED);
#if 0
	pmap_enter(fs.map->pmap, vaddr, fs.mary[0], fs.prot,
		   fs.wflags & FW_WIRED, NULL);
	mycpu->gd_cnt.v_vm_faults++;
	if (curthread->td_lwp)
		++curthread->td_lwp->lwp_ru.ru_minflt;
#endif
	if ((fs.wflags | didcow) & FW_DIDCOW) {
		pmap_remove(fs.map->pmap,
			    vaddr & ~PAGE_MASK,
			    (vaddr & ~PAGE_MASK) + PAGE_SIZE);
	}

	/*
	 * On success vm_fault_object() does not unlock or deallocate, and
	 * fs.mary[0] will contain a busied page.  So we must unlock here
	 * after having messed with the pmap.
	 */
	unlock_things(&fs);

	/*
	 * Return a held page.  We are not doing any pmap manipulation so do
	 * not set PG_MAPPED.  However, adjust the page flags according to
	 * the fault type because the caller may not use a managed pmapping
	 * (so we don't want to lose the fact that the page will be dirtied
	 * if a write fault was specified).
	 */
	if (fault_type & VM_PROT_WRITE)
		vm_page_dirty(fs.mary[0]);
	vm_page_activate(fs.mary[0]);

	if (curthread->td_lwp) {
		if (fs.hardfault) {
			curthread->td_lwp->lwp_ru.ru_majflt++;
		} else {
			curthread->td_lwp->lwp_ru.ru_minflt++;
		}
	}

	/*
	 * Unlock everything, and return the held or busied page.
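	 *
	 * (If the caller requested a busied page for a write fault the page
	 * is returned busied and pre-dirtied; otherwise it is held and its
	 * busy state is released.)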
	 */
	if (busyp) {
		if (fault_type & VM_PROT_WRITE) {
			vm_page_dirty(fs.mary[0]);
			*busyp = 1;
		} else {
			*busyp = 0;
			vm_page_hold(fs.mary[0]);
			vm_page_wakeup(fs.mary[0]);
		}
	} else {
		vm_page_hold(fs.mary[0]);
		vm_page_wakeup(fs.mary[0]);
	}
	/*vm_object_deallocate(fs.first_ba->object);*/
	*errorp = 0;

done:
	KKASSERT(fs.first_ba_held == 0);
done2:
	return(fs.mary[0]);
}

/*
 * Fault in the specified (object,offset), dirty the returned page as
 * needed.  If the requested fault_type cannot be satisfied, NULL is
 * returned and an error is set in *errorp.
 *
 * A held (but not busied) page is returned.
 *
 * The passed in object must be held as specified by the shared
 * argument.
 */
vm_page_t
vm_fault_object_page(vm_object_t object, vm_ooffset_t offset,
		     vm_prot_t fault_type, int fault_flags,
		     int *sharedp, int *errorp)
{
	int result;
	vm_pindex_t first_pindex;
	vm_pindex_t first_count;
	struct faultstate fs;
	struct vm_map_entry entry;

	/*
	 * Since we aren't actually faulting the page into a
	 * pmap we can just fake the entry.ba.
	 */
	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
	bzero(&entry, sizeof(entry));
	entry.maptype = VM_MAPTYPE_NORMAL;
	entry.protection = entry.max_protection = fault_type;
	entry.ba.backing_ba = NULL;
	entry.ba.object = object;
	entry.ba.offset = 0;

	fs.hardfault = 0;
	fs.fault_flags = fault_flags;
	fs.map = NULL;
	fs.shared = vm_shared_fault;
	fs.first_shared = *sharedp;
	fs.msoftonly = 0;
	fs.vp = NULL;
	fs.first_ba_held = -1;	/* object held across call, prevent drop */
	KKASSERT((fault_flags & VM_FAULT_WIRE_MASK) == 0);

	/*
	 * VM_FAULT_UNSWAP - swap_pager_unswapped() needs an exclusive object
	 * VM_FAULT_DIRTY  - may require swap_pager_unswapped() later, but
	 *		     we can try shared first.
	 */
	if (fs.first_shared && (fault_flags & VM_FAULT_UNSWAP)) {
		fs.first_shared = 0;
		vm_object_upgrade(object);
	}

	/*
	 * Retry loop as needed (typically for shared->exclusive transitions)
	 */
RetryFault:
	*sharedp = fs.first_shared;
	first_pindex = OFF_TO_IDX(offset);
	first_count = 1;
	fs.first_ba = &entry.ba;
	fs.ba = fs.first_ba;
	fs.entry = &entry;
	fs.first_prot = fault_type;
	fs.wflags = 0;

	/*
	 * Make a reference to this object to prevent its disposal while we
	 * are messing with it.  Once we have the reference, the map is free
	 * to be diddled.  Since objects reference their shadows (and copies),
	 * they will stay around as well.
	 *
	 * The reference should also prevent an unexpected collapse of the
	 * parent that might move pages from the current object into the
	 * parent unexpectedly, resulting in corruption.
	 *
	 * Bump the paging-in-progress count to prevent size changes (e.g.
	 * truncation operations) during I/O.  This must be done after
	 * obtaining the vnode lock in order to avoid possible deadlocks.
	 */
	if (fs.vp == NULL)
		fs.vp = vnode_pager_lock(fs.first_ba);

	fs.lookup_still_valid = 1;
	fs.first_m = NULL;

	/*
	 * Now we have the actual (object, pindex), fault in the page.  If
	 * vm_fault_object() fails it will unlock and deallocate the FS
	 * data.
	 * If it succeeds everything remains locked and fs->ba->object
	 * will have an additional PIP count if fs->ba != fs->first_ba.
	 *
	 * On KERN_TRY_AGAIN vm_fault_object() leaves fs.first_ba intact.
	 * We may have to upgrade its lock to handle the requested fault.
	 */
	result = vm_fault_object(&fs, first_pindex, fault_type, 0);

	if (result == KERN_TRY_AGAIN) {
		if (fs.first_shared == 0 && *sharedp)
			vm_object_upgrade(object);
		goto RetryFault;
	}
	if (result != KERN_SUCCESS) {
		*errorp = result;
		return(NULL);
	}

	if ((fault_type & VM_PROT_WRITE) && (fs.prot & VM_PROT_WRITE) == 0) {
		*errorp = KERN_PROTECTION_FAILURE;
		unlock_things(&fs);
		return(NULL);
	}

	/*
	 * On success vm_fault_object() does not unlock or deallocate, so we
	 * do it here.  Note that the returned fs.m will be busied.
	 */
	unlock_things(&fs);

	/*
	 * Return a held page.  We are not doing any pmap manipulation so do
	 * not set PG_MAPPED.  However, adjust the page flags according to
	 * the fault type because the caller may not use a managed pmapping
	 * (so we don't want to lose the fact that the page will be dirtied
	 * if a write fault was specified).
	 */
	vm_page_hold(fs.mary[0]);
	vm_page_activate(fs.mary[0]);
	if ((fault_type & VM_PROT_WRITE) || (fault_flags & VM_FAULT_DIRTY))
		vm_page_dirty(fs.mary[0]);
	if (fault_flags & VM_FAULT_UNSWAP)
		swap_pager_unswapped(fs.mary[0]);

	/*
	 * Indicate that the page was accessed.
	 */
	vm_page_flag_set(fs.mary[0], PG_REFERENCED);

	if (curthread->td_lwp) {
		if (fs.hardfault) {
			curthread->td_lwp->lwp_ru.ru_majflt++;
		} else {
			curthread->td_lwp->lwp_ru.ru_minflt++;
		}
	}

	/*
	 * Unlock everything, and return the held page.
	 */
	vm_page_wakeup(fs.mary[0]);
	/*vm_object_deallocate(fs.first_ba->object);*/

	*errorp = 0;
	return(fs.mary[0]);
}

/*
 * This is the core of the vm_fault code.
 *
 * Do all operations required to fault-in (fs.first_ba->object, pindex).
 * Run through the backing store as necessary and do required COW or virtual
 * copy operations.  The caller has already fully resolved the vm_map_entry
 * and, if appropriate, has created a copy-on-write layer.  All we need to
 * do is iterate the object chain.
 *
 * On failure (fs) is unlocked and deallocated and the caller may return or
 * retry depending on the failure code.  On success (fs) is NOT unlocked or
 * deallocated, fs.mary[0] will contain a resolved, busied page, and fs.ba's
 * object will have an additional PIP count if it is not equal to
 * fs.first_ba.
 *
 * If locks based on fs->first_shared or fs->shared are insufficient,
 * clear the appropriate field(s) and return RETRY.  COWs require that
 * first_shared be 0, while page allocations (or frees) require that
 * shared be 0.  Renames require that both be 0.
 *
 * NOTE! fs->[first_]shared might be set with VM_FAULT_DIRTY also set.
 *	 we will have to retry with it exclusive if the vm_page is
 *	 PG_SWAPPED.
 *
 * fs->first_ba->object must be held on call.
 */
static
int
vm_fault_object(struct faultstate *fs, vm_pindex_t first_pindex,
		vm_prot_t fault_type, int allow_nofault)
{
	vm_map_backing_t next_ba;
	vm_pindex_t pindex;
	int error;

	ASSERT_LWKT_TOKEN_HELD(vm_object_token(fs->first_ba->object));
	fs->prot = fs->first_prot;
	pindex = first_pindex;
	KKASSERT(fs->ba == fs->first_ba);

	vm_object_pip_add(fs->first_ba->object, 1);

	/*
	 * If a read fault occurs we try to upgrade the page protection
	 * and make it also writable if possible.  There are three cases
	 * where we cannot make the page mapping writable:
	 *
	 * (1) The mapping is read-only or the VM object is read-only,
	 *     fs->prot above will simply not have VM_PROT_WRITE set.
	 *
	 * (2) If the VM page is read-only or copy-on-write, upgrading would
	 *     just result in an unnecessary COW fault.
	 *
	 * (3) If the pmap specifically requests A/M bit emulation, downgrade
	 *     here.
	 */
	if (curthread->td_lwp && curthread->td_lwp->lwp_vmspace &&
	    pmap_emulate_ad_bits(&curthread->td_lwp->lwp_vmspace->vm_pmap)) {
		if ((fault_type & VM_PROT_WRITE) == 0)
			fs->prot &= ~VM_PROT_WRITE;
	}

	/* vm_object_hold(fs->ba->object); implied b/c ba == first_ba */

	for (;;) {
		/*
		 * If the object is dead, we stop here
		 */
		if (fs->ba->object->flags & OBJ_DEAD) {
			vm_object_pip_wakeup(fs->first_ba->object);
			unlock_things(fs);
			return (KERN_PROTECTION_FAILURE);
		}

		/*
		 * See if the page is resident.  Wait/Retry if the page is
		 * busy (lots of stuff may have changed so we can't continue
		 * in that case).
		 *
		 * We can theoretically allow the soft-busy case on a read
		 * fault if the page is marked valid, but since such
		 * pages are typically already pmap'd, putting that
		 * special case in might be more effort than it is
		 * worth.  We cannot under any circumstances mess
		 * around with a vm_page_t->busy page except, perhaps,
		 * to pmap it.
		 */
		fs->mary[0] = vm_page_lookup_busy_try(fs->ba->object, pindex,
						      TRUE, &error);
		if (error) {
			vm_object_pip_wakeup(fs->first_ba->object);
			unlock_things(fs);
			vm_page_sleep_busy(fs->mary[0], TRUE, "vmpfw");
			mycpu->gd_cnt.v_intrans++;
			fs->mary[0] = NULL;
			return (KERN_TRY_AGAIN);
		}
		if (fs->mary[0]) {
			/*
			 * The page is busied for us.
			 *
			 * If reactivating a page from PQ_CACHE we may have
			 * to rate-limit.
			 */
			int queue = fs->mary[0]->queue;
			vm_page_unqueue_nowakeup(fs->mary[0]);

			if ((queue - fs->mary[0]->pc) == PQ_CACHE &&
			    vm_paging_severe()) {
				vm_page_activate(fs->mary[0]);
				vm_page_wakeup(fs->mary[0]);
				fs->mary[0] = NULL;
				vm_object_pip_wakeup(fs->first_ba->object);
				unlock_things(fs);
				if (allow_nofault == 0 ||
				    (curthread->td_flags & TDF_NOFAULT) == 0) {
					thread_t td;

					vm_wait_pfault();
					td = curthread;
					if (td->td_proc && (td->td_proc->p_flags & P_LOWMEMKILL))
						return (KERN_PROTECTION_FAILURE);
				}
				return (KERN_TRY_AGAIN);
			}

			/*
			 * If it still isn't completely valid (readable),
			 * or if a read-ahead-mark is set on the VM page,
			 * jump to readrest, else we found the page and
			 * can return.
			 *
			 * We can release the spl once we have marked the
			 * page busy.
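			 *
			 * (PG_RAM is a read-ahead mark; clearing it and
			 * taking the readrest path below lets the pager
			 * cluster in additional pages even though this
			 * page itself is already valid.)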
			 */
			if (fs->mary[0]->object != kernel_object) {
				if ((fs->mary[0]->valid & VM_PAGE_BITS_ALL) !=
				    VM_PAGE_BITS_ALL) {
					goto readrest;
				}
				if (fs->mary[0]->flags & PG_RAM) {
					if (debug_cluster)
						kprintf("R");
					vm_page_flag_clear(fs->mary[0], PG_RAM);
					goto readrest;
				}
			}
			atomic_clear_int(&fs->first_ba->flags,
					 VM_MAP_BACK_EXCL_HEUR);
			break;	/* break to PAGE HAS BEEN FOUND */
		}

		/*
		 * Page is not resident.  If this is the search termination
		 * or the pager might contain the page, allocate a new page.
		 */
		if (TRYPAGER(fs) || fs->ba == fs->first_ba) {
			/*
			 * If this is a SWAP object we can use the shared
			 * lock to check existence of a swap block.  If
			 * there isn't one we can skip to the next object.
			 *
			 * However, if this is the first object we allocate
			 * a page now just in case we need to copy to it
			 * later.
			 */
			if (fs->ba != fs->first_ba &&
			    fs->ba->object->type == OBJT_SWAP) {
				if (swap_pager_haspage_locked(fs->ba->object,
							      pindex) == 0) {
					goto next;
				}
			}

			/*
			 * Allocating, must be exclusive.
			 */
			atomic_set_int(&fs->first_ba->flags,
				       VM_MAP_BACK_EXCL_HEUR);
			if (fs->ba == fs->first_ba && fs->first_shared) {
				fs->first_shared = 0;
				vm_object_pip_wakeup(fs->first_ba->object);
				unlock_things(fs);
				return (KERN_TRY_AGAIN);
			}
			if (fs->ba != fs->first_ba && fs->shared) {
				fs->first_shared = 0;
				fs->shared = 0;
				vm_object_pip_wakeup(fs->first_ba->object);
				unlock_things(fs);
				return (KERN_TRY_AGAIN);
			}

			/*
			 * If the page is beyond the object size we fail
			 */
			if (pindex >= fs->ba->object->size) {
				vm_object_pip_wakeup(fs->first_ba->object);
				unlock_things(fs);
				return (KERN_PROTECTION_FAILURE);
			}

			/*
			 * Allocate a new page for this object/offset pair.
			 *
			 * It is possible for the allocation to race, so
			 * handle the case.
			 *
			 * Does not apply to OBJT_MGTDEVICE (e.g. gpu / drm
			 * subsystem).  For OBJT_MGTDEVICE the pages are not
			 * indexed in the VM object at all but instead directly
			 * entered into the pmap.
			 */
			fs->mary[0] = NULL;
			if (fs->ba->object->type == OBJT_MGTDEVICE)
				goto readrest;

			if (!vm_paging_severe()) {
				fs->mary[0] = vm_page_alloc(fs->ba->object,
				    pindex,
				    ((fs->vp || fs->ba->backing_ba) ?
					VM_ALLOC_NULL_OK | VM_ALLOC_NORMAL :
					VM_ALLOC_NULL_OK | VM_ALLOC_NORMAL |
					VM_ALLOC_USE_GD | VM_ALLOC_ZERO));
			}
			if (fs->mary[0] == NULL) {
				vm_object_pip_wakeup(fs->first_ba->object);
				unlock_things(fs);
				if (allow_nofault == 0 ||
				    (curthread->td_flags & TDF_NOFAULT) == 0) {
					thread_t td;

					vm_wait_pfault();
					td = curthread;
					if (td->td_proc && (td->td_proc->p_flags & P_LOWMEMKILL))
						return (KERN_PROTECTION_FAILURE);
				}
				return (KERN_TRY_AGAIN);
			}

			/*
			 * Fall through to readrest.  We have a new page which
			 * will have to be paged (since m->valid will be 0).
			 */
		}

readrest:
		/*
		 * We have found an invalid or partially valid page, a
		 * page with a read-ahead mark which might be partially or
		 * fully valid (and maybe dirty too), or we have allocated
		 * a new page.
1783 * 1784 * Attempt to fault-in the page if there is a chance that the 1785 * pager has it, and potentially fault in additional pages 1786 * at the same time. 1787 * 1788 * If TRYPAGER is true then fs.mary[0] will be non-NULL and 1789 * busied for us. 1790 */ 1791 if (TRYPAGER(fs)) { 1792 u_char behavior = vm_map_entry_behavior(fs->entry); 1793 vm_object_t object; 1794 vm_page_t first_m; 1795 int seqaccess; 1796 int rv; 1797 1798 if (behavior == MAP_ENTRY_BEHAV_RANDOM) 1799 seqaccess = 0; 1800 else 1801 seqaccess = -1; 1802 1803 /* 1804 * Doing I/O may synchronously insert additional 1805 * pages so we can't be shared at this point either. 1806 * 1807 * NOTE: We can't free fs->mary[0] here in the 1808 * allocated case (fs->ba != fs->first_ba) as 1809 * this would require an exclusively locked 1810 * VM object. 1811 */ 1812 if (fs->ba == fs->first_ba && fs->first_shared) { 1813 if (fs->mary[0]) { 1814 vm_page_deactivate(fs->mary[0]); 1815 vm_page_wakeup(fs->mary[0]); 1816 fs->mary[0]= NULL; 1817 } 1818 fs->first_shared = 0; 1819 vm_object_pip_wakeup(fs->first_ba->object); 1820 unlock_things(fs); 1821 return (KERN_TRY_AGAIN); 1822 } 1823 if (fs->ba != fs->first_ba && fs->shared) { 1824 if (fs->mary[0]) { 1825 vm_page_deactivate(fs->mary[0]); 1826 vm_page_wakeup(fs->mary[0]); 1827 fs->mary[0] = NULL; 1828 } 1829 fs->first_shared = 0; 1830 fs->shared = 0; 1831 vm_object_pip_wakeup(fs->first_ba->object); 1832 unlock_things(fs); 1833 return (KERN_TRY_AGAIN); 1834 } 1835 1836 object = fs->ba->object; 1837 first_m = NULL; 1838 1839 /* object is held, no more access to entry or ba's */ 1840 1841 /* 1842 * Acquire the page data. We still hold object 1843 * and the page has been BUSY's. 1844 * 1845 * We own the page, but we must re-issue the lookup 1846 * because the pager may have replaced it (for example, 1847 * in order to enter a fictitious page into the 1848 * object). In this situation the pager will have 1849 * cleaned up the old page and left the new one 1850 * busy for us. 1851 * 1852 * If we got here through a PG_RAM read-ahead 1853 * mark the page may be partially dirty and thus 1854 * not freeable. Don't bother checking to see 1855 * if the pager has the page because we can't free 1856 * it anyway. We have to depend on the get_page 1857 * operation filling in any gaps whether there is 1858 * backing store or not. 1859 * 1860 * We must dispose of the page (fs->mary[0]) and also 1861 * possibly first_m (the fronting layer). If 1862 * this is a write fault leave the page intact 1863 * because we will probably have to copy fs->mary[0] 1864 * to fs->first_m on the retry. If this is a 1865 * read fault we probably won't need the page. 1866 * 1867 * For OBJT_MGTDEVICE (and eventually all types), 1868 * fs->mary[0] is not pre-allocated and may be set 1869 * to a vm_page (busied for us) without being inserted 1870 * into the object. In this case we want to return 1871 * the vm_page directly so the caller can issue the 1872 * pmap_enter(). 
1873 */ 1874 rv = vm_pager_get_page(object, pindex, 1875 &fs->mary[0], seqaccess); 1876 1877 if (rv == VM_PAGER_OK) { 1878 ++fs->hardfault; 1879 if (object->type == OBJT_MGTDEVICE) { 1880 break; 1881 } 1882 1883 fs->mary[0] = vm_page_lookup(object, pindex); 1884 if (fs->mary[0]) { 1885 vm_page_activate(fs->mary[0]); 1886 vm_page_wakeup(fs->mary[0]); 1887 fs->mary[0] = NULL; 1888 } 1889 1890 if (fs->mary[0]) { 1891 /* NOT REACHED */ 1892 /* have page */ 1893 break; 1894 } 1895 vm_object_pip_wakeup(fs->first_ba->object); 1896 unlock_things(fs); 1897 return (KERN_TRY_AGAIN); 1898 } 1899 1900 /* 1901 * If the pager doesn't have the page, continue on 1902 * to the next object. Retain the vm_page if this 1903 * is the first object, we may need to copy into 1904 * it later. 1905 */ 1906 if (rv == VM_PAGER_FAIL) { 1907 if (fs->ba != fs->first_ba) { 1908 if (fs->mary[0]) { 1909 vm_page_free(fs->mary[0]); 1910 fs->mary[0] = NULL; 1911 } 1912 } 1913 goto next; 1914 } 1915 1916 /* 1917 * Remove the bogus page (which does not exist at this 1918 * object/offset). 1919 * 1920 * Also wake up any other process that may want to bring 1921 * in this page. 1922 * 1923 * If this is the top-level object, we must leave the 1924 * busy page to prevent another process from rushing 1925 * past us, and inserting the page in that object at 1926 * the same time that we are. 1927 */ 1928 if (rv == VM_PAGER_ERROR) { 1929 if (curproc) { 1930 kprintf("vm_fault: pager read error, " 1931 "pid %d (%s)\n", 1932 curproc->p_pid, 1933 curproc->p_comm); 1934 } else { 1935 kprintf("vm_fault: pager read error, " 1936 "thread %p (%s)\n", 1937 curthread, 1938 curthread->td_comm); 1939 } 1940 } 1941 1942 /* 1943 * I/O error or data outside pager's range. 1944 */ 1945 if (fs->mary[0]) { 1946 vnode_pager_freepage(fs->mary[0]); 1947 fs->mary[0] = NULL; 1948 } 1949 if (first_m) { 1950 vm_page_free(first_m); 1951 first_m = NULL; /* safety */ 1952 } 1953 vm_object_pip_wakeup(object); 1954 unlock_things(fs); 1955 1956 switch(rv) { 1957 case VM_PAGER_ERROR: 1958 return (KERN_FAILURE); 1959 case VM_PAGER_BAD: 1960 return (KERN_PROTECTION_FAILURE); 1961 default: 1962 return (KERN_PROTECTION_FAILURE); 1963 } 1964 1965 #if 0 1966 /* 1967 * Data outside the range of the pager or an I/O error 1968 * 1969 * The page may have been wired during the pagein, 1970 * e.g. by the buffer cache, and cannot simply be 1971 * freed. Call vnode_pager_freepage() to deal with it. 1972 * 1973 * The object is not held shared so we can safely 1974 * free the page. 1975 */ 1976 if (fs->ba != fs->first_ba) { 1977 1978 /* 1979 * XXX - we cannot just fall out at this 1980 * point, m has been freed and is invalid! 1981 */ 1982 } 1983 1984 /* 1985 * XXX - the check for kernel_map is a kludge to work 1986 * around having the machine panic on a kernel space 1987 * fault w/ I/O error. 1988 */ 1989 if (((fs->map != kernel_map) && 1990 (rv == VM_PAGER_ERROR)) || (rv == VM_PAGER_BAD)) { 1991 if (fs->m) { 1992 /* from just above */ 1993 KKASSERT(fs->first_shared == 0); 1994 vnode_pager_freepage(fs->m); 1995 fs->m = NULL; 1996 } 1997 /* NOT REACHED */ 1998 } 1999 #endif 2000 } 2001 2002 next: 2003 /* 2004 * We get here if the object has a default pager (or unwiring) 2005 * or the pager doesn't have the page. 2006 * 2007 * fs->first_m will be used for the COW unless we find a 2008 * deeper page to be mapped read-only, in which case the 2009 * unlock*(fs) will free first_m. 
2010 */ 2011 if (fs->ba == fs->first_ba) 2012 fs->first_m = fs->mary[0]; 2013 2014 /* 2015 * Move on to the next object. The chain lock should prevent 2016 * the backing_object from getting ripped out from under us. 2017 * 2018 * The object lock for the next object is governed by 2019 * fs->shared. 2020 */ 2021 next_ba = fs->ba->backing_ba; 2022 if (next_ba == NULL) { 2023 /* 2024 * If there's no object left, fill the page in the top 2025 * object with zeros. 2026 */ 2027 if (fs->ba != fs->first_ba) { 2028 vm_object_pip_wakeup(fs->ba->object); 2029 vm_object_drop(fs->ba->object); 2030 fs->ba = fs->first_ba; 2031 pindex = first_pindex; 2032 fs->mary[0] = fs->first_m; 2033 } 2034 fs->first_m = NULL; 2035 2036 /* 2037 * Zero the page and mark it valid. 2038 */ 2039 vm_page_zero_fill(fs->mary[0]); 2040 mycpu->gd_cnt.v_zfod++; 2041 fs->mary[0]->valid = VM_PAGE_BITS_ALL; 2042 break; /* break to PAGE HAS BEEN FOUND */ 2043 } 2044 2045 if (fs->shared) 2046 vm_object_hold_shared(next_ba->object); 2047 else 2048 vm_object_hold(next_ba->object); 2049 KKASSERT(next_ba == fs->ba->backing_ba); 2050 pindex -= OFF_TO_IDX(fs->ba->offset); 2051 pindex += OFF_TO_IDX(next_ba->offset); 2052 2053 if (fs->ba != fs->first_ba) { 2054 vm_object_pip_wakeup(fs->ba->object); 2055 vm_object_lock_swap(); /* flip ba/next_ba */ 2056 vm_object_drop(fs->ba->object); 2057 } 2058 fs->ba = next_ba; 2059 vm_object_pip_add(next_ba->object, 1); 2060 } 2061 2062 /* 2063 * PAGE HAS BEEN FOUND. [Loop invariant still holds -- the object lock 2064 * is held.] 2065 * 2066 * object still held. 2067 * vm_map may not be locked (determined by fs->lookup_still_valid) 2068 * 2069 * local shared variable may be different from fs->shared. 2070 * 2071 * If the page is being written, but isn't already owned by the 2072 * top-level object, we have to copy it into a new page owned by the 2073 * top-level object. 2074 */ 2075 KASSERT((fs->mary[0]->busy_count & PBUSY_LOCKED) != 0, 2076 ("vm_fault: not busy after main loop")); 2077 2078 if (fs->ba != fs->first_ba) { 2079 /* 2080 * We only really need to copy if we want to write it. 2081 */ 2082 if (fault_type & VM_PROT_WRITE) { 2083 #if 0 2084 /* CODE REFACTOR IN PROGRESS, REMOVE OPTIMIZATION */ 2085 /* 2086 * This allows pages to be virtually copied from a 2087 * backing_object into the first_object, where the 2088 * backing object has no other refs to it, and cannot 2089 * gain any more refs. Instead of a bcopy, we just 2090 * move the page from the backing object to the 2091 * first object. Note that we must mark the page 2092 * dirty in the first object so that it will go out 2093 * to swap when needed. 2094 */ 2095 if (virtual_copy_ok(fs)) { 2096 /* 2097 * (first_m) and (m) are both busied. We have 2098 * move (m) into (first_m)'s object/pindex 2099 * in an atomic fashion, then free (first_m). 2100 * 2101 * first_object is held so second remove 2102 * followed by the rename should wind 2103 * up being atomic. vm_page_free() might 2104 * block so we don't do it until after the 2105 * rename. 2106 */ 2107 vm_page_protect(fs->first_m, VM_PROT_NONE); 2108 vm_page_remove(fs->first_m); 2109 vm_page_rename(fs->mary[0], 2110 fs->first_ba->object, 2111 first_pindex); 2112 vm_page_free(fs->first_m); 2113 fs->first_m = fs->mary[0]; 2114 fs->mary[0] = NULL; 2115 mycpu->gd_cnt.v_cow_optim++; 2116 } else 2117 #endif 2118 { 2119 /* 2120 * Oh, well, lets copy it. 
2121 * 2122 * We used to unmap the original page here 2123 * because vm_fault_page() didn't and this 2124 * would cause havoc for the umtx*() code 2125 * and the procfs code. 2126 * 2127 * This is no longer necessary. The 2128 * vm_fault_page() routine will now unmap the 2129 * page after a COW, and the umtx code will 2130 * recover on its own. 2131 */ 2132 /* 2133 * NOTE: Since fs->mary[0] is a backing page, 2134 * it is read-only, so there isn't any 2135 * copy race vs writers. 2136 */ 2137 KKASSERT(fs->first_shared == 0); 2138 vm_page_copy(fs->mary[0], fs->first_m); 2139 /* pmap_remove_specific( 2140 &curthread->td_lwp->lwp_vmspace->vm_pmap, 2141 fs->mary[0]); */ 2142 } 2143 2144 /* 2145 * We no longer need the old page or object. 2146 */ 2147 if (fs->mary[0]) 2148 release_page(fs); 2149 2150 /* 2151 * fs->ba != fs->first_ba due to above conditional 2152 */ 2153 vm_object_pip_wakeup(fs->ba->object); 2154 vm_object_drop(fs->ba->object); 2155 fs->ba = fs->first_ba; 2156 2157 /* 2158 * Only use the new page below... 2159 */ 2160 mycpu->gd_cnt.v_cow_faults++; 2161 fs->mary[0] = fs->first_m; 2162 pindex = first_pindex; 2163 } else { 2164 /* 2165 * If it wasn't a write fault avoid having to copy 2166 * the page by mapping it read-only from backing 2167 * store. The process is not allowed to modify 2168 * backing pages. 2169 */ 2170 fs->prot &= ~VM_PROT_WRITE; 2171 } 2172 } 2173 2174 /* 2175 * Relock the map if necessary, then check the generation count. 2176 * relock_map() will update fs->timestamp to account for the 2177 * relocking if necessary. 2178 * 2179 * If the count has changed after relocking then all sorts of 2180 * crap may have happened and we have to retry. 2181 * 2182 * NOTE: The relock_map() can fail due to a deadlock against 2183 * the vm_page we are holding BUSY. 2184 */ 2185 KKASSERT(fs->lookup_still_valid != 0); 2186 #if 0 2187 if (fs->lookup_still_valid == 0 && fs->map) { 2188 if (relock_map(fs) || 2189 fs->map->timestamp != fs->map_generation) { 2190 release_page(fs); 2191 vm_object_pip_wakeup(fs->first_ba->object); 2192 unlock_things(fs); 2193 return (KERN_TRY_AGAIN); 2194 } 2195 } 2196 #endif 2197 2198 /* 2199 * If the fault is a write, we know that this page is being 2200 * written NOW so dirty it explicitly to save on pmap_is_modified() 2201 * calls later. 2202 * 2203 * If this is a NOSYNC mmap we do not want to set PG_NOSYNC 2204 * if the page is already dirty to prevent data written with 2205 * the expectation of being synced from not being synced. 2206 * Likewise if this entry does not request NOSYNC then make 2207 * sure the page isn't marked NOSYNC. Applications sharing 2208 * data should use the same flags to avoid ping ponging. 2209 * 2210 * Also tell the backing pager, if any, that it should remove 2211 * any swap backing since the page is now dirty. 2212 */ 2213 vm_page_activate(fs->mary[0]); 2214 if (fs->prot & VM_PROT_WRITE) { 2215 vm_object_set_writeable_dirty(fs->first_ba->object); 2216 vm_set_nosync(fs->mary[0], fs->entry); 2217 if (fs->fault_flags & VM_FAULT_DIRTY) { 2218 vm_page_dirty(fs->mary[0]); 2219 if (fs->mary[0]->flags & PG_SWAPPED) { 2220 /* 2221 * If the page is swapped out we have to call 2222 * swap_pager_unswapped() which requires an 2223 * exclusive object lock. If we are shared, 2224 * we must clear the shared flag and retry. 
2225 */ 2226 if ((fs->ba == fs->first_ba && 2227 fs->first_shared) || 2228 (fs->ba != fs->first_ba && fs->shared)) { 2229 vm_page_wakeup(fs->mary[0]); 2230 fs->mary[0] = NULL; 2231 if (fs->ba == fs->first_ba) 2232 fs->first_shared = 0; 2233 else 2234 fs->shared = 0; 2235 vm_object_pip_wakeup( 2236 fs->first_ba->object); 2237 unlock_things(fs); 2238 return (KERN_TRY_AGAIN); 2239 } 2240 swap_pager_unswapped(fs->mary[0]); 2241 } 2242 } 2243 } 2244 2245 /* 2246 * We found our page at backing layer ba. Leave the layer state 2247 * intact. 2248 */ 2249 2250 vm_object_pip_wakeup(fs->first_ba->object); 2251 #if 0 2252 if (fs->ba != fs->first_ba) 2253 vm_object_drop(fs->ba->object); 2254 #endif 2255 2256 /* 2257 * Page had better still be busy. We are still locked up and 2258 * fs->ba->object will have another PIP reference for the case 2259 * where fs->ba != fs->first_ba. 2260 */ 2261 KASSERT(fs->mary[0]->busy_count & PBUSY_LOCKED, 2262 ("vm_fault: page %p not busy!", fs->mary[0])); 2263 2264 /* 2265 * Sanity check: page must be completely valid or it is not fit to 2266 * map into user space. vm_pager_get_pages() ensures this. 2267 */ 2268 if (fs->mary[0]->valid != VM_PAGE_BITS_ALL) { 2269 vm_page_zero_invalid(fs->mary[0], TRUE); 2270 kprintf("Warning: page %p partially invalid on fault\n", 2271 fs->mary[0]); 2272 } 2273 2274 return (KERN_SUCCESS); 2275 } 2276 2277 /* 2278 * Wire down a range of virtual addresses in a map. The entry in question 2279 * should be marked in-transition and the map must be locked. We must 2280 * release the map temporarily while faulting-in the page to avoid a 2281 * deadlock. Note that the entry may be clipped while we are blocked but 2282 * will never be freed. 2283 * 2284 * map must be locked on entry. 2285 */ 2286 int 2287 vm_fault_wire(vm_map_t map, vm_map_entry_t entry, 2288 boolean_t user_wire, int kmflags) 2289 { 2290 boolean_t fictitious; 2291 vm_offset_t start; 2292 vm_offset_t end; 2293 vm_offset_t va; 2294 pmap_t pmap; 2295 int rv; 2296 int wire_prot; 2297 int fault_flags; 2298 vm_page_t m; 2299 2300 if (user_wire) { 2301 wire_prot = VM_PROT_READ; 2302 fault_flags = VM_FAULT_USER_WIRE; 2303 } else { 2304 wire_prot = VM_PROT_READ | VM_PROT_WRITE; 2305 fault_flags = VM_FAULT_CHANGE_WIRING; 2306 } 2307 if (kmflags & KM_NOTLBSYNC) 2308 wire_prot |= VM_PROT_NOSYNC; 2309 2310 pmap = vm_map_pmap(map); 2311 start = entry->ba.start; 2312 end = entry->ba.end; 2313 2314 switch(entry->maptype) { 2315 case VM_MAPTYPE_NORMAL: 2316 fictitious = entry->ba.object && 2317 ((entry->ba.object->type == OBJT_DEVICE) || 2318 (entry->ba.object->type == OBJT_MGTDEVICE)); 2319 break; 2320 case VM_MAPTYPE_UKSMAP: 2321 fictitious = TRUE; 2322 break; 2323 default: 2324 fictitious = FALSE; 2325 break; 2326 } 2327 2328 if (entry->eflags & MAP_ENTRY_KSTACK) 2329 start += PAGE_SIZE; 2330 map->timestamp++; 2331 vm_map_unlock(map); 2332 2333 /* 2334 * We simulate a fault to get the page and enter it in the physical 2335 * map. 2336 */ 2337 for (va = start; va < end; va += PAGE_SIZE) { 2338 rv = vm_fault(map, va, wire_prot, fault_flags); 2339 if (rv) { 2340 while (va > start) { 2341 va -= PAGE_SIZE; 2342 m = pmap_unwire(pmap, va); 2343 if (m && !fictitious) { 2344 vm_page_busy_wait(m, FALSE, "vmwrpg"); 2345 vm_page_unwire(m, 1); 2346 vm_page_wakeup(m); 2347 } 2348 } 2349 goto done; 2350 } 2351 } 2352 rv = KERN_SUCCESS; 2353 done: 2354 vm_map_lock(map); 2355 2356 return (rv); 2357 } 2358 2359 /* 2360 * Unwire a range of virtual addresses in a map. The map should be 2361 * locked. 
2362 */ 2363 void 2364 vm_fault_unwire(vm_map_t map, vm_map_entry_t entry) 2365 { 2366 boolean_t fictitious; 2367 vm_offset_t start; 2368 vm_offset_t end; 2369 vm_offset_t va; 2370 pmap_t pmap; 2371 vm_page_t m; 2372 2373 pmap = vm_map_pmap(map); 2374 start = entry->ba.start; 2375 end = entry->ba.end; 2376 fictitious = entry->ba.object && 2377 ((entry->ba.object->type == OBJT_DEVICE) || 2378 (entry->ba.object->type == OBJT_MGTDEVICE)); 2379 if (entry->eflags & MAP_ENTRY_KSTACK) 2380 start += PAGE_SIZE; 2381 2382 /* 2383 * Since the pages are wired down, we must be able to get their 2384 * mappings from the physical map system. 2385 */ 2386 for (va = start; va < end; va += PAGE_SIZE) { 2387 m = pmap_unwire(pmap, va); 2388 if (m && !fictitious) { 2389 vm_page_busy_wait(m, FALSE, "vmwrpg"); 2390 vm_page_unwire(m, 1); 2391 vm_page_wakeup(m); 2392 } 2393 } 2394 } 2395 2396 /* 2397 * Simulate write faults to bring all data into the head object, return 2398 * KERN_SUCCESS on success (which should be always unless the system runs 2399 * out of memory). 2400 * 2401 * The caller will handle destroying the backing_ba's. 2402 */ 2403 int 2404 vm_fault_collapse(vm_map_t map, vm_map_entry_t entry) 2405 { 2406 struct faultstate fs; 2407 vm_ooffset_t scan; 2408 vm_pindex_t pindex; 2409 vm_object_t object; 2410 int rv; 2411 int all_shadowed; 2412 2413 bzero(&fs, sizeof(fs)); 2414 object = entry->ba.object; 2415 2416 fs.first_prot = entry->max_protection | /* optional VM_PROT_EXECUTE */ 2417 VM_PROT_READ | VM_PROT_WRITE | VM_PROT_OVERRIDE_WRITE; 2418 fs.fault_flags = VM_FAULT_NORMAL; 2419 fs.map = map; 2420 fs.entry = entry; 2421 fs.lookup_still_valid = -1; /* leave map atomically locked */ 2422 fs.first_ba = &entry->ba; 2423 fs.first_ba_held = -1; /* leave object held */ 2424 2425 /* fs.hardfault */ 2426 2427 vm_object_hold(object); 2428 rv = KERN_SUCCESS; 2429 2430 scan = entry->ba.start; 2431 all_shadowed = 1; 2432 2433 while (scan < entry->ba.end) { 2434 pindex = OFF_TO_IDX(entry->ba.offset + (scan - entry->ba.start)); 2435 2436 if (vm_page_lookup(object, pindex)) { 2437 scan += PAGE_SIZE; 2438 continue; 2439 } 2440 2441 all_shadowed = 0; 2442 fs.ba = fs.first_ba; 2443 fs.prot = fs.first_prot; 2444 2445 rv = vm_fault_object(&fs, pindex, fs.first_prot, 1); 2446 if (rv == KERN_TRY_AGAIN) 2447 continue; 2448 if (rv != KERN_SUCCESS) 2449 break; 2450 vm_page_flag_set(fs.mary[0], PG_REFERENCED); 2451 vm_page_activate(fs.mary[0]); 2452 vm_page_wakeup(fs.mary[0]); 2453 scan += PAGE_SIZE; 2454 } 2455 KKASSERT(entry->ba.object == object); 2456 vm_object_drop(object); 2457 2458 /* 2459 * If the fronting object did not have every page we have to clear 2460 * the pmap range due to the pages being changed so we can fault-in 2461 * the proper pages. 2462 */ 2463 if (all_shadowed == 0) 2464 pmap_remove(map->pmap, entry->ba.start, entry->ba.end); 2465 2466 return rv; 2467 } 2468 2469 /* 2470 * Copy all of the pages from one map entry to another. If the source 2471 * is wired down we just use vm_page_lookup(). If not we use 2472 * vm_fault_object(). 2473 * 2474 * The source and destination maps must be locked for write. 2475 * The source and destination maps token must be held 2476 * 2477 * No other requirements. 
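 *
 * (As currently written the copy loop assumes the source pages are
 * resident, i.e. the wired case; a missing source page panics rather
 * than falling back to vm_fault_object().)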
2478 * 2479 * XXX do segment optimization 2480 */ 2481 void 2482 vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map, 2483 vm_map_entry_t dst_entry, vm_map_entry_t src_entry) 2484 { 2485 vm_object_t dst_object; 2486 vm_object_t src_object; 2487 vm_ooffset_t dst_offset; 2488 vm_ooffset_t src_offset; 2489 vm_prot_t prot; 2490 vm_offset_t vaddr; 2491 vm_page_t dst_m; 2492 vm_page_t src_m; 2493 2494 src_object = src_entry->ba.object; 2495 src_offset = src_entry->ba.offset; 2496 2497 /* 2498 * Create the top-level object for the destination entry. (Doesn't 2499 * actually shadow anything - we copy the pages directly.) 2500 */ 2501 vm_map_entry_allocate_object(dst_entry); 2502 dst_object = dst_entry->ba.object; 2503 2504 prot = dst_entry->max_protection; 2505 2506 /* 2507 * Loop through all of the pages in the entry's range, copying each 2508 * one from the source object (it should be there) to the destination 2509 * object. 2510 */ 2511 vm_object_hold(src_object); 2512 vm_object_hold(dst_object); 2513 2514 for (vaddr = dst_entry->ba.start, dst_offset = 0; 2515 vaddr < dst_entry->ba.end; 2516 vaddr += PAGE_SIZE, dst_offset += PAGE_SIZE) { 2517 2518 /* 2519 * Allocate a page in the destination object 2520 */ 2521 do { 2522 dst_m = vm_page_alloc(dst_object, 2523 OFF_TO_IDX(dst_offset), 2524 VM_ALLOC_NORMAL); 2525 if (dst_m == NULL) { 2526 vm_wait(0); 2527 } 2528 } while (dst_m == NULL); 2529 2530 /* 2531 * Find the page in the source object, and copy it in. 2532 * (Because the source is wired down, the page will be in 2533 * memory.) 2534 */ 2535 src_m = vm_page_lookup(src_object, 2536 OFF_TO_IDX(dst_offset + src_offset)); 2537 if (src_m == NULL) 2538 panic("vm_fault_copy_wired: page missing"); 2539 2540 vm_page_copy(src_m, dst_m); 2541 2542 /* 2543 * Enter it in the pmap... 2544 */ 2545 pmap_enter(dst_map->pmap, vaddr, dst_m, prot, FALSE, dst_entry); 2546 2547 /* 2548 * Mark it no longer busy, and put it on the active list. 2549 */ 2550 vm_page_activate(dst_m); 2551 vm_page_wakeup(dst_m); 2552 } 2553 vm_object_drop(dst_object); 2554 vm_object_drop(src_object); 2555 } 2556 2557 #if 0 2558 2559 /* 2560 * This routine checks around the requested page for other pages that 2561 * might be able to be faulted in. This routine brackets the viable 2562 * pages for the pages to be paged in. 
2563 * 2564 * Inputs: 2565 * m, rbehind, rahead 2566 * 2567 * Outputs: 2568 * marray (array of vm_page_t), reqpage (index of requested page) 2569 * 2570 * Return value: 2571 * number of pages in marray 2572 */ 2573 static int 2574 vm_fault_additional_pages(vm_page_t m, int rbehind, int rahead, 2575 vm_page_t *marray, int *reqpage) 2576 { 2577 int i,j; 2578 vm_object_t object; 2579 vm_pindex_t pindex, startpindex, endpindex, tpindex; 2580 vm_page_t rtm; 2581 int cbehind, cahead; 2582 2583 object = m->object; 2584 pindex = m->pindex; 2585 2586 /* 2587 * we don't fault-ahead for device pager 2588 */ 2589 if ((object->type == OBJT_DEVICE) || 2590 (object->type == OBJT_MGTDEVICE)) { 2591 *reqpage = 0; 2592 marray[0] = m; 2593 return 1; 2594 } 2595 2596 /* 2597 * if the requested page is not available, then give up now 2598 */ 2599 if (!vm_pager_has_page(object, pindex, &cbehind, &cahead)) { 2600 *reqpage = 0; /* not used by caller, fix compiler warn */ 2601 return 0; 2602 } 2603 2604 if ((cbehind == 0) && (cahead == 0)) { 2605 *reqpage = 0; 2606 marray[0] = m; 2607 return 1; 2608 } 2609 2610 if (rahead > cahead) { 2611 rahead = cahead; 2612 } 2613 2614 if (rbehind > cbehind) { 2615 rbehind = cbehind; 2616 } 2617 2618 /* 2619 * Do not do any readahead if we have insufficient free memory. 2620 * 2621 * XXX code was broken disabled before and has instability 2622 * with this conditonal fixed, so shortcut for now. 2623 */ 2624 if (burst_fault == 0 || vm_page_count_severe()) { 2625 marray[0] = m; 2626 *reqpage = 0; 2627 return 1; 2628 } 2629 2630 /* 2631 * scan backward for the read behind pages -- in memory 2632 * 2633 * Assume that if the page is not found an interrupt will not 2634 * create it. Theoretically interrupts can only remove (busy) 2635 * pages, not create new associations. 2636 */ 2637 if (pindex > 0) { 2638 if (rbehind > pindex) { 2639 rbehind = pindex; 2640 startpindex = 0; 2641 } else { 2642 startpindex = pindex - rbehind; 2643 } 2644 2645 vm_object_hold(object); 2646 for (tpindex = pindex; tpindex > startpindex; --tpindex) { 2647 if (vm_page_lookup(object, tpindex - 1)) 2648 break; 2649 } 2650 2651 i = 0; 2652 while (tpindex < pindex) { 2653 rtm = vm_page_alloc(object, tpindex, VM_ALLOC_SYSTEM | 2654 VM_ALLOC_NULL_OK); 2655 if (rtm == NULL) { 2656 for (j = 0; j < i; j++) { 2657 vm_page_free(marray[j]); 2658 } 2659 vm_object_drop(object); 2660 marray[0] = m; 2661 *reqpage = 0; 2662 return 1; 2663 } 2664 marray[i] = rtm; 2665 ++i; 2666 ++tpindex; 2667 } 2668 vm_object_drop(object); 2669 } else { 2670 i = 0; 2671 } 2672 2673 /* 2674 * Assign requested page 2675 */ 2676 marray[i] = m; 2677 *reqpage = i; 2678 ++i; 2679 2680 /* 2681 * Scan forwards for read-ahead pages 2682 */ 2683 tpindex = pindex + 1; 2684 endpindex = tpindex + rahead; 2685 if (endpindex > object->size) 2686 endpindex = object->size; 2687 2688 vm_object_hold(object); 2689 while (tpindex < endpindex) { 2690 if (vm_page_lookup(object, tpindex)) 2691 break; 2692 rtm = vm_page_alloc(object, tpindex, VM_ALLOC_SYSTEM | 2693 VM_ALLOC_NULL_OK); 2694 if (rtm == NULL) 2695 break; 2696 marray[i] = rtm; 2697 ++i; 2698 ++tpindex; 2699 } 2700 vm_object_drop(object); 2701 2702 return (i); 2703 } 2704 2705 #endif 2706 2707 /* 2708 * vm_prefault() provides a quick way of clustering pagefaults into a 2709 * processes address space. It is a "cousin" of pmap_object_init_pt, 2710 * except it runs at page fault time instead of mmap time. 
2711 * 2712 * vm.fast_fault Enables pre-faulting zero-fill pages 2713 * 2714 * vm.prefault_pages Number of pages (1/2 negative, 1/2 positive) to 2715 * prefault. Scan stops in either direction when 2716 * a page is found to already exist. 2717 * 2718 * This code used to be per-platform pmap_prefault(). It is now 2719 * machine-independent and enhanced to also pre-fault zero-fill pages 2720 * (see vm.fast_fault) as well as make them writable, which greatly 2721 * reduces the number of page faults programs incur. 2722 * 2723 * Application performance when pre-faulting zero-fill pages is heavily 2724 * dependent on the application. Very tiny applications like /bin/echo 2725 * lose a little performance while applications of any appreciable size 2726 * gain performance. Prefaulting multiple pages also reduces SMP 2727 * congestion and can improve SMP performance significantly. 2728 * 2729 * NOTE! prot may allow writing but this only applies to the top level 2730 * object. If we wind up mapping a page extracted from a backing 2731 * object we have to make sure it is read-only. 2732 * 2733 * NOTE! The caller has already handled any COW operations on the 2734 * vm_map_entry via the normal fault code. Do NOT call this 2735 * shortcut unless the normal fault code has run on this entry. 2736 * 2737 * The related map must be locked. 2738 * No other requirements. 2739 */ 2740 __read_mostly static int vm_prefault_pages = 8; 2741 SYSCTL_INT(_vm, OID_AUTO, prefault_pages, CTLFLAG_RW, &vm_prefault_pages, 0, 2742 "Maximum number of pages to pre-fault"); 2743 __read_mostly static int vm_fast_fault = 1; 2744 SYSCTL_INT(_vm, OID_AUTO, fast_fault, CTLFLAG_RW, &vm_fast_fault, 0, 2745 "Burst fault zero-fill regions"); 2746 2747 /* 2748 * Set PG_NOSYNC if the map entry indicates so, but only if the page 2749 * is not already dirty by other means. This will prevent passive 2750 * filesystem syncing as well as 'sync' from writing out the page. 2751 */ 2752 static void 2753 vm_set_nosync(vm_page_t m, vm_map_entry_t entry) 2754 { 2755 if (entry->eflags & MAP_ENTRY_NOSYNC) { 2756 if (m->dirty == 0) 2757 vm_page_flag_set(m, PG_NOSYNC); 2758 } else { 2759 vm_page_flag_clear(m, PG_NOSYNC); 2760 } 2761 } 2762 2763 static void 2764 vm_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry, int prot, 2765 int fault_flags) 2766 { 2767 vm_map_backing_t ba; /* first ba */ 2768 struct lwp *lp; 2769 vm_page_t m; 2770 vm_offset_t addr; 2771 vm_pindex_t index; 2772 vm_pindex_t pindex; 2773 vm_object_t object; 2774 int pprot; 2775 int i; 2776 int noneg; 2777 int nopos; 2778 int maxpages; 2779 2780 /* 2781 * Get stable max count value, disabled if set to 0 2782 */ 2783 maxpages = vm_prefault_pages; 2784 cpu_ccfence(); 2785 if (maxpages <= 0) 2786 return; 2787 2788 /* 2789 * We do not currently prefault mappings that use virtual page 2790 * tables. We do not prefault foreign pmaps. 2791 */ 2792 if (entry->maptype != VM_MAPTYPE_NORMAL) 2793 return; 2794 lp = curthread->td_lwp; 2795 if (lp == NULL || (pmap != vmspace_pmap(lp->lwp_vmspace))) 2796 return; 2797 2798 /* 2799 * Limit pre-fault count to 1024 pages. 2800 */ 2801 if (maxpages > 1024) 2802 maxpages = 1024; 2803 2804 ba = &entry->ba; 2805 object = entry->ba.object; 2806 KKASSERT(object != NULL); 2807 2808 /* 2809 * NOTE: VM_FAULT_DIRTY allowed later so must hold object exclusively 2810 * now (or do something more complex XXX). 
2811 */ 2812 vm_object_hold(object); 2813 2814 noneg = 0; 2815 nopos = 0; 2816 for (i = 0; i < maxpages; ++i) { 2817 vm_object_t lobject; 2818 vm_object_t nobject; 2819 vm_map_backing_t last_ba; /* last ba */ 2820 vm_map_backing_t next_ba; /* last ba */ 2821 int allocated = 0; 2822 int error; 2823 2824 /* 2825 * This can eat a lot of time on a heavily contended 2826 * machine so yield on the tick if needed. 2827 */ 2828 if ((i & 7) == 7) 2829 lwkt_yield(); 2830 2831 /* 2832 * Calculate the page to pre-fault, stopping the scan in 2833 * each direction separately if the limit is reached. 2834 */ 2835 if (i & 1) { 2836 if (noneg) 2837 continue; 2838 addr = addra - ((i + 1) >> 1) * PAGE_SIZE; 2839 } else { 2840 if (nopos) 2841 continue; 2842 addr = addra + ((i + 2) >> 1) * PAGE_SIZE; 2843 } 2844 if (addr < entry->ba.start) { 2845 noneg = 1; 2846 if (noneg && nopos) 2847 break; 2848 continue; 2849 } 2850 if (addr >= entry->ba.end) { 2851 nopos = 1; 2852 if (noneg && nopos) 2853 break; 2854 continue; 2855 } 2856 2857 /* 2858 * Skip pages already mapped, and stop scanning in that 2859 * direction. When the scan terminates in both directions 2860 * we are done. 2861 */ 2862 if (pmap_prefault_ok(pmap, addr) == 0) { 2863 if (i & 1) 2864 noneg = 1; 2865 else 2866 nopos = 1; 2867 if (noneg && nopos) 2868 break; 2869 continue; 2870 } 2871 2872 /* 2873 * Follow the backing layers to obtain the page to be mapped 2874 * into the pmap. 2875 * 2876 * If we reach the terminal object without finding a page 2877 * and we determine it would be advantageous, then allocate 2878 * a zero-fill page for the base object. The base object 2879 * is guaranteed to be OBJT_DEFAULT for this case. 2880 * 2881 * In order to not have to check the pager via *haspage*() 2882 * we stop if any non-default object is encountered. e.g. 2883 * a vnode or swap object would stop the loop. 2884 */ 2885 index = ((addr - entry->ba.start) + entry->ba.offset) >> 2886 PAGE_SHIFT; 2887 last_ba = ba; 2888 lobject = object; 2889 pindex = index; 2890 pprot = prot; 2891 2892 /*vm_object_hold(lobject); implied */ 2893 2894 while ((m = vm_page_lookup_busy_try(lobject, pindex, 2895 TRUE, &error)) == NULL) { 2896 if (lobject->type != OBJT_DEFAULT) 2897 break; 2898 if ((next_ba = last_ba->backing_ba) == NULL) { 2899 if (vm_fast_fault == 0) 2900 break; 2901 if ((prot & VM_PROT_WRITE) == 0 || 2902 vm_paging_min()) { 2903 break; 2904 } 2905 2906 /* 2907 * NOTE: Allocated from base object 2908 */ 2909 m = vm_page_alloc(object, index, 2910 VM_ALLOC_NORMAL | 2911 VM_ALLOC_ZERO | 2912 VM_ALLOC_USE_GD | 2913 VM_ALLOC_NULL_OK); 2914 if (m == NULL) 2915 break; 2916 allocated = 1; 2917 pprot = prot; 2918 /* lobject = object .. not needed */ 2919 break; 2920 } 2921 if (next_ba->offset & PAGE_MASK) 2922 break; 2923 nobject = next_ba->object; 2924 vm_object_hold(nobject); 2925 pindex -= last_ba->offset >> PAGE_SHIFT; 2926 pindex += next_ba->offset >> PAGE_SHIFT; 2927 if (last_ba != ba) { 2928 vm_object_lock_swap(); 2929 vm_object_drop(lobject); 2930 } 2931 lobject = nobject; 2932 last_ba = next_ba; 2933 pprot &= ~VM_PROT_WRITE; 2934 } 2935 2936 /* 2937 * NOTE: A non-NULL (m) will be associated with lobject if 2938 * it was found there, otherwise it is probably a 2939 * zero-fill page associated with the base object. 2940 * 2941 * Give-up if no page is available. 2942 */ 2943 if (m == NULL) { 2944 if (last_ba != ba) 2945 vm_object_drop(lobject); 2946 break; 2947 } 2948 2949 /* 2950 * The object must be marked dirty if we are mapping a 2951 * writable page. 
                 * Note that (m) does not have to be
                 * entered into the object, so use lobject or object
                 * as appropriate instead of m->object.
                 *
                 * Do this before we potentially drop the object.
                 */
                if (pprot & VM_PROT_WRITE) {
                        vm_object_set_writeable_dirty(
                                        (allocated ? object : lobject));
                }

                /*
                 * Do not conditionalize on PG_RAM.  If pages are present in
                 * the VM system we assume optimal caching.  If caching is
                 * not optimal the I/O gravy train will be restarted when we
                 * hit an unavailable page.  We do not want to try to restart
                 * the gravy train now because we really don't know how much
                 * of the object has been cached.  The cost for restarting
                 * the gravy train should be low (since accesses will likely
                 * be I/O bound anyway).
                 */
                if (last_ba != ba)
                        vm_object_drop(lobject);

                /*
                 * Enter the page into the pmap if appropriate.  If we had
                 * allocated the page we have to place it on a queue.  If not
                 * we just have to make sure it isn't on the cache queue
                 * (pages on the cache queue are not allowed to be mapped).
                 *
                 * When allocated is TRUE, m corresponds to object,
                 * not lobject.
                 */
                if (allocated) {
                        /*
                         * Page must be zeroed.
                         */
                        vm_page_zero_fill(m);
                        mycpu->gd_cnt.v_zfod++;
                        m->valid = VM_PAGE_BITS_ALL;

                        /*
                         * Handle the dirty page case
                         */
                        if (pprot & VM_PROT_WRITE)
                                vm_set_nosync(m, entry);
                        pmap_enter(pmap, addr, m, pprot, 0, entry);
#if 0
                        /* REMOVE ME, a burst counts as one fault */
                        mycpu->gd_cnt.v_vm_faults++;
                        if (curthread->td_lwp)
                                ++curthread->td_lwp->lwp_ru.ru_minflt;
#endif
                        vm_page_deactivate(m);
                        if (pprot & VM_PROT_WRITE) {
                                /*vm_object_set_writeable_dirty(object);*/
                                vm_set_nosync(m, entry);
                                if (fault_flags & VM_FAULT_DIRTY) {
                                        vm_page_dirty(m);
                                        /*XXX*/
                                        swap_pager_unswapped(m);
                                }
                        }
                        vm_page_wakeup(m);
                } else if (error) {
                        /* couldn't busy page, no wakeup */
                } else if (
                        ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
                        (m->flags & PG_FICTITIOUS) == 0) {
                        /*
                         * A fully valid page not undergoing soft I/O can
                         * be immediately entered into the pmap.
                         *
                         * When allocated is false, m corresponds to lobject.
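                         *
                         * (If the page happens to be on PQ_CACHE it is
                         * deactivated first, since cache-queue pages are
                         * not allowed to be mapped.)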
3025 */ 3026 if ((m->queue - m->pc) == PQ_CACHE) 3027 vm_page_deactivate(m); 3028 if (pprot & VM_PROT_WRITE) { 3029 /*vm_object_set_writeable_dirty(lobject);*/ 3030 vm_set_nosync(m, entry); 3031 if (fault_flags & VM_FAULT_DIRTY) { 3032 vm_page_dirty(m); 3033 /*XXX*/ 3034 swap_pager_unswapped(m); 3035 } 3036 } 3037 if (pprot & VM_PROT_WRITE) 3038 vm_set_nosync(m, entry); 3039 pmap_enter(pmap, addr, m, pprot, 0, entry); 3040 #if 0 3041 /* REMOVE ME, a burst counts as one fault */ 3042 mycpu->gd_cnt.v_vm_faults++; 3043 if (curthread->td_lwp) 3044 ++curthread->td_lwp->lwp_ru.ru_minflt; 3045 #endif 3046 vm_page_wakeup(m); 3047 } else { 3048 vm_page_wakeup(m); 3049 } 3050 } 3051 vm_object_drop(object); 3052 } 3053 3054 /* 3055 * Object can be held shared 3056 */ 3057 static void 3058 vm_prefault_quick(pmap_t pmap, vm_offset_t addra, 3059 vm_map_entry_t entry, int prot, int fault_flags) 3060 { 3061 struct lwp *lp; 3062 vm_page_t m; 3063 vm_offset_t addr; 3064 vm_pindex_t pindex; 3065 vm_object_t object; 3066 int i; 3067 int noneg; 3068 int nopos; 3069 int maxpages; 3070 3071 /* 3072 * Get stable max count value, disabled if set to 0 3073 */ 3074 maxpages = vm_prefault_pages; 3075 cpu_ccfence(); 3076 if (maxpages <= 0) 3077 return; 3078 3079 /* 3080 * We do not currently prefault mappings that use virtual page 3081 * tables. We do not prefault foreign pmaps. 3082 */ 3083 if (entry->maptype != VM_MAPTYPE_NORMAL) 3084 return; 3085 lp = curthread->td_lwp; 3086 if (lp == NULL || (pmap != vmspace_pmap(lp->lwp_vmspace))) 3087 return; 3088 object = entry->ba.object; 3089 if (entry->ba.backing_ba != NULL) 3090 return; 3091 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object)); 3092 3093 /* 3094 * Limit pre-fault count to 1024 pages. 3095 */ 3096 if (maxpages > 1024) 3097 maxpages = 1024; 3098 3099 noneg = 0; 3100 nopos = 0; 3101 for (i = 0; i < maxpages; ++i) { 3102 int error; 3103 3104 /* 3105 * Calculate the page to pre-fault, stopping the scan in 3106 * each direction separately if the limit is reached. 3107 */ 3108 if (i & 1) { 3109 if (noneg) 3110 continue; 3111 addr = addra - ((i + 1) >> 1) * PAGE_SIZE; 3112 } else { 3113 if (nopos) 3114 continue; 3115 addr = addra + ((i + 2) >> 1) * PAGE_SIZE; 3116 } 3117 if (addr < entry->ba.start) { 3118 noneg = 1; 3119 if (noneg && nopos) 3120 break; 3121 continue; 3122 } 3123 if (addr >= entry->ba.end) { 3124 nopos = 1; 3125 if (noneg && nopos) 3126 break; 3127 continue; 3128 } 3129 3130 /* 3131 * Follow the VM object chain to obtain the page to be mapped 3132 * into the pmap. This version of the prefault code only 3133 * works with terminal objects. 3134 * 3135 * The page must already exist. If we encounter a problem 3136 * we stop here. 3137 * 3138 * WARNING! We cannot call swap_pager_unswapped() or insert 3139 * a new vm_page with a shared token. 3140 */ 3141 pindex = ((addr - entry->ba.start) + entry->ba.offset) >> 3142 PAGE_SHIFT; 3143 3144 /* 3145 * Skip pages already mapped, and stop scanning in that 3146 * direction. When the scan terminates in both directions 3147 * we are done. 3148 */ 3149 if (pmap_prefault_ok(pmap, addr) == 0) { 3150 if (i & 1) 3151 noneg = 1; 3152 else 3153 nopos = 1; 3154 if (noneg && nopos) 3155 break; 3156 continue; 3157 } 3158 3159 /* 3160 * Shortcut the read-only mapping case using the far more 3161 * efficient vm_page_lookup_sbusy_try() function. This 3162 * allows us to acquire the page soft-busied only which 3163 * is especially nice for concurrent execs of the same 3164 * program. 
3165 * 3166 * The lookup function also validates page suitability 3167 * (all valid bits set, and not fictitious). 3168 * 3169 * If the page is in PQ_CACHE we have to fall-through 3170 * and hard-busy it so we can move it out of PQ_CACHE. 3171 */ 3172 if ((prot & VM_PROT_WRITE) == 0) { 3173 m = vm_page_lookup_sbusy_try(object, pindex, 3174 0, PAGE_SIZE); 3175 if (m == NULL) 3176 break; 3177 if ((m->queue - m->pc) != PQ_CACHE) { 3178 pmap_enter(pmap, addr, m, prot, 0, entry); 3179 #if 0 3180 /* REMOVE ME, a burst counts as one fault */ 3181 mycpu->gd_cnt.v_vm_faults++; 3182 if (curthread->td_lwp) 3183 ++curthread->td_lwp->lwp_ru.ru_minflt; 3184 #endif 3185 vm_page_sbusy_drop(m); 3186 continue; 3187 } 3188 vm_page_sbusy_drop(m); 3189 } 3190 3191 /* 3192 * Fallback to normal vm_page lookup code. This code 3193 * hard-busies the page. Not only that, but the page 3194 * can remain in that state for a significant period 3195 * time due to pmap_enter()'s overhead. 3196 */ 3197 m = vm_page_lookup_busy_try(object, pindex, TRUE, &error); 3198 if (m == NULL || error) 3199 break; 3200 3201 /* 3202 * Stop if the page cannot be trivially entered into the 3203 * pmap. 3204 */ 3205 if (((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) || 3206 (m->flags & PG_FICTITIOUS) || 3207 ((m->flags & PG_SWAPPED) && 3208 (prot & VM_PROT_WRITE) && 3209 (fault_flags & VM_FAULT_DIRTY))) { 3210 vm_page_wakeup(m); 3211 break; 3212 } 3213 3214 /* 3215 * Enter the page into the pmap. The object might be held 3216 * shared so we can't do any (serious) modifying operation 3217 * on it. 3218 */ 3219 if ((m->queue - m->pc) == PQ_CACHE) 3220 vm_page_deactivate(m); 3221 if (prot & VM_PROT_WRITE) { 3222 vm_object_set_writeable_dirty(m->object); 3223 vm_set_nosync(m, entry); 3224 if (fault_flags & VM_FAULT_DIRTY) { 3225 vm_page_dirty(m); 3226 /* can't happeen due to conditional above */ 3227 /* swap_pager_unswapped(m); */ 3228 } 3229 } 3230 pmap_enter(pmap, addr, m, prot, 0, entry); 3231 #if 0 3232 /* REMOVE ME, a burst counts as one fault */ 3233 mycpu->gd_cnt.v_vm_faults++; 3234 if (curthread->td_lwp) 3235 ++curthread->td_lwp->lwp_ru.ru_minflt; 3236 #endif 3237 vm_page_wakeup(m); 3238 } 3239 } 3240