1 /* 2 * Copyright (c) 2003-2014 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Matthew Dillon <dillon@backplane.com> 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * 3. Neither the name of The DragonFly Project nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific, prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 * 34 * --- 35 * 36 * Copyright (c) 1991, 1993 37 * The Regents of the University of California. All rights reserved. 38 * Copyright (c) 1994 John S. Dyson 39 * All rights reserved. 40 * Copyright (c) 1994 David Greenman 41 * All rights reserved. 42 * 43 * 44 * This code is derived from software contributed to Berkeley by 45 * The Mach Operating System project at Carnegie-Mellon University. 46 * 47 * Redistribution and use in source and binary forms, with or without 48 * modification, are permitted provided that the following conditions 49 * are met: 50 * 1. Redistributions of source code must retain the above copyright 51 * notice, this list of conditions and the following disclaimer. 52 * 2. Redistributions in binary form must reproduce the above copyright 53 * notice, this list of conditions and the following disclaimer in the 54 * documentation and/or other materials provided with the distribution. 55 * 3. Neither the name of the University nor the names of its contributors 56 * may be used to endorse or promote products derived from this software 57 * without specific prior written permission. 58 * 59 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 60 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 61 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 62 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 63 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 64 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 65 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 66 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 67 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 68 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 69 * SUCH DAMAGE. 70 * 71 * --- 72 * 73 * Copyright (c) 1987, 1990 Carnegie-Mellon University. 74 * All rights reserved. 75 * 76 * Authors: Avadis Tevanian, Jr., Michael Wayne Young 77 * 78 * Permission to use, copy, modify and distribute this software and 79 * its documentation is hereby granted, provided that both the copyright 80 * notice and this permission notice appear in all copies of the 81 * software, derivative works or modified versions, and any portions 82 * thereof, and that both notices appear in supporting documentation. 83 * 84 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 85 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 86 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 87 * 88 * Carnegie Mellon requests users of this software to return to 89 * 90 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 91 * School of Computer Science 92 * Carnegie Mellon University 93 * Pittsburgh PA 15213-3890 94 * 95 * any improvements or extensions that they make and grant Carnegie the 96 * rights to redistribute these changes. 97 */ 98 99 /* 100 * Page fault handling module. 101 */ 102 103 #include <sys/param.h> 104 #include <sys/systm.h> 105 #include <sys/kernel.h> 106 #include <sys/proc.h> 107 #include <sys/vnode.h> 108 #include <sys/resourcevar.h> 109 #include <sys/vmmeter.h> 110 #include <sys/vkernel.h> 111 #include <sys/lock.h> 112 #include <sys/sysctl.h> 113 114 #include <cpu/lwbuf.h> 115 116 #include <vm/vm.h> 117 #include <vm/vm_param.h> 118 #include <vm/pmap.h> 119 #include <vm/vm_map.h> 120 #include <vm/vm_object.h> 121 #include <vm/vm_page.h> 122 #include <vm/vm_pageout.h> 123 #include <vm/vm_kern.h> 124 #include <vm/vm_pager.h> 125 #include <vm/vnode_pager.h> 126 #include <vm/vm_extern.h> 127 128 #include <sys/thread2.h> 129 #include <vm/vm_page2.h> 130 131 struct faultstate { 132 vm_page_t m; 133 vm_object_t object; 134 vm_pindex_t pindex; 135 vm_prot_t prot; 136 vm_page_t first_m; 137 vm_object_t first_object; 138 vm_prot_t first_prot; 139 vm_map_t map; 140 vm_map_entry_t entry; 141 int lookup_still_valid; 142 int hardfault; 143 int fault_flags; 144 int map_generation; 145 int shared; 146 int first_shared; 147 boolean_t wired; 148 struct vnode *vp; 149 }; 150 151 static int debug_fault = 0; 152 SYSCTL_INT(_vm, OID_AUTO, debug_fault, CTLFLAG_RW, &debug_fault, 0, ""); 153 static int debug_cluster = 0; 154 SYSCTL_INT(_vm, OID_AUTO, debug_cluster, CTLFLAG_RW, &debug_cluster, 0, ""); 155 int vm_shared_fault = 1; 156 TUNABLE_INT("vm.shared_fault", &vm_shared_fault); 157 SYSCTL_INT(_vm, OID_AUTO, shared_fault, CTLFLAG_RW, &vm_shared_fault, 0, 158 "Allow shared token on vm_object"); 159 160 static int vm_fault_object(struct faultstate *, vm_pindex_t, vm_prot_t, int); 161 static int vm_fault_vpagetable(struct faultstate *, vm_pindex_t *, 162 vpte_t, int, int); 163 #if 0 164 static int vm_fault_additional_pages (vm_page_t, int, int, vm_page_t *, int *); 165 #endif 166 static void 
vm_set_nosync(vm_page_t m, vm_map_entry_t entry);
static void vm_prefault(pmap_t pmap, vm_offset_t addra,
			vm_map_entry_t entry, int prot, int fault_flags);
static void vm_prefault_quick(pmap_t pmap, vm_offset_t addra,
			vm_map_entry_t entry, int prot, int fault_flags);

static __inline void
release_page(struct faultstate *fs)
{
	vm_page_deactivate(fs->m);
	vm_page_wakeup(fs->m);
	fs->m = NULL;
}

/*
 * NOTE: Once unlocked any cached fs->entry becomes invalid, any reuse
 *	 requires relocking and then checking the timestamp.
 *
 * NOTE: vm_map_lock_read() does not bump fs->map->timestamp so we do
 *	 not have to update fs->map_generation here.
 *
 * NOTE: This function can fail due to a deadlock against the caller's
 *	 holding of a vm_page BUSY.
 */
static __inline int
relock_map(struct faultstate *fs)
{
	int error;

	if (fs->lookup_still_valid == FALSE && fs->map) {
		error = vm_map_lock_read_to(fs->map);
		if (error == 0)
			fs->lookup_still_valid = TRUE;
	} else {
		error = 0;
	}
	return error;
}

static __inline void
unlock_map(struct faultstate *fs)
{
	if (fs->lookup_still_valid && fs->map) {
		vm_map_lookup_done(fs->map, fs->entry, 0);
		fs->lookup_still_valid = FALSE;
	}
}

/*
 * Clean up after a successful call to vm_fault_object() so another call
 * to vm_fault_object() can be made.
 */
static void
_cleanup_successful_fault(struct faultstate *fs, int relock)
{
	/*
	 * We allocated a junk page for a COW operation that did
	 * not occur, the page must be freed.
	 */
	if (fs->object != fs->first_object) {
		KKASSERT(fs->first_shared == 0);
		vm_page_free(fs->first_m);
		vm_object_pip_wakeup(fs->object);
		fs->first_m = NULL;
	}

	/*
	 * Reset fs->object.
	 */
	fs->object = fs->first_object;
	if (relock && fs->lookup_still_valid == FALSE) {
		if (fs->map)
			vm_map_lock_read(fs->map);
		fs->lookup_still_valid = TRUE;
	}
}

static void
_unlock_things(struct faultstate *fs, int dealloc)
{
	_cleanup_successful_fault(fs, 0);
	if (dealloc) {
		/*vm_object_deallocate(fs->first_object);*/
		/*fs->first_object = NULL; drop used later on */
	}
	unlock_map(fs);
	if (fs->vp != NULL) {
		vput(fs->vp);
		fs->vp = NULL;
	}
}

#define unlock_things(fs) _unlock_things(fs, 0)
#define unlock_and_deallocate(fs) _unlock_things(fs, 1)
#define cleanup_successful_fault(fs) _cleanup_successful_fault(fs, 1)

/*
 * TRYPAGER
 *
 * Determine if the pager for the current object *might* contain the page.
 *
 * We only need to try the pager if this is not a default object (default
 * objects are zero-fill and have no real pager), and if we are not taking
 * a wiring fault or if the FS entry is wired.
 */
#define TRYPAGER(fs)	\
		(fs->object->type != OBJT_DEFAULT &&			\
		 (((fs->fault_flags & VM_FAULT_WIRE_MASK) == 0) || fs->wired))

/*
 * vm_fault:
 *
 * Handle a page fault occurring at the given address, requiring the given
 * permissions, in the map specified.  If successful, the page is inserted
 * into the associated physical map.
 *
 * NOTE: The given address should be truncated to the proper page address.
 *
 * KERN_SUCCESS is returned if the page fault is handled; otherwise,
 * a standard error specifying why the fault is fatal is returned.
286 * 287 * The map in question must be referenced, and remains so. 288 * The caller may hold no locks. 289 * No other requirements. 290 */ 291 int 292 vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type, int fault_flags) 293 { 294 int result; 295 vm_pindex_t first_pindex; 296 struct faultstate fs; 297 struct lwp *lp; 298 struct proc *p; 299 thread_t td; 300 struct vm_map_ilock ilock; 301 int didilock; 302 int growstack; 303 int retry = 0; 304 int inherit_prot; 305 306 inherit_prot = fault_type & VM_PROT_NOSYNC; 307 fs.hardfault = 0; 308 fs.fault_flags = fault_flags; 309 fs.vp = NULL; 310 fs.shared = vm_shared_fault; 311 fs.first_shared = vm_shared_fault; 312 growstack = 1; 313 314 /* 315 * vm_map interactions 316 */ 317 td = curthread; 318 if ((lp = td->td_lwp) != NULL) 319 lp->lwp_flags |= LWP_PAGING; 320 321 RetryFault: 322 /* 323 * Find the vm_map_entry representing the backing store and resolve 324 * the top level object and page index. This may have the side 325 * effect of executing a copy-on-write on the map entry, 326 * creating a shadow object, or splitting an anonymous entry for 327 * performance, but will not COW any actual VM pages. 328 * 329 * On success fs.map is left read-locked and various other fields 330 * are initialized but not otherwise referenced or locked. 331 * 332 * NOTE! vm_map_lookup will try to upgrade the fault_type to 333 * VM_FAULT_WRITE if the map entry is a virtual page table 334 * and also writable, so we can set the 'A'accessed bit in 335 * the virtual page table entry. 336 */ 337 fs.map = map; 338 result = vm_map_lookup(&fs.map, vaddr, fault_type, 339 &fs.entry, &fs.first_object, 340 &first_pindex, &fs.first_prot, &fs.wired); 341 342 /* 343 * If the lookup failed or the map protections are incompatible, 344 * the fault generally fails. 345 * 346 * The failure could be due to TDF_NOFAULT if vm_map_lookup() 347 * tried to do a COW fault. 348 * 349 * If the caller is trying to do a user wiring we have more work 350 * to do. 351 */ 352 if (result != KERN_SUCCESS) { 353 if (result == KERN_FAILURE_NOFAULT) { 354 result = KERN_FAILURE; 355 goto done; 356 } 357 if (result != KERN_PROTECTION_FAILURE || 358 (fs.fault_flags & VM_FAULT_WIRE_MASK) != VM_FAULT_USER_WIRE) 359 { 360 if (result == KERN_INVALID_ADDRESS && growstack && 361 map != &kernel_map && curproc != NULL) { 362 result = vm_map_growstack(map, vaddr); 363 if (result == KERN_SUCCESS) { 364 growstack = 0; 365 ++retry; 366 goto RetryFault; 367 } 368 result = KERN_FAILURE; 369 } 370 goto done; 371 } 372 373 /* 374 * If we are user-wiring a r/w segment, and it is COW, then 375 * we need to do the COW operation. Note that we don't 376 * currently COW RO sections now, because it is NOT desirable 377 * to COW .text. We simply keep .text from ever being COW'ed 378 * and take the heat that one cannot debug wired .text sections. 379 */ 380 result = vm_map_lookup(&fs.map, vaddr, 381 VM_PROT_READ|VM_PROT_WRITE| 382 VM_PROT_OVERRIDE_WRITE, 383 &fs.entry, &fs.first_object, 384 &first_pindex, &fs.first_prot, 385 &fs.wired); 386 if (result != KERN_SUCCESS) { 387 /* could also be KERN_FAILURE_NOFAULT */ 388 result = KERN_FAILURE; 389 goto done; 390 } 391 392 /* 393 * If we don't COW now, on a user wire, the user will never 394 * be able to write to the mapping. If we don't make this 395 * restriction, the bookkeeping would be nearly impossible. 396 * 397 * XXX We have a shared lock, this will have a MP race but 398 * I don't see how it can hurt anything. 
399 */ 400 if ((fs.entry->protection & VM_PROT_WRITE) == 0) { 401 atomic_clear_char(&fs.entry->max_protection, 402 VM_PROT_WRITE); 403 } 404 } 405 406 /* 407 * fs.map is read-locked 408 * 409 * Misc checks. Save the map generation number to detect races. 410 */ 411 fs.map_generation = fs.map->timestamp; 412 fs.lookup_still_valid = TRUE; 413 fs.first_m = NULL; 414 fs.object = fs.first_object; /* so unlock_and_deallocate works */ 415 fs.prot = fs.first_prot; /* default (used by uksmap) */ 416 417 if (fs.entry->eflags & (MAP_ENTRY_NOFAULT | MAP_ENTRY_KSTACK)) { 418 if (fs.entry->eflags & MAP_ENTRY_NOFAULT) { 419 panic("vm_fault: fault on nofault entry, addr: %p", 420 (void *)vaddr); 421 } 422 if ((fs.entry->eflags & MAP_ENTRY_KSTACK) && 423 vaddr >= fs.entry->start && 424 vaddr < fs.entry->start + PAGE_SIZE) { 425 panic("vm_fault: fault on stack guard, addr: %p", 426 (void *)vaddr); 427 } 428 } 429 430 /* 431 * A user-kernel shared map has no VM object and bypasses 432 * everything. We execute the uksmap function with a temporary 433 * fictitious vm_page. The address is directly mapped with no 434 * management. 435 */ 436 if (fs.entry->maptype == VM_MAPTYPE_UKSMAP) { 437 struct vm_page fakem; 438 439 bzero(&fakem, sizeof(fakem)); 440 fakem.pindex = first_pindex; 441 fakem.flags = PG_BUSY | PG_FICTITIOUS | PG_UNMANAGED; 442 fakem.valid = VM_PAGE_BITS_ALL; 443 fakem.pat_mode = VM_MEMATTR_DEFAULT; 444 if (fs.entry->object.uksmap(fs.entry->aux.dev, &fakem)) { 445 result = KERN_FAILURE; 446 unlock_things(&fs); 447 goto done2; 448 } 449 pmap_enter(fs.map->pmap, vaddr, &fakem, fs.prot | inherit_prot, 450 fs.wired, fs.entry); 451 goto done_success; 452 } 453 454 /* 455 * A system map entry may return a NULL object. No object means 456 * no pager means an unrecoverable kernel fault. 457 */ 458 if (fs.first_object == NULL) { 459 panic("vm_fault: unrecoverable fault at %p in entry %p", 460 (void *)vaddr, fs.entry); 461 } 462 463 /* 464 * Fail here if not a trivial anonymous page fault and TDF_NOFAULT 465 * is set. 466 * 467 * Unfortunately a deadlock can occur if we are forced to page-in 468 * from swap, but diving all the way into the vm_pager_get_page() 469 * function to find out is too much. Just check the object type. 470 * 471 * The deadlock is a CAM deadlock on a busy VM page when trying 472 * to finish an I/O if another process gets stuck in 473 * vop_helper_read_shortcut() due to a swap fault. 474 */ 475 if ((td->td_flags & TDF_NOFAULT) && 476 (retry || 477 fs.first_object->type == OBJT_VNODE || 478 fs.first_object->type == OBJT_SWAP || 479 fs.first_object->backing_object)) { 480 result = KERN_FAILURE; 481 unlock_things(&fs); 482 goto done2; 483 } 484 485 /* 486 * If the entry is wired we cannot change the page protection. 487 */ 488 if (fs.wired) 489 fault_type = fs.first_prot; 490 491 /* 492 * We generally want to avoid unnecessary exclusive modes on backing 493 * and terminal objects because this can seriously interfere with 494 * heavily fork()'d processes (particularly /bin/sh scripts). 495 * 496 * However, we also want to avoid unnecessary retries due to needed 497 * shared->exclusive promotion for common faults. Exclusive mode is 498 * always needed if any page insertion, rename, or free occurs in an 499 * object (and also indirectly if any I/O is done). 500 * 501 * The main issue here is going to be fs.first_shared. 
If the 502 * first_object has a backing object which isn't shadowed and the 503 * process is single-threaded we might as well use an exclusive 504 * lock/chain right off the bat. 505 */ 506 if (fs.first_shared && fs.first_object->backing_object && 507 LIST_EMPTY(&fs.first_object->shadow_head) && 508 td->td_proc && td->td_proc->p_nthreads == 1) { 509 fs.first_shared = 0; 510 } 511 512 /* 513 * VM_FAULT_UNSWAP - swap_pager_unswapped() needs an exclusive object 514 * VM_FAULT_DIRTY - may require swap_pager_unswapped() later, but 515 * we can try shared first. 516 */ 517 if (fault_flags & VM_FAULT_UNSWAP) { 518 fs.first_shared = 0; 519 } 520 521 /* 522 * Obtain a top-level object lock, shared or exclusive depending 523 * on fs.first_shared. If a shared lock winds up being insufficient 524 * we will retry with an exclusive lock. 525 * 526 * The vnode pager lock is always shared. 527 */ 528 if (fs.first_shared) 529 vm_object_hold_shared(fs.first_object); 530 else 531 vm_object_hold(fs.first_object); 532 if (fs.vp == NULL) 533 fs.vp = vnode_pager_lock(fs.first_object); 534 535 /* 536 * The page we want is at (first_object, first_pindex), but if the 537 * vm_map_entry is VM_MAPTYPE_VPAGETABLE we have to traverse the 538 * page table to figure out the actual pindex. 539 * 540 * NOTE! DEVELOPMENT IN PROGRESS, THIS IS AN INITIAL IMPLEMENTATION 541 * ONLY 542 */ 543 didilock = 0; 544 if (fs.entry->maptype == VM_MAPTYPE_VPAGETABLE) { 545 vm_map_interlock(fs.map, &ilock, vaddr, vaddr + PAGE_SIZE); 546 didilock = 1; 547 result = vm_fault_vpagetable(&fs, &first_pindex, 548 fs.entry->aux.master_pde, 549 fault_type, 1); 550 if (result == KERN_TRY_AGAIN) { 551 vm_map_deinterlock(fs.map, &ilock); 552 vm_object_drop(fs.first_object); 553 ++retry; 554 goto RetryFault; 555 } 556 if (result != KERN_SUCCESS) { 557 vm_map_deinterlock(fs.map, &ilock); 558 goto done; 559 } 560 } 561 562 /* 563 * Now we have the actual (object, pindex), fault in the page. If 564 * vm_fault_object() fails it will unlock and deallocate the FS 565 * data. If it succeeds everything remains locked and fs->object 566 * will have an additional PIP count if it is not equal to 567 * fs->first_object 568 * 569 * vm_fault_object will set fs->prot for the pmap operation. It is 570 * allowed to set VM_PROT_WRITE if fault_type == VM_PROT_READ if the 571 * page can be safely written. However, it will force a read-only 572 * mapping for a read fault if the memory is managed by a virtual 573 * page table. 574 * 575 * If the fault code uses the shared object lock shortcut 576 * we must not try to burst (we can't allocate VM pages). 577 */ 578 result = vm_fault_object(&fs, first_pindex, fault_type, 1); 579 580 if (debug_fault > 0) { 581 --debug_fault; 582 kprintf("VM_FAULT result %d addr=%jx type=%02x flags=%02x " 583 "fs.m=%p fs.prot=%02x fs.wired=%02x fs.entry=%p\n", 584 result, (intmax_t)vaddr, fault_type, fault_flags, 585 fs.m, fs.prot, fs.wired, fs.entry); 586 } 587 588 if (result == KERN_TRY_AGAIN) { 589 if (didilock) 590 vm_map_deinterlock(fs.map, &ilock); 591 vm_object_drop(fs.first_object); 592 ++retry; 593 goto RetryFault; 594 } 595 if (result != KERN_SUCCESS) { 596 if (didilock) 597 vm_map_deinterlock(fs.map, &ilock); 598 goto done; 599 } 600 601 /* 602 * On success vm_fault_object() does not unlock or deallocate, and fs.m 603 * will contain a busied page. 604 * 605 * Enter the page into the pmap and do pmap-related adjustments. 
606 */ 607 KKASSERT(fs.lookup_still_valid == TRUE); 608 vm_page_flag_set(fs.m, PG_REFERENCED); 609 pmap_enter(fs.map->pmap, vaddr, fs.m, fs.prot | inherit_prot, 610 fs.wired, fs.entry); 611 612 if (didilock) 613 vm_map_deinterlock(fs.map, &ilock); 614 615 /*KKASSERT(fs.m->queue == PQ_NONE); page-in op may deactivate page */ 616 KKASSERT(fs.m->flags & PG_BUSY); 617 618 /* 619 * If the page is not wired down, then put it where the pageout daemon 620 * can find it. 621 */ 622 if (fs.fault_flags & VM_FAULT_WIRE_MASK) { 623 if (fs.wired) 624 vm_page_wire(fs.m); 625 else 626 vm_page_unwire(fs.m, 1); 627 } else { 628 vm_page_activate(fs.m); 629 } 630 vm_page_wakeup(fs.m); 631 632 /* 633 * Burst in a few more pages if possible. The fs.map should still 634 * be locked. To avoid interlocking against a vnode->getblk 635 * operation we had to be sure to unbusy our primary vm_page above 636 * first. 637 * 638 * A normal burst can continue down backing store, only execute 639 * if we are holding an exclusive lock, otherwise the exclusive 640 * locks the burst code gets might cause excessive SMP collisions. 641 * 642 * A quick burst can be utilized when there is no backing object 643 * (i.e. a shared file mmap). 644 */ 645 if ((fault_flags & VM_FAULT_BURST) && 646 (fs.fault_flags & VM_FAULT_WIRE_MASK) == 0 && 647 fs.wired == 0) { 648 if (fs.first_shared == 0 && fs.shared == 0) { 649 vm_prefault(fs.map->pmap, vaddr, 650 fs.entry, fs.prot, fault_flags); 651 } else { 652 vm_prefault_quick(fs.map->pmap, vaddr, 653 fs.entry, fs.prot, fault_flags); 654 } 655 } 656 657 done_success: 658 mycpu->gd_cnt.v_vm_faults++; 659 if (td->td_lwp) 660 ++td->td_lwp->lwp_ru.ru_minflt; 661 662 /* 663 * Unlock everything, and return 664 */ 665 unlock_things(&fs); 666 667 if (td->td_lwp) { 668 if (fs.hardfault) { 669 td->td_lwp->lwp_ru.ru_majflt++; 670 } else { 671 td->td_lwp->lwp_ru.ru_minflt++; 672 } 673 } 674 675 /*vm_object_deallocate(fs.first_object);*/ 676 /*fs.m = NULL; */ 677 /*fs.first_object = NULL; must still drop later */ 678 679 result = KERN_SUCCESS; 680 done: 681 if (fs.first_object) 682 vm_object_drop(fs.first_object); 683 done2: 684 if (lp) 685 lp->lwp_flags &= ~LWP_PAGING; 686 687 #if !defined(NO_SWAPPING) 688 /* 689 * Check the process RSS limit and force deactivation and 690 * (asynchronous) paging if necessary. This is a complex operation, 691 * only do it for direct user-mode faults, for now. 692 * 693 * To reduce overhead implement approximately a ~16MB hysteresis. 694 */ 695 p = td->td_proc; 696 if ((fault_flags & VM_FAULT_USERMODE) && lp && 697 p->p_limit && map->pmap && vm_pageout_memuse_mode >= 1 && 698 map != &kernel_map) { 699 vm_pindex_t limit; 700 vm_pindex_t size; 701 702 limit = OFF_TO_IDX(qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur, 703 p->p_rlimit[RLIMIT_RSS].rlim_max)); 704 size = pmap_resident_tlnw_count(map->pmap); 705 if (limit >= 0 && size > 4096 && size - 4096 >= limit) { 706 vm_pageout_map_deactivate_pages(map, limit); 707 } 708 } 709 #endif 710 711 return (result); 712 } 713 714 /* 715 * Fault in the specified virtual address in the current process map, 716 * returning a held VM page or NULL. See vm_fault_page() for more 717 * information. 718 * 719 * No requirements. 
720 */ 721 vm_page_t 722 vm_fault_page_quick(vm_offset_t va, vm_prot_t fault_type, 723 int *errorp, int *busyp) 724 { 725 struct lwp *lp = curthread->td_lwp; 726 vm_page_t m; 727 728 m = vm_fault_page(&lp->lwp_vmspace->vm_map, va, 729 fault_type, VM_FAULT_NORMAL, 730 errorp, busyp); 731 return(m); 732 } 733 734 /* 735 * Fault in the specified virtual address in the specified map, doing all 736 * necessary manipulation of the object store and all necessary I/O. Return 737 * a held VM page or NULL, and set *errorp. The related pmap is not 738 * updated. 739 * 740 * If busyp is not NULL then *busyp will be set to TRUE if this routine 741 * decides to return a busied page (aka VM_PROT_WRITE), or FALSE if it 742 * does not (VM_PROT_WRITE not specified or busyp is NULL). If busyp is 743 * NULL the returned page is only held. 744 * 745 * If the caller has no intention of writing to the page's contents, busyp 746 * can be passed as NULL along with VM_PROT_WRITE to force a COW operation 747 * without busying the page. 748 * 749 * The returned page will also be marked PG_REFERENCED. 750 * 751 * If the page cannot be faulted writable and VM_PROT_WRITE was specified, an 752 * error will be returned. 753 * 754 * No requirements. 755 */ 756 vm_page_t 757 vm_fault_page(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type, 758 int fault_flags, int *errorp, int *busyp) 759 { 760 vm_pindex_t first_pindex; 761 struct faultstate fs; 762 int result; 763 int retry; 764 int growstack; 765 vm_prot_t orig_fault_type = fault_type; 766 767 retry = 0; 768 fs.hardfault = 0; 769 fs.fault_flags = fault_flags; 770 KKASSERT((fault_flags & VM_FAULT_WIRE_MASK) == 0); 771 772 /* 773 * Dive the pmap (concurrency possible). If we find the 774 * appropriate page we can terminate early and quickly. 775 * 776 * This works great for normal programs but will always return 777 * NULL for host lookups of vkernel maps in VMM mode. 778 * 779 * NOTE: pmap_fault_page_quick() might not busy the page. If 780 * VM_PROT_WRITE or VM_PROT_OVERRIDE_WRITE is set in 781 * fault_type and pmap_fault_page_quick() returns non-NULL, 782 * it will safely dirty the returned vm_page_t for us. We 783 * cannot safely dirty it here (it might not be busy). 784 */ 785 fs.m = pmap_fault_page_quick(map->pmap, vaddr, fault_type, busyp); 786 if (fs.m) { 787 *errorp = 0; 788 return(fs.m); 789 } 790 791 /* 792 * Otherwise take a concurrency hit and do a formal page 793 * fault. 794 */ 795 fs.vp = NULL; 796 fs.shared = vm_shared_fault; 797 fs.first_shared = vm_shared_fault; 798 growstack = 1; 799 800 /* 801 * VM_FAULT_UNSWAP - swap_pager_unswapped() needs an exclusive object 802 * VM_FAULT_DIRTY - may require swap_pager_unswapped() later, but 803 * we can try shared first. 804 */ 805 if (fault_flags & VM_FAULT_UNSWAP) { 806 fs.first_shared = 0; 807 } 808 809 RetryFault: 810 /* 811 * Find the vm_map_entry representing the backing store and resolve 812 * the top level object and page index. This may have the side 813 * effect of executing a copy-on-write on the map entry and/or 814 * creating a shadow object, but will not COW any actual VM pages. 815 * 816 * On success fs.map is left read-locked and various other fields 817 * are initialized but not otherwise referenced or locked. 818 * 819 * NOTE! vm_map_lookup will upgrade the fault_type to VM_FAULT_WRITE 820 * if the map entry is a virtual page table and also writable, 821 * so we can set the 'A'accessed bit in the virtual page table 822 * entry. 
823 */ 824 fs.map = map; 825 result = vm_map_lookup(&fs.map, vaddr, fault_type, 826 &fs.entry, &fs.first_object, 827 &first_pindex, &fs.first_prot, &fs.wired); 828 829 if (result != KERN_SUCCESS) { 830 if (result == KERN_FAILURE_NOFAULT) { 831 *errorp = KERN_FAILURE; 832 fs.m = NULL; 833 goto done; 834 } 835 if (result != KERN_PROTECTION_FAILURE || 836 (fs.fault_flags & VM_FAULT_WIRE_MASK) != VM_FAULT_USER_WIRE) 837 { 838 if (result == KERN_INVALID_ADDRESS && growstack && 839 map != &kernel_map && curproc != NULL) { 840 result = vm_map_growstack(map, vaddr); 841 if (result == KERN_SUCCESS) { 842 growstack = 0; 843 ++retry; 844 goto RetryFault; 845 } 846 result = KERN_FAILURE; 847 } 848 fs.m = NULL; 849 *errorp = result; 850 goto done; 851 } 852 853 /* 854 * If we are user-wiring a r/w segment, and it is COW, then 855 * we need to do the COW operation. Note that we don't 856 * currently COW RO sections now, because it is NOT desirable 857 * to COW .text. We simply keep .text from ever being COW'ed 858 * and take the heat that one cannot debug wired .text sections. 859 */ 860 result = vm_map_lookup(&fs.map, vaddr, 861 VM_PROT_READ|VM_PROT_WRITE| 862 VM_PROT_OVERRIDE_WRITE, 863 &fs.entry, &fs.first_object, 864 &first_pindex, &fs.first_prot, 865 &fs.wired); 866 if (result != KERN_SUCCESS) { 867 /* could also be KERN_FAILURE_NOFAULT */ 868 *errorp = KERN_FAILURE; 869 fs.m = NULL; 870 goto done; 871 } 872 873 /* 874 * If we don't COW now, on a user wire, the user will never 875 * be able to write to the mapping. If we don't make this 876 * restriction, the bookkeeping would be nearly impossible. 877 * 878 * XXX We have a shared lock, this will have a MP race but 879 * I don't see how it can hurt anything. 880 */ 881 if ((fs.entry->protection & VM_PROT_WRITE) == 0) { 882 atomic_clear_char(&fs.entry->max_protection, 883 VM_PROT_WRITE); 884 } 885 } 886 887 /* 888 * fs.map is read-locked 889 * 890 * Misc checks. Save the map generation number to detect races. 891 */ 892 fs.map_generation = fs.map->timestamp; 893 fs.lookup_still_valid = TRUE; 894 fs.first_m = NULL; 895 fs.object = fs.first_object; /* so unlock_and_deallocate works */ 896 897 if (fs.entry->eflags & MAP_ENTRY_NOFAULT) { 898 panic("vm_fault: fault on nofault entry, addr: %lx", 899 (u_long)vaddr); 900 } 901 902 /* 903 * A user-kernel shared map has no VM object and bypasses 904 * everything. We execute the uksmap function with a temporary 905 * fictitious vm_page. The address is directly mapped with no 906 * management. 907 */ 908 if (fs.entry->maptype == VM_MAPTYPE_UKSMAP) { 909 struct vm_page fakem; 910 911 bzero(&fakem, sizeof(fakem)); 912 fakem.pindex = first_pindex; 913 fakem.flags = PG_BUSY | PG_FICTITIOUS | PG_UNMANAGED; 914 fakem.valid = VM_PAGE_BITS_ALL; 915 fakem.pat_mode = VM_MEMATTR_DEFAULT; 916 if (fs.entry->object.uksmap(fs.entry->aux.dev, &fakem)) { 917 *errorp = KERN_FAILURE; 918 fs.m = NULL; 919 unlock_things(&fs); 920 goto done2; 921 } 922 fs.m = PHYS_TO_VM_PAGE(fakem.phys_addr); 923 vm_page_hold(fs.m); 924 if (busyp) 925 *busyp = 0; /* don't need to busy R or W */ 926 unlock_things(&fs); 927 *errorp = 0; 928 goto done; 929 } 930 931 932 /* 933 * A system map entry may return a NULL object. No object means 934 * no pager means an unrecoverable kernel fault. 935 */ 936 if (fs.first_object == NULL) { 937 panic("vm_fault: unrecoverable fault at %p in entry %p", 938 (void *)vaddr, fs.entry); 939 } 940 941 /* 942 * Fail here if not a trivial anonymous page fault and TDF_NOFAULT 943 * is set. 
944 * 945 * Unfortunately a deadlock can occur if we are forced to page-in 946 * from swap, but diving all the way into the vm_pager_get_page() 947 * function to find out is too much. Just check the object type. 948 */ 949 if ((curthread->td_flags & TDF_NOFAULT) && 950 (retry || 951 fs.first_object->type == OBJT_VNODE || 952 fs.first_object->type == OBJT_SWAP || 953 fs.first_object->backing_object)) { 954 *errorp = KERN_FAILURE; 955 unlock_things(&fs); 956 fs.m = NULL; 957 goto done2; 958 } 959 960 /* 961 * If the entry is wired we cannot change the page protection. 962 */ 963 if (fs.wired) 964 fault_type = fs.first_prot; 965 966 /* 967 * Make a reference to this object to prevent its disposal while we 968 * are messing with it. Once we have the reference, the map is free 969 * to be diddled. Since objects reference their shadows (and copies), 970 * they will stay around as well. 971 * 972 * The reference should also prevent an unexpected collapse of the 973 * parent that might move pages from the current object into the 974 * parent unexpectedly, resulting in corruption. 975 * 976 * Bump the paging-in-progress count to prevent size changes (e.g. 977 * truncation operations) during I/O. This must be done after 978 * obtaining the vnode lock in order to avoid possible deadlocks. 979 */ 980 if (fs.first_shared) 981 vm_object_hold_shared(fs.first_object); 982 else 983 vm_object_hold(fs.first_object); 984 if (fs.vp == NULL) 985 fs.vp = vnode_pager_lock(fs.first_object); /* shared */ 986 987 /* 988 * The page we want is at (first_object, first_pindex), but if the 989 * vm_map_entry is VM_MAPTYPE_VPAGETABLE we have to traverse the 990 * page table to figure out the actual pindex. 991 * 992 * NOTE! DEVELOPMENT IN PROGRESS, THIS IS AN INITIAL IMPLEMENTATION 993 * ONLY 994 */ 995 if (fs.entry->maptype == VM_MAPTYPE_VPAGETABLE) { 996 result = vm_fault_vpagetable(&fs, &first_pindex, 997 fs.entry->aux.master_pde, 998 fault_type, 1); 999 if (result == KERN_TRY_AGAIN) { 1000 vm_object_drop(fs.first_object); 1001 ++retry; 1002 goto RetryFault; 1003 } 1004 if (result != KERN_SUCCESS) { 1005 *errorp = result; 1006 fs.m = NULL; 1007 goto done; 1008 } 1009 } 1010 1011 /* 1012 * Now we have the actual (object, pindex), fault in the page. If 1013 * vm_fault_object() fails it will unlock and deallocate the FS 1014 * data. If it succeeds everything remains locked and fs->object 1015 * will have an additinal PIP count if it is not equal to 1016 * fs->first_object 1017 */ 1018 fs.m = NULL; 1019 result = vm_fault_object(&fs, first_pindex, fault_type, 1); 1020 1021 if (result == KERN_TRY_AGAIN) { 1022 vm_object_drop(fs.first_object); 1023 ++retry; 1024 goto RetryFault; 1025 } 1026 if (result != KERN_SUCCESS) { 1027 *errorp = result; 1028 fs.m = NULL; 1029 goto done; 1030 } 1031 1032 if ((orig_fault_type & VM_PROT_WRITE) && 1033 (fs.prot & VM_PROT_WRITE) == 0) { 1034 *errorp = KERN_PROTECTION_FAILURE; 1035 unlock_and_deallocate(&fs); 1036 fs.m = NULL; 1037 goto done; 1038 } 1039 1040 /* 1041 * DO NOT UPDATE THE PMAP!!! This function may be called for 1042 * a pmap unrelated to the current process pmap, in which case 1043 * the current cpu core will not be listed in the pmap's pm_active 1044 * mask. Thus invalidation interlocks will fail to work properly. 1045 * 1046 * (for example, 'ps' uses procfs to read program arguments from 1047 * each process's stack). 
1048 * 1049 * In addition to the above this function will be called to acquire 1050 * a page that might already be faulted in, re-faulting it 1051 * continuously is a waste of time. 1052 * 1053 * XXX could this have been the cause of our random seg-fault 1054 * issues? procfs accesses user stacks. 1055 */ 1056 vm_page_flag_set(fs.m, PG_REFERENCED); 1057 #if 0 1058 pmap_enter(fs.map->pmap, vaddr, fs.m, fs.prot, fs.wired, NULL); 1059 mycpu->gd_cnt.v_vm_faults++; 1060 if (curthread->td_lwp) 1061 ++curthread->td_lwp->lwp_ru.ru_minflt; 1062 #endif 1063 1064 /* 1065 * On success vm_fault_object() does not unlock or deallocate, and fs.m 1066 * will contain a busied page. So we must unlock here after having 1067 * messed with the pmap. 1068 */ 1069 unlock_things(&fs); 1070 1071 /* 1072 * Return a held page. We are not doing any pmap manipulation so do 1073 * not set PG_MAPPED. However, adjust the page flags according to 1074 * the fault type because the caller may not use a managed pmapping 1075 * (so we don't want to lose the fact that the page will be dirtied 1076 * if a write fault was specified). 1077 */ 1078 if (fault_type & VM_PROT_WRITE) 1079 vm_page_dirty(fs.m); 1080 vm_page_activate(fs.m); 1081 1082 if (curthread->td_lwp) { 1083 if (fs.hardfault) { 1084 curthread->td_lwp->lwp_ru.ru_majflt++; 1085 } else { 1086 curthread->td_lwp->lwp_ru.ru_minflt++; 1087 } 1088 } 1089 1090 /* 1091 * Unlock everything, and return the held or busied page. 1092 */ 1093 if (busyp) { 1094 if (fault_type & (VM_PROT_WRITE|VM_PROT_OVERRIDE_WRITE)) { 1095 vm_page_dirty(fs.m); 1096 *busyp = 1; 1097 } else { 1098 *busyp = 0; 1099 vm_page_hold(fs.m); 1100 vm_page_wakeup(fs.m); 1101 } 1102 } else { 1103 vm_page_hold(fs.m); 1104 vm_page_wakeup(fs.m); 1105 } 1106 /*vm_object_deallocate(fs.first_object);*/ 1107 /*fs.first_object = NULL; */ 1108 *errorp = 0; 1109 1110 done: 1111 if (fs.first_object) 1112 vm_object_drop(fs.first_object); 1113 done2: 1114 return(fs.m); 1115 } 1116 1117 /* 1118 * Fault in the specified (object,offset), dirty the returned page as 1119 * needed. If the requested fault_type cannot be done NULL and an 1120 * error is returned. 1121 * 1122 * A held (but not busied) page is returned. 1123 * 1124 * The passed in object must be held as specified by the shared 1125 * argument. 1126 */ 1127 vm_page_t 1128 vm_fault_object_page(vm_object_t object, vm_ooffset_t offset, 1129 vm_prot_t fault_type, int fault_flags, 1130 int *sharedp, int *errorp) 1131 { 1132 int result; 1133 vm_pindex_t first_pindex; 1134 struct faultstate fs; 1135 struct vm_map_entry entry; 1136 1137 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object)); 1138 bzero(&entry, sizeof(entry)); 1139 entry.object.vm_object = object; 1140 entry.maptype = VM_MAPTYPE_NORMAL; 1141 entry.protection = entry.max_protection = fault_type; 1142 1143 fs.hardfault = 0; 1144 fs.fault_flags = fault_flags; 1145 fs.map = NULL; 1146 fs.shared = vm_shared_fault; 1147 fs.first_shared = *sharedp; 1148 fs.vp = NULL; 1149 KKASSERT((fault_flags & VM_FAULT_WIRE_MASK) == 0); 1150 1151 /* 1152 * VM_FAULT_UNSWAP - swap_pager_unswapped() needs an exclusive object 1153 * VM_FAULT_DIRTY - may require swap_pager_unswapped() later, but 1154 * we can try shared first. 
1155 */ 1156 if (fs.first_shared && (fault_flags & VM_FAULT_UNSWAP)) { 1157 fs.first_shared = 0; 1158 vm_object_upgrade(object); 1159 } 1160 1161 /* 1162 * Retry loop as needed (typically for shared->exclusive transitions) 1163 */ 1164 RetryFault: 1165 *sharedp = fs.first_shared; 1166 first_pindex = OFF_TO_IDX(offset); 1167 fs.first_object = object; 1168 fs.entry = &entry; 1169 fs.first_prot = fault_type; 1170 fs.wired = 0; 1171 /*fs.map_generation = 0; unused */ 1172 1173 /* 1174 * Make a reference to this object to prevent its disposal while we 1175 * are messing with it. Once we have the reference, the map is free 1176 * to be diddled. Since objects reference their shadows (and copies), 1177 * they will stay around as well. 1178 * 1179 * The reference should also prevent an unexpected collapse of the 1180 * parent that might move pages from the current object into the 1181 * parent unexpectedly, resulting in corruption. 1182 * 1183 * Bump the paging-in-progress count to prevent size changes (e.g. 1184 * truncation operations) during I/O. This must be done after 1185 * obtaining the vnode lock in order to avoid possible deadlocks. 1186 */ 1187 if (fs.vp == NULL) 1188 fs.vp = vnode_pager_lock(fs.first_object); 1189 1190 fs.lookup_still_valid = TRUE; 1191 fs.first_m = NULL; 1192 fs.object = fs.first_object; /* so unlock_and_deallocate works */ 1193 1194 #if 0 1195 /* XXX future - ability to operate on VM object using vpagetable */ 1196 if (fs.entry->maptype == VM_MAPTYPE_VPAGETABLE) { 1197 result = vm_fault_vpagetable(&fs, &first_pindex, 1198 fs.entry->aux.master_pde, 1199 fault_type, 0); 1200 if (result == KERN_TRY_AGAIN) { 1201 if (fs.first_shared == 0 && *sharedp) 1202 vm_object_upgrade(object); 1203 goto RetryFault; 1204 } 1205 if (result != KERN_SUCCESS) { 1206 *errorp = result; 1207 return (NULL); 1208 } 1209 } 1210 #endif 1211 1212 /* 1213 * Now we have the actual (object, pindex), fault in the page. If 1214 * vm_fault_object() fails it will unlock and deallocate the FS 1215 * data. If it succeeds everything remains locked and fs->object 1216 * will have an additinal PIP count if it is not equal to 1217 * fs->first_object 1218 * 1219 * On KERN_TRY_AGAIN vm_fault_object() leaves fs.first_object intact. 1220 * We may have to upgrade its lock to handle the requested fault. 1221 */ 1222 result = vm_fault_object(&fs, first_pindex, fault_type, 0); 1223 1224 if (result == KERN_TRY_AGAIN) { 1225 if (fs.first_shared == 0 && *sharedp) 1226 vm_object_upgrade(object); 1227 goto RetryFault; 1228 } 1229 if (result != KERN_SUCCESS) { 1230 *errorp = result; 1231 return(NULL); 1232 } 1233 1234 if ((fault_type & VM_PROT_WRITE) && (fs.prot & VM_PROT_WRITE) == 0) { 1235 *errorp = KERN_PROTECTION_FAILURE; 1236 unlock_and_deallocate(&fs); 1237 return(NULL); 1238 } 1239 1240 /* 1241 * On success vm_fault_object() does not unlock or deallocate, so we 1242 * do it here. Note that the returned fs.m will be busied. 1243 */ 1244 unlock_things(&fs); 1245 1246 /* 1247 * Return a held page. We are not doing any pmap manipulation so do 1248 * not set PG_MAPPED. However, adjust the page flags according to 1249 * the fault type because the caller may not use a managed pmapping 1250 * (so we don't want to lose the fact that the page will be dirtied 1251 * if a write fault was specified). 
 */
	vm_page_hold(fs.m);
	vm_page_activate(fs.m);
	if ((fault_type & VM_PROT_WRITE) || (fault_flags & VM_FAULT_DIRTY))
		vm_page_dirty(fs.m);
	if (fault_flags & VM_FAULT_UNSWAP)
		swap_pager_unswapped(fs.m);

	/*
	 * Indicate that the page was accessed.
	 */
	vm_page_flag_set(fs.m, PG_REFERENCED);

	if (curthread->td_lwp) {
		if (fs.hardfault) {
			curthread->td_lwp->lwp_ru.ru_majflt++;
		} else {
			curthread->td_lwp->lwp_ru.ru_minflt++;
		}
	}

	/*
	 * Unlock everything, and return the held page.
	 */
	vm_page_wakeup(fs.m);
	/*vm_object_deallocate(fs.first_object);*/
	/*fs.first_object = NULL; */

	*errorp = 0;
	return(fs.m);
}

/*
 * Translate the virtual page number (first_pindex) that is relative
 * to the address space into a logical page number that is relative to the
 * backing object.  Use the virtual page table pointed to by (vpte).
 *
 * Possibly downgrade the protection based on the vpte bits.
 *
 * This implements an N-level page table.  Any level can terminate the
 * scan by setting VPTE_PS.  A linear mapping is accomplished by setting
 * VPTE_PS in the master page directory entry set via mcontrol(MADV_SETMAP).
 */
static
int
vm_fault_vpagetable(struct faultstate *fs, vm_pindex_t *pindex,
		    vpte_t vpte, int fault_type, int allow_nofault)
{
	struct lwbuf *lwb;
	struct lwbuf lwb_cache;
	int vshift = VPTE_FRAME_END - PAGE_SHIFT; /* index bits remaining */
	int result;
	vpte_t *ptep;

	ASSERT_LWKT_TOKEN_HELD(vm_object_token(fs->first_object));
	for (;;) {
		/*
		 * We cannot proceed if the vpte is not valid, not readable
		 * for a read fault, not writable for a write fault, or
		 * not executable for an instruction execution fault.
		 */
		if ((vpte & VPTE_V) == 0) {
			unlock_and_deallocate(fs);
			return (KERN_FAILURE);
		}
		if ((fault_type & VM_PROT_WRITE) && (vpte & VPTE_RW) == 0) {
			unlock_and_deallocate(fs);
			return (KERN_FAILURE);
		}
		if ((fault_type & VM_PROT_EXECUTE) && (vpte & VPTE_NX)) {
			unlock_and_deallocate(fs);
			return (KERN_FAILURE);
		}
		if ((vpte & VPTE_PS) || vshift == 0)
			break;

		/*
		 * Get the page table page.  Nominally we only read the page
		 * table, but since we are actively setting VPTE_M and VPTE_A,
		 * tell vm_fault_object() that we are writing it.
		 *
		 * There is currently no real need to optimize this.
		 */
		result = vm_fault_object(fs, (vpte & VPTE_FRAME) >> PAGE_SHIFT,
					 VM_PROT_READ|VM_PROT_WRITE,
					 allow_nofault);
		if (result != KERN_SUCCESS)
			return (result);

		/*
		 * Process the returned fs.m and look up the page table
		 * entry in the page table page.
		 */
		vshift -= VPTE_PAGE_BITS;
		lwb = lwbuf_alloc(fs->m, &lwb_cache);
		ptep = ((vpte_t *)lwbuf_kva(lwb) +
			((*pindex >> vshift) & VPTE_PAGE_MASK));
		vm_page_activate(fs->m);

		/*
		 * Page table write-back - entire operation including
		 * validation of the pte must be atomic to avoid races
		 * against the vkernel changing the pte.
		 *
		 * If the vpte is valid for the requested operation, do
		 * a write-back to the page table.
		 *
		 * XXX VPTE_M is not set properly for page directory pages.
		 * It doesn't get set in the page directory if the page table
		 * is modified during a read access.
		 */
		for (;;) {
			vpte_t nvpte;

			/*
			 * Reload for the cmpset, but make sure the pte is
			 * still valid.
			 */
			vpte = *ptep;
			cpu_ccfence();
			nvpte = vpte;

			if ((vpte & VPTE_V) == 0)
				break;

			if ((fault_type & VM_PROT_WRITE) && (vpte & VPTE_RW))
				nvpte |= VPTE_M | VPTE_A;
			if (fault_type & (VM_PROT_READ | VM_PROT_EXECUTE))
				nvpte |= VPTE_A;
			if (vpte == nvpte)
				break;
			if (atomic_cmpset_long(ptep, vpte, nvpte)) {
				vm_page_dirty(fs->m);
				break;
			}
		}
		lwbuf_free(lwb);
		vm_page_flag_set(fs->m, PG_REFERENCED);
		vm_page_wakeup(fs->m);
		fs->m = NULL;
		cleanup_successful_fault(fs);
	}

	/*
	 * When the vkernel sets VPTE_RW it expects the real kernel to
	 * reflect VPTE_M back when the page is modified via the mapping.
	 * In order to accomplish this the real kernel must map the page
	 * read-only for read faults and use write faults to reflect VPTE_M
	 * back.
	 *
	 * Once VPTE_M has been set, the real kernel's pte allows writing.
	 * If the vkernel clears VPTE_M the vkernel must be sure to
	 * MADV_INVAL the real kernel's mappings to force the real kernel
	 * to re-fault on the next write so it can set VPTE_M again.
	 */
	if ((fault_type & VM_PROT_WRITE) == 0 &&
	    (vpte & (VPTE_RW | VPTE_M)) != (VPTE_RW | VPTE_M)) {
		fs->first_prot &= ~VM_PROT_WRITE;
	}

	/*
	 * Disable EXECUTE perms if NX bit is set.
	 */
	if (vpte & VPTE_NX)
		fs->first_prot &= ~VM_PROT_EXECUTE;

	/*
	 * Combine remaining address bits with the vpte.
	 */
	*pindex = ((vpte & VPTE_FRAME) >> PAGE_SHIFT) +
		  (*pindex & ((1L << vshift) - 1));
	return (KERN_SUCCESS);
}


/*
 * This is the core of the vm_fault code.
 *
 * Do all operations required to fault-in (fs.first_object, pindex).  Run
 * through the shadow chain as necessary and do required COW or virtual
 * copy operations.  The caller has already fully resolved the vm_map_entry
 * and, if appropriate, has created a copy-on-write layer.  All we need to
 * do is iterate the object chain.
 *
 * On failure (fs) is unlocked and deallocated and the caller may return or
 * retry depending on the failure code.  On success (fs) is NOT unlocked or
 * deallocated, fs.m will contain a resolved, busied page, and fs.object
 * will have an additional PIP count if it is not equal to fs.first_object.
 *
 * If locks based on fs->first_shared or fs->shared are insufficient,
 * clear the appropriate field(s) and return RETRY.  COWs require that
 * first_shared be 0, while page allocations (or frees) require that
 * shared be 0.  Renames require that both be 0.
 *
 * NOTE! fs->[first_]shared might be set with VM_FAULT_DIRTY also set;
 *	 we will have to retry with it exclusive if the vm_page is
 *	 PG_SWAPPED.
 *
 * fs->first_object must be held on call.
 */
static
int
vm_fault_object(struct faultstate *fs, vm_pindex_t first_pindex,
		vm_prot_t fault_type, int allow_nofault)
{
	vm_object_t next_object;
	vm_pindex_t pindex;
	int error;

	ASSERT_LWKT_TOKEN_HELD(vm_object_token(fs->first_object));
	fs->prot = fs->first_prot;
	fs->object = fs->first_object;
	pindex = first_pindex;

	vm_object_chain_acquire(fs->first_object, fs->shared);
	vm_object_pip_add(fs->first_object, 1);

	/*
	 * If a read fault occurs we try to upgrade the page protection
	 * and make it also writable if possible.  There are four cases
	 * where we cannot make the page mapping writable:
	 *
	 * (1) The mapping is read-only or the VM object is read-only,
	 *     fs->prot above will simply not have VM_PROT_WRITE set.
	 *
	 * (2) If the mapping is a virtual page table fs->first_prot will
	 *     have already been properly adjusted by vm_fault_vpagetable()
	 *     to detect writes so we can set VPTE_M in the virtual page
	 *     table.  Used by vkernels.
	 *
	 * (3) If the VM page is read-only or copy-on-write, upgrading would
	 *     just result in an unnecessary COW fault.
	 *
	 * (4) If the pmap specifically requests A/M bit emulation, downgrade
	 *     here.
	 */
#if 0
	/* see vpagetable code */
	if (fs->entry->maptype == VM_MAPTYPE_VPAGETABLE) {
		if ((fault_type & VM_PROT_WRITE) == 0)
			fs->prot &= ~VM_PROT_WRITE;
	}
#endif

	if (curthread->td_lwp && curthread->td_lwp->lwp_vmspace &&
	    pmap_emulate_ad_bits(&curthread->td_lwp->lwp_vmspace->vm_pmap)) {
		if ((fault_type & VM_PROT_WRITE) == 0)
			fs->prot &= ~VM_PROT_WRITE;
	}

	/* vm_object_hold(fs->object); implied b/c object == first_object */

	for (;;) {
		/*
		 * The entire backing chain from first_object to object
		 * inclusive is chainlocked.
		 *
		 * If the object is dead, we stop here.
		 */
		if (fs->object->flags & OBJ_DEAD) {
			vm_object_pip_wakeup(fs->first_object);
			vm_object_chain_release_all(fs->first_object,
						    fs->object);
			if (fs->object != fs->first_object)
				vm_object_drop(fs->object);
			unlock_and_deallocate(fs);
			return (KERN_PROTECTION_FAILURE);
		}

		/*
		 * See if the page is resident.  Wait/Retry if the page is
		 * busy (lots of stuff may have changed so we can't continue
		 * in that case).
		 *
		 * We can theoretically allow the soft-busy case on a read
		 * fault if the page is marked valid, but since such
		 * pages are typically already pmap'd, putting that
		 * special case in might be more effort than it is
		 * worth.  We cannot under any circumstances mess
		 * around with a vm_page_t->busy page except, perhaps,
		 * to pmap it.
		 */
		fs->m = vm_page_lookup_busy_try(fs->object, pindex,
						TRUE, &error);
		if (error) {
			vm_object_pip_wakeup(fs->first_object);
			vm_object_chain_release_all(fs->first_object,
						    fs->object);
			if (fs->object != fs->first_object)
				vm_object_drop(fs->object);
			unlock_things(fs);
			vm_page_sleep_busy(fs->m, TRUE, "vmpfw");
			mycpu->gd_cnt.v_intrans++;
			/*vm_object_deallocate(fs->first_object);*/
			/*fs->first_object = NULL;*/
			fs->m = NULL;
			return (KERN_TRY_AGAIN);
		}
		if (fs->m) {
			/*
			 * The page is busied for us.
			 *
			 * If reactivating a page from PQ_CACHE we may have
			 * to rate-limit.
1556 */ 1557 int queue = fs->m->queue; 1558 vm_page_unqueue_nowakeup(fs->m); 1559 1560 if ((queue - fs->m->pc) == PQ_CACHE && 1561 vm_page_count_severe()) { 1562 vm_page_activate(fs->m); 1563 vm_page_wakeup(fs->m); 1564 fs->m = NULL; 1565 vm_object_pip_wakeup(fs->first_object); 1566 vm_object_chain_release_all(fs->first_object, 1567 fs->object); 1568 if (fs->object != fs->first_object) 1569 vm_object_drop(fs->object); 1570 unlock_and_deallocate(fs); 1571 if (allow_nofault == 0 || 1572 (curthread->td_flags & TDF_NOFAULT) == 0) { 1573 thread_t td; 1574 1575 vm_wait_pfault(); 1576 td = curthread; 1577 if (td->td_proc && (td->td_proc->p_flags & P_LOWMEMKILL)) 1578 return (KERN_PROTECTION_FAILURE); 1579 } 1580 return (KERN_TRY_AGAIN); 1581 } 1582 1583 /* 1584 * If it still isn't completely valid (readable), 1585 * or if a read-ahead-mark is set on the VM page, 1586 * jump to readrest, else we found the page and 1587 * can return. 1588 * 1589 * We can release the spl once we have marked the 1590 * page busy. 1591 */ 1592 if (fs->m->object != &kernel_object) { 1593 if ((fs->m->valid & VM_PAGE_BITS_ALL) != 1594 VM_PAGE_BITS_ALL) { 1595 goto readrest; 1596 } 1597 if (fs->m->flags & PG_RAM) { 1598 if (debug_cluster) 1599 kprintf("R"); 1600 vm_page_flag_clear(fs->m, PG_RAM); 1601 goto readrest; 1602 } 1603 } 1604 break; /* break to PAGE HAS BEEN FOUND */ 1605 } 1606 1607 /* 1608 * Page is not resident, If this is the search termination 1609 * or the pager might contain the page, allocate a new page. 1610 */ 1611 if (TRYPAGER(fs) || fs->object == fs->first_object) { 1612 /* 1613 * Allocating, must be exclusive. 1614 */ 1615 if (fs->object == fs->first_object && 1616 fs->first_shared) { 1617 fs->first_shared = 0; 1618 vm_object_pip_wakeup(fs->first_object); 1619 vm_object_chain_release_all(fs->first_object, 1620 fs->object); 1621 if (fs->object != fs->first_object) 1622 vm_object_drop(fs->object); 1623 unlock_and_deallocate(fs); 1624 return (KERN_TRY_AGAIN); 1625 } 1626 if (fs->object != fs->first_object && 1627 fs->shared) { 1628 fs->first_shared = 0; 1629 fs->shared = 0; 1630 vm_object_pip_wakeup(fs->first_object); 1631 vm_object_chain_release_all(fs->first_object, 1632 fs->object); 1633 if (fs->object != fs->first_object) 1634 vm_object_drop(fs->object); 1635 unlock_and_deallocate(fs); 1636 return (KERN_TRY_AGAIN); 1637 } 1638 1639 /* 1640 * If the page is beyond the object size we fail 1641 */ 1642 if (pindex >= fs->object->size) { 1643 vm_object_pip_wakeup(fs->first_object); 1644 vm_object_chain_release_all(fs->first_object, 1645 fs->object); 1646 if (fs->object != fs->first_object) 1647 vm_object_drop(fs->object); 1648 unlock_and_deallocate(fs); 1649 return (KERN_PROTECTION_FAILURE); 1650 } 1651 1652 /* 1653 * Allocate a new page for this object/offset pair. 1654 * 1655 * It is possible for the allocation to race, so 1656 * handle the case. 1657 */ 1658 fs->m = NULL; 1659 if (!vm_page_count_severe()) { 1660 fs->m = vm_page_alloc(fs->object, pindex, 1661 ((fs->vp || fs->object->backing_object) ? 
1662 VM_ALLOC_NULL_OK | VM_ALLOC_NORMAL : 1663 VM_ALLOC_NULL_OK | VM_ALLOC_NORMAL | 1664 VM_ALLOC_USE_GD | VM_ALLOC_ZERO)); 1665 } 1666 if (fs->m == NULL) { 1667 vm_object_pip_wakeup(fs->first_object); 1668 vm_object_chain_release_all(fs->first_object, 1669 fs->object); 1670 if (fs->object != fs->first_object) 1671 vm_object_drop(fs->object); 1672 unlock_and_deallocate(fs); 1673 if (allow_nofault == 0 || 1674 (curthread->td_flags & TDF_NOFAULT) == 0) { 1675 thread_t td; 1676 1677 vm_wait_pfault(); 1678 td = curthread; 1679 if (td->td_proc && (td->td_proc->p_flags & P_LOWMEMKILL)) 1680 return (KERN_PROTECTION_FAILURE); 1681 } 1682 return (KERN_TRY_AGAIN); 1683 } 1684 1685 /* 1686 * Fall through to readrest. We have a new page which 1687 * will have to be paged (since m->valid will be 0). 1688 */ 1689 } 1690 1691 readrest: 1692 /* 1693 * We have found an invalid or partially valid page, a 1694 * page with a read-ahead mark which might be partially or 1695 * fully valid (and maybe dirty too), or we have allocated 1696 * a new page. 1697 * 1698 * Attempt to fault-in the page if there is a chance that the 1699 * pager has it, and potentially fault in additional pages 1700 * at the same time. 1701 * 1702 * If TRYPAGER is true then fs.m will be non-NULL and busied 1703 * for us. 1704 */ 1705 if (TRYPAGER(fs)) { 1706 int rv; 1707 int seqaccess; 1708 u_char behavior = vm_map_entry_behavior(fs->entry); 1709 1710 if (behavior == MAP_ENTRY_BEHAV_RANDOM) 1711 seqaccess = 0; 1712 else 1713 seqaccess = -1; 1714 1715 /* 1716 * Doing I/O may synchronously insert additional 1717 * pages so we can't be shared at this point either. 1718 * 1719 * NOTE: We can't free fs->m here in the allocated 1720 * case (fs->object != fs->first_object) as 1721 * this would require an exclusively locked 1722 * VM object. 1723 */ 1724 if (fs->object == fs->first_object && 1725 fs->first_shared) { 1726 vm_page_deactivate(fs->m); 1727 vm_page_wakeup(fs->m); 1728 fs->m = NULL; 1729 fs->first_shared = 0; 1730 vm_object_pip_wakeup(fs->first_object); 1731 vm_object_chain_release_all(fs->first_object, 1732 fs->object); 1733 if (fs->object != fs->first_object) 1734 vm_object_drop(fs->object); 1735 unlock_and_deallocate(fs); 1736 return (KERN_TRY_AGAIN); 1737 } 1738 if (fs->object != fs->first_object && 1739 fs->shared) { 1740 vm_page_deactivate(fs->m); 1741 vm_page_wakeup(fs->m); 1742 fs->m = NULL; 1743 fs->first_shared = 0; 1744 fs->shared = 0; 1745 vm_object_pip_wakeup(fs->first_object); 1746 vm_object_chain_release_all(fs->first_object, 1747 fs->object); 1748 if (fs->object != fs->first_object) 1749 vm_object_drop(fs->object); 1750 unlock_and_deallocate(fs); 1751 return (KERN_TRY_AGAIN); 1752 } 1753 1754 /* 1755 * Avoid deadlocking against the map when doing I/O. 1756 * fs.object and the page is PG_BUSY'd. 1757 * 1758 * NOTE: Once unlocked, fs->entry can become stale 1759 * so this will NULL it out. 1760 * 1761 * NOTE: fs->entry is invalid until we relock the 1762 * map and verify that the timestamp has not 1763 * changed. 1764 */ 1765 unlock_map(fs); 1766 1767 /* 1768 * Acquire the page data. We still hold a ref on 1769 * fs.object and the page has been PG_BUSY's. 1770 * 1771 * The pager may replace the page (for example, in 1772 * order to enter a fictitious page into the 1773 * object). If it does so it is responsible for 1774 * cleaning up the passed page and properly setting 1775 * the new page PG_BUSY. 
1776 * 1777 * If we got here through a PG_RAM read-ahead 1778 * mark the page may be partially dirty and thus 1779 * not freeable. Don't bother checking to see 1780 * if the pager has the page because we can't free 1781 * it anyway. We have to depend on the get_page 1782 * operation filling in any gaps whether there is 1783 * backing store or not. 1784 */ 1785 rv = vm_pager_get_page(fs->object, &fs->m, seqaccess); 1786 1787 if (rv == VM_PAGER_OK) { 1788 /* 1789 * Relookup in case pager changed page. Pager 1790 * is responsible for disposition of old page 1791 * if moved. 1792 * 1793 * XXX other code segments do relookups too. 1794 * It's a bad abstraction that needs to be 1795 * fixed/removed. 1796 */ 1797 fs->m = vm_page_lookup(fs->object, pindex); 1798 if (fs->m == NULL) { 1799 vm_object_pip_wakeup(fs->first_object); 1800 vm_object_chain_release_all( 1801 fs->first_object, fs->object); 1802 if (fs->object != fs->first_object) 1803 vm_object_drop(fs->object); 1804 unlock_and_deallocate(fs); 1805 return (KERN_TRY_AGAIN); 1806 } 1807 ++fs->hardfault; 1808 break; /* break to PAGE HAS BEEN FOUND */ 1809 } 1810 1811 /* 1812 * Remove the bogus page (which does not exist at this 1813 * object/offset); before doing so, we must get back 1814 * our object lock to preserve our invariant. 1815 * 1816 * Also wake up any other process that may want to bring 1817 * in this page. 1818 * 1819 * If this is the top-level object, we must leave the 1820 * busy page to prevent another process from rushing 1821 * past us, and inserting the page in that object at 1822 * the same time that we are. 1823 */ 1824 if (rv == VM_PAGER_ERROR) { 1825 if (curproc) { 1826 kprintf("vm_fault: pager read error, " 1827 "pid %d (%s)\n", 1828 curproc->p_pid, 1829 curproc->p_comm); 1830 } else { 1831 kprintf("vm_fault: pager read error, " 1832 "thread %p (%s)\n", 1833 curthread, 1834 curthread->td_comm); 1835 } 1836 } 1837 1838 /* 1839 * Data outside the range of the pager or an I/O error 1840 * 1841 * The page may have been wired during the pagein, 1842 * e.g. by the buffer cache, and cannot simply be 1843 * freed. Call vnode_pager_freepage() to deal with it. 1844 * 1845 * Also note that we cannot free the page if we are 1846 * holding the related object shared. XXX not sure 1847 * what to do in that case. 1848 */ 1849 if (fs->object != fs->first_object) { 1850 /* 1851 * Scrap the page. Check to see if the 1852 * vm_pager_get_page() call has already 1853 * dealt with it. 1854 */ 1855 if (fs->m) { 1856 vnode_pager_freepage(fs->m); 1857 fs->m = NULL; 1858 } 1859 1860 /* 1861 * XXX - we cannot just fall out at this 1862 * point, m has been freed and is invalid! 1863 */ 1864 } 1865 /* 1866 * XXX - the check for kernel_map is a kludge to work 1867 * around having the machine panic on a kernel space 1868 * fault w/ I/O error.
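 *
 * The net effect of the test below is roughly:
 *
 *	VM_PAGER_ERROR on a user map	-> KERN_FAILURE
 *	VM_PAGER_BAD on any map		-> KERN_PROTECTION_FAILURE
 *	VM_PAGER_ERROR on kernel_map	-> fall through to the
 *					   backing-object / zero-fill
 *					   logic below instead of
 *					   returning a failure.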
1869 */ 1870 if (((fs->map != &kernel_map) && 1871 (rv == VM_PAGER_ERROR)) || (rv == VM_PAGER_BAD)) { 1872 if (fs->m) { 1873 if (fs->first_shared) { 1874 vm_page_deactivate(fs->m); 1875 vm_page_wakeup(fs->m); 1876 } else { 1877 vnode_pager_freepage(fs->m); 1878 } 1879 fs->m = NULL; 1880 } 1881 vm_object_pip_wakeup(fs->first_object); 1882 vm_object_chain_release_all(fs->first_object, 1883 fs->object); 1884 if (fs->object != fs->first_object) 1885 vm_object_drop(fs->object); 1886 unlock_and_deallocate(fs); 1887 if (rv == VM_PAGER_ERROR) 1888 return (KERN_FAILURE); 1889 else 1890 return (KERN_PROTECTION_FAILURE); 1891 /* NOT REACHED */ 1892 } 1893 } 1894 1895 /* 1896 * We get here if the object has a default pager (or unwiring) 1897 * or the pager doesn't have the page. 1898 * 1899 * fs->first_m will be used for the COW unless we find a 1900 * deeper page to be mapped read-only, in which case the 1901 * unlock*(fs) will free first_m. 1902 */ 1903 if (fs->object == fs->first_object) 1904 fs->first_m = fs->m; 1905 1906 /* 1907 * Move on to the next object. The chain lock should prevent 1908 * the backing_object from getting ripped out from under us. 1909 * 1910 * The object lock for the next object is governed by 1911 * fs->shared. 1912 */ 1913 if ((next_object = fs->object->backing_object) != NULL) { 1914 if (fs->shared) 1915 vm_object_hold_shared(next_object); 1916 else 1917 vm_object_hold(next_object); 1918 vm_object_chain_acquire(next_object, fs->shared); 1919 KKASSERT(next_object == fs->object->backing_object); 1920 pindex += OFF_TO_IDX(fs->object->backing_object_offset); 1921 } 1922 1923 if (next_object == NULL) { 1924 /* 1925 * If there's no object left, fill the page in the top 1926 * object with zeros. 1927 */ 1928 if (fs->object != fs->first_object) { 1929 #if 0 1930 if (fs->first_object->backing_object != 1931 fs->object) { 1932 vm_object_hold(fs->first_object->backing_object); 1933 } 1934 #endif 1935 vm_object_chain_release_all( 1936 fs->first_object->backing_object, 1937 fs->object); 1938 #if 0 1939 if (fs->first_object->backing_object != 1940 fs->object) { 1941 vm_object_drop(fs->first_object->backing_object); 1942 } 1943 #endif 1944 vm_object_pip_wakeup(fs->object); 1945 vm_object_drop(fs->object); 1946 fs->object = fs->first_object; 1947 pindex = first_pindex; 1948 fs->m = fs->first_m; 1949 } 1950 fs->first_m = NULL; 1951 1952 /* 1953 * Zero the page and mark it valid. 1954 */ 1955 vm_page_zero_fill(fs->m); 1956 mycpu->gd_cnt.v_zfod++; 1957 fs->m->valid = VM_PAGE_BITS_ALL; 1958 break; /* break to PAGE HAS BEEN FOUND */ 1959 } 1960 if (fs->object != fs->first_object) { 1961 vm_object_pip_wakeup(fs->object); 1962 vm_object_lock_swap(); 1963 vm_object_drop(fs->object); 1964 } 1965 KASSERT(fs->object != next_object, 1966 ("object loop %p", next_object)); 1967 fs->object = next_object; 1968 vm_object_pip_add(fs->object, 1); 1969 } 1970 1971 /* 1972 * PAGE HAS BEEN FOUND. [Loop invariant still holds -- the object lock 1973 * is held.] 1974 * 1975 * object still held. 1976 * 1977 * local shared variable may be different from fs->shared. 1978 * 1979 * If the page is being written, but isn't already owned by the 1980 * top-level object, we have to copy it into a new page owned by the 1981 * top-level object. 1982 */ 1983 KASSERT((fs->m->flags & PG_BUSY) != 0, 1984 ("vm_fault: not busy after main loop")); 1985 1986 if (fs->object != fs->first_object) { 1987 /* 1988 * We only really need to copy if we want to write it. 
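 *
 * In short: for a read fault the else-branch at the bottom of
 * this block just clears VM_PROT_WRITE in fs->prot and maps the
 * backing object's page read-only.  For a write fault we either
 * move the page into first_object outright (the v_cow_optim
 * case) or copy it into fs->first_m with vm_page_copy().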
1989 */ 1990 if (fault_type & VM_PROT_WRITE) { 1991 /* 1992 * This allows pages to be virtually copied from a 1993 * backing_object into the first_object, where the 1994 * backing object has no other refs to it, and cannot 1995 * gain any more refs. Instead of a bcopy, we just 1996 * move the page from the backing object to the 1997 * first object. Note that we must mark the page 1998 * dirty in the first object so that it will go out 1999 * to swap when needed. 2000 */ 2001 if ( 2002 /* 2003 * Must be holding exclusive locks 2004 */ 2005 fs->first_shared == 0 && 2006 fs->shared == 0 && 2007 /* 2008 * Map, if present, has not changed 2009 */ 2010 (fs->map == NULL || 2011 fs->map_generation == fs->map->timestamp) && 2012 /* 2013 * Only one shadow object 2014 */ 2015 (fs->object->shadow_count == 1) && 2016 /* 2017 * No COW refs, except us 2018 */ 2019 (fs->object->ref_count == 1) && 2020 /* 2021 * No one else can look this object up 2022 */ 2023 (fs->object->handle == NULL) && 2024 /* 2025 * No other ways to look the object up 2026 */ 2027 ((fs->object->type == OBJT_DEFAULT) || 2028 (fs->object->type == OBJT_SWAP)) && 2029 /* 2030 * We don't chase down the shadow chain 2031 */ 2032 (fs->object == fs->first_object->backing_object) && 2033 2034 /* 2035 * grab the lock if we need to 2036 */ 2037 (fs->lookup_still_valid || 2038 fs->map == NULL || 2039 lockmgr(&fs->map->lock, LK_EXCLUSIVE|LK_NOWAIT) == 0) 2040 ) { 2041 /* 2042 * (first_m) and (m) are both busied. We have 2043 * move (m) into (first_m)'s object/pindex 2044 * in an atomic fashion, then free (first_m). 2045 * 2046 * first_object is held so second remove 2047 * followed by the rename should wind 2048 * up being atomic. vm_page_free() might 2049 * block so we don't do it until after the 2050 * rename. 2051 */ 2052 fs->lookup_still_valid = 1; 2053 vm_page_protect(fs->first_m, VM_PROT_NONE); 2054 vm_page_remove(fs->first_m); 2055 vm_page_rename(fs->m, fs->first_object, 2056 first_pindex); 2057 vm_page_free(fs->first_m); 2058 fs->first_m = fs->m; 2059 fs->m = NULL; 2060 mycpu->gd_cnt.v_cow_optim++; 2061 } else { 2062 /* 2063 * Oh, well, lets copy it. 2064 * 2065 * Why are we unmapping the original page 2066 * here? Well, in short, not all accessors 2067 * of user memory go through the pmap. The 2068 * procfs code doesn't have access user memory 2069 * via a local pmap, so vm_fault_page*() 2070 * can't call pmap_enter(). And the umtx*() 2071 * code may modify the COW'd page via a DMAP 2072 * or kernel mapping and not via the pmap, 2073 * leaving the original page still mapped 2074 * read-only into the pmap. 2075 * 2076 * So we have to remove the page from at 2077 * least the current pmap if it is in it. 2078 * Just remove it from all pmaps. 2079 */ 2080 KKASSERT(fs->first_shared == 0); 2081 vm_page_copy(fs->m, fs->first_m); 2082 vm_page_protect(fs->m, VM_PROT_NONE); 2083 } 2084 2085 /* 2086 * We no longer need the old page or object. 2087 */ 2088 if (fs->m) 2089 release_page(fs); 2090 2091 /* 2092 * We intend to revert to first_object, undo the 2093 * chain lock through to that. 
2094 */ 2095 #if 0 2096 if (fs->first_object->backing_object != fs->object) 2097 vm_object_hold(fs->first_object->backing_object); 2098 #endif 2099 vm_object_chain_release_all( 2100 fs->first_object->backing_object, 2101 fs->object); 2102 #if 0 2103 if (fs->first_object->backing_object != fs->object) 2104 vm_object_drop(fs->first_object->backing_object); 2105 #endif 2106 2107 /* 2108 * fs->object != fs->first_object due to above 2109 * conditional 2110 */ 2111 vm_object_pip_wakeup(fs->object); 2112 vm_object_drop(fs->object); 2113 2114 /* 2115 * Only use the new page below... 2116 */ 2117 mycpu->gd_cnt.v_cow_faults++; 2118 fs->m = fs->first_m; 2119 fs->object = fs->first_object; 2120 pindex = first_pindex; 2121 } else { 2122 /* 2123 * If it wasn't a write fault avoid having to copy 2124 * the page by mapping it read-only. 2125 */ 2126 fs->prot &= ~VM_PROT_WRITE; 2127 } 2128 } 2129 2130 /* 2131 * Relock the map if necessary, then check the generation count. 2132 * relock_map() will update fs->timestamp to account for the 2133 * relocking if necessary. 2134 * 2135 * If the count has changed after relocking then all sorts of 2136 * crap may have happened and we have to retry. 2137 * 2138 * NOTE: The relock_map() can fail due to a deadlock against 2139 * the vm_page we are holding BUSY. 2140 */ 2141 if (fs->lookup_still_valid == FALSE && fs->map) { 2142 if (relock_map(fs) || 2143 fs->map->timestamp != fs->map_generation) { 2144 release_page(fs); 2145 vm_object_pip_wakeup(fs->first_object); 2146 vm_object_chain_release_all(fs->first_object, 2147 fs->object); 2148 if (fs->object != fs->first_object) 2149 vm_object_drop(fs->object); 2150 unlock_and_deallocate(fs); 2151 return (KERN_TRY_AGAIN); 2152 } 2153 } 2154 2155 /* 2156 * If the fault is a write, we know that this page is being 2157 * written NOW so dirty it explicitly to save on pmap_is_modified() 2158 * calls later. 2159 * 2160 * If this is a NOSYNC mmap we do not want to set PG_NOSYNC 2161 * if the page is already dirty to prevent data written with 2162 * the expectation of being synced from not being synced. 2163 * Likewise if this entry does not request NOSYNC then make 2164 * sure the page isn't marked NOSYNC. Applications sharing 2165 * data should use the same flags to avoid ping ponging. 2166 * 2167 * Also tell the backing pager, if any, that it should remove 2168 * any swap backing since the page is now dirty. 2169 */ 2170 vm_page_activate(fs->m); 2171 if (fs->prot & VM_PROT_WRITE) { 2172 vm_object_set_writeable_dirty(fs->m->object); 2173 vm_set_nosync(fs->m, fs->entry); 2174 if (fs->fault_flags & VM_FAULT_DIRTY) { 2175 vm_page_dirty(fs->m); 2176 if (fs->m->flags & PG_SWAPPED) { 2177 /* 2178 * If the page is swapped out we have to call 2179 * swap_pager_unswapped() which requires an 2180 * exclusive object lock. If we are shared, 2181 * we must clear the shared flag and retry. 
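 *
 * This follows the same retry convention used earlier in this
 * routine: clear fs->first_shared (or fs->shared), unwind the
 * pip count and chain locks, unlock_and_deallocate(), and
 * return KERN_TRY_AGAIN so the fault is re-run with the object
 * held exclusively.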
2182 */ 2183 if ((fs->object == fs->first_object && 2184 fs->first_shared) || 2185 (fs->object != fs->first_object && 2186 fs->shared)) { 2187 vm_page_wakeup(fs->m); 2188 fs->m = NULL; 2189 if (fs->object == fs->first_object) 2190 fs->first_shared = 0; 2191 else 2192 fs->shared = 0; 2193 vm_object_pip_wakeup(fs->first_object); 2194 vm_object_chain_release_all( 2195 fs->first_object, fs->object); 2196 if (fs->object != fs->first_object) 2197 vm_object_drop(fs->object); 2198 unlock_and_deallocate(fs); 2199 return (KERN_TRY_AGAIN); 2200 } 2201 swap_pager_unswapped(fs->m); 2202 } 2203 } 2204 } 2205 2206 vm_object_pip_wakeup(fs->first_object); 2207 vm_object_chain_release_all(fs->first_object, fs->object); 2208 if (fs->object != fs->first_object) 2209 vm_object_drop(fs->object); 2210 2211 /* 2212 * Page had better still be busy. We are still locked up and 2213 * fs->object will have another PIP reference if it is not equal 2214 * to fs->first_object. 2215 */ 2216 KASSERT(fs->m->flags & PG_BUSY, 2217 ("vm_fault: page %p not busy!", fs->m)); 2218 2219 /* 2220 * Sanity check: page must be completely valid or it is not fit to 2221 * map into user space. vm_pager_get_pages() ensures this. 2222 */ 2223 if (fs->m->valid != VM_PAGE_BITS_ALL) { 2224 vm_page_zero_invalid(fs->m, TRUE); 2225 kprintf("Warning: page %p partially invalid on fault\n", fs->m); 2226 } 2227 2228 return (KERN_SUCCESS); 2229 } 2230 2231 /* 2232 * Wire down a range of virtual addresses in a map. The entry in question 2233 * should be marked in-transition and the map must be locked. We must 2234 * release the map temporarily while faulting-in the page to avoid a 2235 * deadlock. Note that the entry may be clipped while we are blocked but 2236 * will never be freed. 2237 * 2238 * No requirements. 2239 */ 2240 int 2241 vm_fault_wire(vm_map_t map, vm_map_entry_t entry, 2242 boolean_t user_wire, int kmflags) 2243 { 2244 boolean_t fictitious; 2245 vm_offset_t start; 2246 vm_offset_t end; 2247 vm_offset_t va; 2248 pmap_t pmap; 2249 int rv; 2250 int wire_prot; 2251 int fault_flags; 2252 vm_page_t m; 2253 2254 if (user_wire) { 2255 wire_prot = VM_PROT_READ; 2256 fault_flags = VM_FAULT_USER_WIRE; 2257 } else { 2258 wire_prot = VM_PROT_READ | VM_PROT_WRITE; 2259 fault_flags = VM_FAULT_CHANGE_WIRING; 2260 } 2261 if (kmflags & KM_NOTLBSYNC) 2262 wire_prot |= VM_PROT_NOSYNC; 2263 2264 pmap = vm_map_pmap(map); 2265 start = entry->start; 2266 end = entry->end; 2267 2268 switch(entry->maptype) { 2269 case VM_MAPTYPE_NORMAL: 2270 case VM_MAPTYPE_VPAGETABLE: 2271 fictitious = entry->object.vm_object && 2272 ((entry->object.vm_object->type == OBJT_DEVICE) || 2273 (entry->object.vm_object->type == OBJT_MGTDEVICE)); 2274 break; 2275 case VM_MAPTYPE_UKSMAP: 2276 fictitious = TRUE; 2277 break; 2278 default: 2279 fictitious = FALSE; 2280 break; 2281 } 2282 2283 if (entry->eflags & MAP_ENTRY_KSTACK) 2284 start += PAGE_SIZE; 2285 map->timestamp++; 2286 vm_map_unlock(map); 2287 2288 /* 2289 * We simulate a fault to get the page and enter it in the physical 2290 * map. 
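 *
 * If any iteration fails, the loop below backs out by unwiring
 * every page already wired for this entry (walking va back down
 * toward start) before returning the error, so the entry is
 * never left partially wired.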
2291 */ 2292 for (va = start; va < end; va += PAGE_SIZE) { 2293 rv = vm_fault(map, va, wire_prot, fault_flags); 2294 if (rv) { 2295 while (va > start) { 2296 va -= PAGE_SIZE; 2297 m = pmap_unwire(pmap, va); 2298 if (m && !fictitious) { 2299 vm_page_busy_wait(m, FALSE, "vmwrpg"); 2300 vm_page_unwire(m, 1); 2301 vm_page_wakeup(m); 2302 } 2303 } 2304 goto done; 2305 } 2306 } 2307 rv = KERN_SUCCESS; 2308 done: 2309 vm_map_lock(map); 2310 2311 return (rv); 2312 } 2313 2314 /* 2315 * Unwire a range of virtual addresses in a map. The map should be 2316 * locked. 2317 */ 2318 void 2319 vm_fault_unwire(vm_map_t map, vm_map_entry_t entry) 2320 { 2321 boolean_t fictitious; 2322 vm_offset_t start; 2323 vm_offset_t end; 2324 vm_offset_t va; 2325 pmap_t pmap; 2326 vm_page_t m; 2327 2328 pmap = vm_map_pmap(map); 2329 start = entry->start; 2330 end = entry->end; 2331 fictitious = entry->object.vm_object && 2332 ((entry->object.vm_object->type == OBJT_DEVICE) || 2333 (entry->object.vm_object->type == OBJT_MGTDEVICE)); 2334 if (entry->eflags & MAP_ENTRY_KSTACK) 2335 start += PAGE_SIZE; 2336 2337 /* 2338 * Since the pages are wired down, we must be able to get their 2339 * mappings from the physical map system. 2340 */ 2341 for (va = start; va < end; va += PAGE_SIZE) { 2342 m = pmap_unwire(pmap, va); 2343 if (m && !fictitious) { 2344 vm_page_busy_wait(m, FALSE, "vmwrpg"); 2345 vm_page_unwire(m, 1); 2346 vm_page_wakeup(m); 2347 } 2348 } 2349 } 2350 2351 /* 2352 * Copy all of the pages from a wired-down map entry to another. 2353 * 2354 * The source and destination maps must be locked for write. 2355 * The source and destination maps token must be held 2356 * The source map entry must be wired down (or be a sharing map 2357 * entry corresponding to a main map entry that is wired down). 2358 * 2359 * No other requirements. 2360 * 2361 * XXX do segment optimization 2362 */ 2363 void 2364 vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map, 2365 vm_map_entry_t dst_entry, vm_map_entry_t src_entry) 2366 { 2367 vm_object_t dst_object; 2368 vm_object_t src_object; 2369 vm_ooffset_t dst_offset; 2370 vm_ooffset_t src_offset; 2371 vm_prot_t prot; 2372 vm_offset_t vaddr; 2373 vm_page_t dst_m; 2374 vm_page_t src_m; 2375 2376 src_object = src_entry->object.vm_object; 2377 src_offset = src_entry->offset; 2378 2379 /* 2380 * Create the top-level object for the destination entry. (Doesn't 2381 * actually shadow anything - we copy the pages directly.) 2382 */ 2383 vm_map_entry_allocate_object(dst_entry); 2384 dst_object = dst_entry->object.vm_object; 2385 2386 prot = dst_entry->max_protection; 2387 2388 /* 2389 * Loop through all of the pages in the entry's range, copying each 2390 * one from the source object (it should be there) to the destination 2391 * object. 2392 */ 2393 vm_object_hold(src_object); 2394 vm_object_hold(dst_object); 2395 for (vaddr = dst_entry->start, dst_offset = 0; 2396 vaddr < dst_entry->end; 2397 vaddr += PAGE_SIZE, dst_offset += PAGE_SIZE) { 2398 2399 /* 2400 * Allocate a page in the destination object 2401 */ 2402 do { 2403 dst_m = vm_page_alloc(dst_object, 2404 OFF_TO_IDX(dst_offset), 2405 VM_ALLOC_NORMAL); 2406 if (dst_m == NULL) { 2407 vm_wait(0); 2408 } 2409 } while (dst_m == NULL); 2410 2411 /* 2412 * Find the page in the source object, and copy it in. 2413 * (Because the source is wired down, the page will be in 2414 * memory.) 
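 *
 * The per-page sequence below is: look up the wired source page
 * (panic if it is somehow missing), vm_page_copy() it into the
 * newly allocated destination page, pmap_enter() the copy at
 * vaddr with the entry's max_protection, then activate and
 * wake up the destination page.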
2415 */ 2416 src_m = vm_page_lookup(src_object, 2417 OFF_TO_IDX(dst_offset + src_offset)); 2418 if (src_m == NULL) 2419 panic("vm_fault_copy_wired: page missing"); 2420 2421 vm_page_copy(src_m, dst_m); 2422 2423 /* 2424 * Enter it in the pmap... 2425 */ 2426 pmap_enter(dst_map->pmap, vaddr, dst_m, prot, FALSE, dst_entry); 2427 2428 /* 2429 * Mark it no longer busy, and put it on the active list. 2430 */ 2431 vm_page_activate(dst_m); 2432 vm_page_wakeup(dst_m); 2433 } 2434 vm_object_drop(dst_object); 2435 vm_object_drop(src_object); 2436 } 2437 2438 #if 0 2439 2440 /* 2441 * This routine checks around the requested page for other pages that 2442 * might be able to be faulted in. This routine brackets the viable 2443 * pages for the pages to be paged in. 2444 * 2445 * Inputs: 2446 * m, rbehind, rahead 2447 * 2448 * Outputs: 2449 * marray (array of vm_page_t), reqpage (index of requested page) 2450 * 2451 * Return value: 2452 * number of pages in marray 2453 */ 2454 static int 2455 vm_fault_additional_pages(vm_page_t m, int rbehind, int rahead, 2456 vm_page_t *marray, int *reqpage) 2457 { 2458 int i,j; 2459 vm_object_t object; 2460 vm_pindex_t pindex, startpindex, endpindex, tpindex; 2461 vm_page_t rtm; 2462 int cbehind, cahead; 2463 2464 object = m->object; 2465 pindex = m->pindex; 2466 2467 /* 2468 * we don't fault-ahead for device pager 2469 */ 2470 if ((object->type == OBJT_DEVICE) || 2471 (object->type == OBJT_MGTDEVICE)) { 2472 *reqpage = 0; 2473 marray[0] = m; 2474 return 1; 2475 } 2476 2477 /* 2478 * if the requested page is not available, then give up now 2479 */ 2480 if (!vm_pager_has_page(object, pindex, &cbehind, &cahead)) { 2481 *reqpage = 0; /* not used by caller, fix compiler warn */ 2482 return 0; 2483 } 2484 2485 if ((cbehind == 0) && (cahead == 0)) { 2486 *reqpage = 0; 2487 marray[0] = m; 2488 return 1; 2489 } 2490 2491 if (rahead > cahead) { 2492 rahead = cahead; 2493 } 2494 2495 if (rbehind > cbehind) { 2496 rbehind = cbehind; 2497 } 2498 2499 /* 2500 * Do not do any readahead if we have insufficient free memory. 2501 * 2502 * XXX code was broken disabled before and has instability 2503 * with this conditonal fixed, so shortcut for now. 2504 */ 2505 if (burst_fault == 0 || vm_page_count_severe()) { 2506 marray[0] = m; 2507 *reqpage = 0; 2508 return 1; 2509 } 2510 2511 /* 2512 * scan backward for the read behind pages -- in memory 2513 * 2514 * Assume that if the page is not found an interrupt will not 2515 * create it. Theoretically interrupts can only remove (busy) 2516 * pages, not create new associations. 
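 *
 * The backward scan below first walks tpindex down from pindex
 * until it finds an already-resident page (or reaches
 * startpindex), then allocates placeholder pages forward from
 * that point up to, but not including, the requested page.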
2517 */ 2518 if (pindex > 0) { 2519 if (rbehind > pindex) { 2520 rbehind = pindex; 2521 startpindex = 0; 2522 } else { 2523 startpindex = pindex - rbehind; 2524 } 2525 2526 vm_object_hold(object); 2527 for (tpindex = pindex; tpindex > startpindex; --tpindex) { 2528 if (vm_page_lookup(object, tpindex - 1)) 2529 break; 2530 } 2531 2532 i = 0; 2533 while (tpindex < pindex) { 2534 rtm = vm_page_alloc(object, tpindex, VM_ALLOC_SYSTEM | 2535 VM_ALLOC_NULL_OK); 2536 if (rtm == NULL) { 2537 for (j = 0; j < i; j++) { 2538 vm_page_free(marray[j]); 2539 } 2540 vm_object_drop(object); 2541 marray[0] = m; 2542 *reqpage = 0; 2543 return 1; 2544 } 2545 marray[i] = rtm; 2546 ++i; 2547 ++tpindex; 2548 } 2549 vm_object_drop(object); 2550 } else { 2551 i = 0; 2552 } 2553 2554 /* 2555 * Assign requested page 2556 */ 2557 marray[i] = m; 2558 *reqpage = i; 2559 ++i; 2560 2561 /* 2562 * Scan forwards for read-ahead pages 2563 */ 2564 tpindex = pindex + 1; 2565 endpindex = tpindex + rahead; 2566 if (endpindex > object->size) 2567 endpindex = object->size; 2568 2569 vm_object_hold(object); 2570 while (tpindex < endpindex) { 2571 if (vm_page_lookup(object, tpindex)) 2572 break; 2573 rtm = vm_page_alloc(object, tpindex, VM_ALLOC_SYSTEM | 2574 VM_ALLOC_NULL_OK); 2575 if (rtm == NULL) 2576 break; 2577 marray[i] = rtm; 2578 ++i; 2579 ++tpindex; 2580 } 2581 vm_object_drop(object); 2582 2583 return (i); 2584 } 2585 2586 #endif 2587 2588 /* 2589 * vm_prefault() provides a quick way of clustering pagefaults into a 2590 * processes address space. It is a "cousin" of pmap_object_init_pt, 2591 * except it runs at page fault time instead of mmap time. 2592 * 2593 * vm.fast_fault Enables pre-faulting zero-fill pages 2594 * 2595 * vm.prefault_pages Number of pages (1/2 negative, 1/2 positive) to 2596 * prefault. Scan stops in either direction when 2597 * a page is found to already exist. 2598 * 2599 * This code used to be per-platform pmap_prefault(). It is now 2600 * machine-independent and enhanced to also pre-fault zero-fill pages 2601 * (see vm.fast_fault) as well as make them writable, which greatly 2602 * reduces the number of page faults programs incur. 2603 * 2604 * Application performance when pre-faulting zero-fill pages is heavily 2605 * dependent on the application. Very tiny applications like /bin/echo 2606 * lose a little performance while applications of any appreciable size 2607 * gain performance. Prefaulting multiple pages also reduces SMP 2608 * congestion and can improve SMP performance significantly. 2609 * 2610 * NOTE! prot may allow writing but this only applies to the top level 2611 * object. If we wind up mapping a page extracted from a backing 2612 * object we have to make sure it is read-only. 2613 * 2614 * NOTE! The caller has already handled any COW operations on the 2615 * vm_map_entry via the normal fault code. Do NOT call this 2616 * shortcut unless the normal fault code has run on this entry. 2617 * 2618 * The related map must be locked. 2619 * No other requirements. 2620 */ 2621 static int vm_prefault_pages = 8; 2622 SYSCTL_INT(_vm, OID_AUTO, prefault_pages, CTLFLAG_RW, &vm_prefault_pages, 0, 2623 "Maximum number of pages to pre-fault"); 2624 static int vm_fast_fault = 1; 2625 SYSCTL_INT(_vm, OID_AUTO, fast_fault, CTLFLAG_RW, &vm_fast_fault, 0, 2626 "Burst fault zero-fill regions"); 2627 2628 /* 2629 * Set PG_NOSYNC if the map entry indicates so, but only if the page 2630 * is not already dirty by other means. 
This will prevent passive 2631 * filesystem syncing as well as 'sync' from writing out the page. 2632 */ 2633 static void 2634 vm_set_nosync(vm_page_t m, vm_map_entry_t entry) 2635 { 2636 if (entry->eflags & MAP_ENTRY_NOSYNC) { 2637 if (m->dirty == 0) 2638 vm_page_flag_set(m, PG_NOSYNC); 2639 } else { 2640 vm_page_flag_clear(m, PG_NOSYNC); 2641 } 2642 } 2643 2644 static void 2645 vm_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry, int prot, 2646 int fault_flags) 2647 { 2648 struct lwp *lp; 2649 vm_page_t m; 2650 vm_offset_t addr; 2651 vm_pindex_t index; 2652 vm_pindex_t pindex; 2653 vm_object_t object; 2654 int pprot; 2655 int i; 2656 int noneg; 2657 int nopos; 2658 int maxpages; 2659 2660 /* 2661 * Get stable max count value, disabled if set to 0 2662 */ 2663 maxpages = vm_prefault_pages; 2664 cpu_ccfence(); 2665 if (maxpages <= 0) 2666 return; 2667 2668 /* 2669 * We do not currently prefault mappings that use virtual page 2670 * tables. We do not prefault foreign pmaps. 2671 */ 2672 if (entry->maptype != VM_MAPTYPE_NORMAL) 2673 return; 2674 lp = curthread->td_lwp; 2675 if (lp == NULL || (pmap != vmspace_pmap(lp->lwp_vmspace))) 2676 return; 2677 2678 /* 2679 * Limit pre-fault count to 1024 pages. 2680 */ 2681 if (maxpages > 1024) 2682 maxpages = 1024; 2683 2684 object = entry->object.vm_object; 2685 KKASSERT(object != NULL); 2686 KKASSERT(object == entry->object.vm_object); 2687 2688 /* 2689 * NOTE: VM_FAULT_DIRTY allowed later so must hold object exclusively 2690 * now (or do something more complex XXX). 2691 */ 2692 vm_object_hold(object); 2693 vm_object_chain_acquire(object, 0); 2694 2695 noneg = 0; 2696 nopos = 0; 2697 for (i = 0; i < maxpages; ++i) { 2698 vm_object_t lobject; 2699 vm_object_t nobject; 2700 int allocated = 0; 2701 int error; 2702 2703 /* 2704 * This can eat a lot of time on a heavily contended 2705 * machine so yield on the tick if needed. 2706 */ 2707 if ((i & 7) == 7) 2708 lwkt_yield(); 2709 2710 /* 2711 * Calculate the page to pre-fault, stopping the scan in 2712 * each direction separately if the limit is reached. 2713 */ 2714 if (i & 1) { 2715 if (noneg) 2716 continue; 2717 addr = addra - ((i + 1) >> 1) * PAGE_SIZE; 2718 } else { 2719 if (nopos) 2720 continue; 2721 addr = addra + ((i + 2) >> 1) * PAGE_SIZE; 2722 } 2723 if (addr < entry->start) { 2724 noneg = 1; 2725 if (noneg && nopos) 2726 break; 2727 continue; 2728 } 2729 if (addr >= entry->end) { 2730 nopos = 1; 2731 if (noneg && nopos) 2732 break; 2733 continue; 2734 } 2735 2736 /* 2737 * Skip pages already mapped, and stop scanning in that 2738 * direction. When the scan terminates in both directions 2739 * we are done. 2740 */ 2741 if (pmap_prefault_ok(pmap, addr) == 0) { 2742 if (i & 1) 2743 noneg = 1; 2744 else 2745 nopos = 1; 2746 if (noneg && nopos) 2747 break; 2748 continue; 2749 } 2750 2751 /* 2752 * Follow the VM object chain to obtain the page to be mapped 2753 * into the pmap. 2754 * 2755 * If we reach the terminal object without finding a page 2756 * and we determine it would be advantageous, then allocate 2757 * a zero-fill page for the base object. The base object 2758 * is guaranteed to be OBJT_DEFAULT for this case. 2759 * 2760 * In order to not have to check the pager via *haspage*() 2761 * we stop if any non-default object is encountered. e.g. 2762 * a vnode or swap object would stop the loop. 
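 *
 * Each hop down the chain below also translates pindex into the
 * backing object's index space and strips VM_PROT_WRITE from
 * pprot, so a page found in a backing object is only ever
 * mapped read-only here; the normal fault path handles any COW.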
2763 */ 2764 index = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT; 2765 lobject = object; 2766 pindex = index; 2767 pprot = prot; 2768 2769 KKASSERT(lobject == entry->object.vm_object); 2770 /*vm_object_hold(lobject); implied */ 2771 2772 while ((m = vm_page_lookup_busy_try(lobject, pindex, 2773 TRUE, &error)) == NULL) { 2774 if (lobject->type != OBJT_DEFAULT) 2775 break; 2776 if (lobject->backing_object == NULL) { 2777 if (vm_fast_fault == 0) 2778 break; 2779 if ((prot & VM_PROT_WRITE) == 0 || 2780 vm_page_count_min(0)) { 2781 break; 2782 } 2783 2784 /* 2785 * NOTE: Allocated from base object 2786 */ 2787 m = vm_page_alloc(object, index, 2788 VM_ALLOC_NORMAL | 2789 VM_ALLOC_ZERO | 2790 VM_ALLOC_USE_GD | 2791 VM_ALLOC_NULL_OK); 2792 if (m == NULL) 2793 break; 2794 allocated = 1; 2795 pprot = prot; 2796 /* lobject = object .. not needed */ 2797 break; 2798 } 2799 if (lobject->backing_object_offset & PAGE_MASK) 2800 break; 2801 nobject = lobject->backing_object; 2802 vm_object_hold(nobject); 2803 KKASSERT(nobject == lobject->backing_object); 2804 pindex += lobject->backing_object_offset >> PAGE_SHIFT; 2805 if (lobject != object) { 2806 vm_object_lock_swap(); 2807 vm_object_drop(lobject); 2808 } 2809 lobject = nobject; 2810 pprot &= ~VM_PROT_WRITE; 2811 vm_object_chain_acquire(lobject, 0); 2812 } 2813 2814 /* 2815 * NOTE: A non-NULL (m) will be associated with lobject if 2816 * it was found there, otherwise it is probably a 2817 * zero-fill page associated with the base object. 2818 * 2819 * Give-up if no page is available. 2820 */ 2821 if (m == NULL) { 2822 if (lobject != object) { 2823 #if 0 2824 if (object->backing_object != lobject) 2825 vm_object_hold(object->backing_object); 2826 #endif 2827 vm_object_chain_release_all( 2828 object->backing_object, lobject); 2829 #if 0 2830 if (object->backing_object != lobject) 2831 vm_object_drop(object->backing_object); 2832 #endif 2833 vm_object_drop(lobject); 2834 } 2835 break; 2836 } 2837 2838 /* 2839 * The object must be marked dirty if we are mapping a 2840 * writable page. m->object is either lobject or object, 2841 * both of which are still held. Do this before we 2842 * potentially drop the object. 2843 */ 2844 if (pprot & VM_PROT_WRITE) 2845 vm_object_set_writeable_dirty(m->object); 2846 2847 /* 2848 * Do not conditionalize on PG_RAM. If pages are present in 2849 * the VM system we assume optimal caching. If caching is 2850 * not optimal the I/O gravy train will be restarted when we 2851 * hit an unavailable page. We do not want to try to restart 2852 * the gravy train now because we really don't know how much 2853 * of the object has been cached. The cost for restarting 2854 * the gravy train should be low (since accesses will likely 2855 * be I/O bound anyway). 2856 */ 2857 if (lobject != object) { 2858 #if 0 2859 if (object->backing_object != lobject) 2860 vm_object_hold(object->backing_object); 2861 #endif 2862 vm_object_chain_release_all(object->backing_object, 2863 lobject); 2864 #if 0 2865 if (object->backing_object != lobject) 2866 vm_object_drop(object->backing_object); 2867 #endif 2868 vm_object_drop(lobject); 2869 } 2870 2871 /* 2872 * Enter the page into the pmap if appropriate. If we had 2873 * allocated the page we have to place it on a queue. If not 2874 * we just have to make sure it isn't on the cache queue 2875 * (pages on the cache queue are not allowed to be mapped). 2876 */ 2877 if (allocated) { 2878 /* 2879 * Page must be zerod. 
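 * The page is then marked fully valid, entered into the pmap,
 * and deactivated rather than activated, so a speculative
 * zero-fill page that is never touched remains easy to reclaim.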
2880 */ 2881 vm_page_zero_fill(m); 2882 mycpu->gd_cnt.v_zfod++; 2883 m->valid = VM_PAGE_BITS_ALL; 2884 2885 /* 2886 * Handle dirty page case 2887 */ 2888 if (pprot & VM_PROT_WRITE) 2889 vm_set_nosync(m, entry); 2890 pmap_enter(pmap, addr, m, pprot, 0, entry); 2891 mycpu->gd_cnt.v_vm_faults++; 2892 if (curthread->td_lwp) 2893 ++curthread->td_lwp->lwp_ru.ru_minflt; 2894 vm_page_deactivate(m); 2895 if (pprot & VM_PROT_WRITE) { 2896 /*vm_object_set_writeable_dirty(m->object);*/ 2897 vm_set_nosync(m, entry); 2898 if (fault_flags & VM_FAULT_DIRTY) { 2899 vm_page_dirty(m); 2900 /*XXX*/ 2901 swap_pager_unswapped(m); 2902 } 2903 } 2904 vm_page_wakeup(m); 2905 } else if (error) { 2906 /* couldn't busy page, no wakeup */ 2907 } else if ( 2908 ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) && 2909 (m->flags & PG_FICTITIOUS) == 0) { 2910 /* 2911 * A fully valid page not undergoing soft I/O can 2912 * be immediately entered into the pmap. 2913 */ 2914 if ((m->queue - m->pc) == PQ_CACHE) 2915 vm_page_deactivate(m); 2916 if (pprot & VM_PROT_WRITE) { 2917 /*vm_object_set_writeable_dirty(m->object);*/ 2918 vm_set_nosync(m, entry); 2919 if (fault_flags & VM_FAULT_DIRTY) { 2920 vm_page_dirty(m); 2921 /*XXX*/ 2922 swap_pager_unswapped(m); 2923 } 2924 } 2925 if (pprot & VM_PROT_WRITE) 2926 vm_set_nosync(m, entry); 2927 pmap_enter(pmap, addr, m, pprot, 0, entry); 2928 mycpu->gd_cnt.v_vm_faults++; 2929 if (curthread->td_lwp) 2930 ++curthread->td_lwp->lwp_ru.ru_minflt; 2931 vm_page_wakeup(m); 2932 } else { 2933 vm_page_wakeup(m); 2934 } 2935 } 2936 vm_object_chain_release(object); 2937 vm_object_drop(object); 2938 } 2939 2940 /* 2941 * Object can be held shared 2942 */ 2943 static void 2944 vm_prefault_quick(pmap_t pmap, vm_offset_t addra, 2945 vm_map_entry_t entry, int prot, int fault_flags) 2946 { 2947 struct lwp *lp; 2948 vm_page_t m; 2949 vm_offset_t addr; 2950 vm_pindex_t pindex; 2951 vm_object_t object; 2952 int i; 2953 int noneg; 2954 int nopos; 2955 int maxpages; 2956 2957 /* 2958 * Get stable max count value, disabled if set to 0 2959 */ 2960 maxpages = vm_prefault_pages; 2961 cpu_ccfence(); 2962 if (maxpages <= 0) 2963 return; 2964 2965 /* 2966 * We do not currently prefault mappings that use virtual page 2967 * tables. We do not prefault foreign pmaps. 2968 */ 2969 if (entry->maptype != VM_MAPTYPE_NORMAL) 2970 return; 2971 lp = curthread->td_lwp; 2972 if (lp == NULL || (pmap != vmspace_pmap(lp->lwp_vmspace))) 2973 return; 2974 object = entry->object.vm_object; 2975 if (object->backing_object != NULL) 2976 return; 2977 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object)); 2978 2979 /* 2980 * Limit pre-fault count to 1024 pages. 2981 */ 2982 if (maxpages > 1024) 2983 maxpages = 1024; 2984 2985 noneg = 0; 2986 nopos = 0; 2987 for (i = 0; i < maxpages; ++i) { 2988 int error; 2989 2990 /* 2991 * Calculate the page to pre-fault, stopping the scan in 2992 * each direction separately if the limit is reached. 2993 */ 2994 if (i & 1) { 2995 if (noneg) 2996 continue; 2997 addr = addra - ((i + 1) >> 1) * PAGE_SIZE; 2998 } else { 2999 if (nopos) 3000 continue; 3001 addr = addra + ((i + 2) >> 1) * PAGE_SIZE; 3002 } 3003 if (addr < entry->start) { 3004 noneg = 1; 3005 if (noneg && nopos) 3006 break; 3007 continue; 3008 } 3009 if (addr >= entry->end) { 3010 nopos = 1; 3011 if (noneg && nopos) 3012 break; 3013 continue; 3014 } 3015 3016 /* 3017 * Follow the VM object chain to obtain the page to be mapped 3018 * into the pmap. This version of the prefault code only 3019 * works with terminal objects. 
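 * (The terminal-object requirement was already enforced above
 * by returning early when object->backing_object was non-NULL.)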
3020 * 3021 * The page must already exist. If we encounter a problem 3022 * we stop here. 3023 * 3024 * WARNING! We cannot call swap_pager_unswapped() or insert 3025 * a new vm_page with a shared token. 3026 */ 3027 pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT; 3028 3029 m = vm_page_lookup_busy_try(object, pindex, TRUE, &error); 3030 if (m == NULL || error) 3031 break; 3032 3033 /* 3034 * Skip pages already mapped, and stop scanning in that 3035 * direction. When the scan terminates in both directions 3036 * we are done. 3037 */ 3038 if (pmap_prefault_ok(pmap, addr) == 0) { 3039 vm_page_wakeup(m); 3040 if (i & 1) 3041 noneg = 1; 3042 else 3043 nopos = 1; 3044 if (noneg && nopos) 3045 break; 3046 continue; 3047 } 3048 3049 /* 3050 * Stop if the page cannot be trivially entered into the 3051 * pmap. 3052 */ 3053 if (((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) || 3054 (m->flags & PG_FICTITIOUS) || 3055 ((m->flags & PG_SWAPPED) && 3056 (prot & VM_PROT_WRITE) && 3057 (fault_flags & VM_FAULT_DIRTY))) { 3058 vm_page_wakeup(m); 3059 break; 3060 } 3061 3062 /* 3063 * Enter the page into the pmap. The object might be held 3064 * shared so we can't do any (serious) modifying operation 3065 * on it. 3066 */ 3067 if ((m->queue - m->pc) == PQ_CACHE) 3068 vm_page_deactivate(m); 3069 if (prot & VM_PROT_WRITE) { 3070 vm_object_set_writeable_dirty(m->object); 3071 vm_set_nosync(m, entry); 3072 if (fault_flags & VM_FAULT_DIRTY) { 3073 vm_page_dirty(m); 3074 /* can't happeen due to conditional above */ 3075 /* swap_pager_unswapped(m); */ 3076 } 3077 } 3078 pmap_enter(pmap, addr, m, prot, 0, entry); 3079 mycpu->gd_cnt.v_vm_faults++; 3080 if (curthread->td_lwp) 3081 ++curthread->td_lwp->lwp_ru.ru_minflt; 3082 vm_page_wakeup(m); 3083 } 3084 } 3085
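
/*
 * Illustrative summary, not functional code: both vm_prefault() and
 * vm_prefault_quick() walk candidate addresses outward from the fault
 * address addra, alternating directions:
 *
 *	i = 0: addra + 1*PAGE_SIZE	i = 1: addra - 1*PAGE_SIZE
 *	i = 2: addra + 2*PAGE_SIZE	i = 3: addra - 2*PAGE_SIZE
 *	i = 4: addra + 3*PAGE_SIZE	i = 5: addra - 3*PAGE_SIZE
 *
 * for up to vm_prefault_pages iterations (capped at 1024).  Each
 * direction stops independently (noneg/nopos) when it leaves the map
 * entry or hits a page that is already mapped (pmap_prefault_ok()
 * returns 0).
 */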